Source file src/encoding/json/jsontext/decode.go

     1  // Copyright 2020 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build goexperiment.jsonv2
     6  
     7  package jsontext
     8  
     9  import (
    10  	"bytes"
    11  	"errors"
    12  	"io"
    13  
    14  	"encoding/json/internal/jsonflags"
    15  	"encoding/json/internal/jsonopts"
    16  	"encoding/json/internal/jsonwire"
    17  )
    18  
    19  // NOTE: The logic for decoding is complicated by the fact that reading from
    20  // an io.Reader into a temporary buffer means that the buffer may contain a
    21  // truncated portion of some valid input, requiring the need to fetch more data.
    22  //
    23  // This file is structured in the following way:
    24  //
    25  //   - consumeXXX functions parse an exact JSON token from a []byte.
    26  //     If the buffer appears truncated, then it returns io.ErrUnexpectedEOF.
    27  //     The consumeSimpleXXX functions are so named because they only handle
    28  //     a subset of the grammar for the JSON token being parsed.
    29  //     They do not handle the full grammar to keep these functions inlinable.
    30  //
    31  //   - Decoder.consumeXXX methods parse the next JSON token from Decoder.buf,
    32  //     automatically fetching more input if necessary. These methods take
    33  //     a position relative to the start of Decoder.buf as an argument and
    34  //     return the end of the consumed JSON token as a position,
    35  //     also relative to the start of Decoder.buf.
    36  //
//   - In the event of an I/O error or state machine violation,
//     the implementation avoids mutating the state of Decoder
//     (aside from the book-keeping needed to implement Decoder.fetch).
//     For this reason, only Decoder.ReadToken and Decoder.ReadValue are
//     responsible for updating Decoder.prevStart and Decoder.prevEnd.
    42  //
    43  //   - For performance, much of the implementation uses the pattern of calling
    44  //     the inlinable consumeXXX functions first, and if more work is necessary,
    45  //     then it calls the slower Decoder.consumeXXX methods.
    46  //     TODO: Revisit this pattern if the Go compiler provides finer control
    47  //     over exactly which calls are inlined or not.
    48  
// Decoder is a streaming decoder for raw JSON tokens and values.
// It is used to read a stream of top-level JSON values,
// each separated by optional whitespace characters.
//
// [Decoder.ReadToken] and [Decoder.ReadValue] calls may be interleaved.
// For example, the following JSON value:
//
//	{"name":"value","array":[null,false,true,3.14159],"object":{"k":"v"}}
//
// can be parsed with the following calls (ignoring errors for brevity):
//
//	d.ReadToken() // {
//	d.ReadToken() // "name"
//	d.ReadToken() // "value"
//	d.ReadValue() // "array"
//	d.ReadToken() // [
//	d.ReadToken() // null
//	d.ReadToken() // false
//	d.ReadValue() // true
//	d.ReadToken() // 3.14159
//	d.ReadToken() // ]
//	d.ReadValue() // "object"
//	d.ReadValue() // {"k":"v"}
//	d.ReadToken() // }
//
// The above is one of many possible sequences of calls and
// may not represent the most sensible method to call for any given token/value.
// For example, it is probably more common to call [Decoder.ReadToken] to obtain a
// string token for object names.
type Decoder struct {
	s decoderState // all state is held indirectly so the exported type stays opaque
}
    81  
// decoderState is the low-level state of Decoder.
// It has exported fields and methods for use by the "json" package.
type decoderState struct {
	state        // token/delimiter state machine (Tokens, Names, Namespaces)
	decodeBuffer // raw input buffering and offset book-keeping
	jsonopts.Struct

	StringCache *[256]string // only used when unmarshaling; identical to json.stringCache
}
    91  
// decodeBuffer is a buffer split into 4 segments:
//
//   - buf[0:prevEnd]         // already read portion of the buffer
//   - buf[prevStart:prevEnd] // previously read value
//   - buf[prevEnd:len(buf)]  // unread portion of the buffer
//   - buf[len(buf):cap(buf)] // unused portion of the buffer
//
// Invariants:
//
//	0 ≤ prevStart ≤ prevEnd ≤ len(buf) ≤ cap(buf)
type decodeBuffer struct {
	peekPos int   // non-zero if valid offset into buf for start of next token
	peekErr error // implies peekPos is -1

	buf       []byte // may alias rd if it is a bytes.Buffer
	prevStart int
	prevEnd   int

	// baseOffset is added to prevStart and prevEnd to obtain
	// the absolute offset relative to the start of io.Reader stream.
	baseOffset int64

	rd io.Reader // nil implies buf was directly provided by user code (see invalidatePreviousRead)
}
   116  
   117  // NewDecoder constructs a new streaming decoder reading from r.
   118  //
   119  // If r is a [bytes.Buffer], then the decoder parses directly from the buffer
   120  // without first copying the contents to an intermediate buffer.
   121  // Additional writes to the buffer must not occur while the decoder is in use.
   122  func NewDecoder(r io.Reader, opts ...Options) *Decoder {
   123  	d := new(Decoder)
   124  	d.Reset(r, opts...)
   125  	return d
   126  }
   127  
   128  // Reset resets a decoder such that it is reading afresh from r and
   129  // configured with the provided options. Reset must not be called on an
   130  // a Decoder passed to the [encoding/json/v2.UnmarshalerFrom.UnmarshalJSONFrom] method
   131  // or the [encoding/json/v2.UnmarshalFromFunc] function.
   132  func (d *Decoder) Reset(r io.Reader, opts ...Options) {
   133  	switch {
   134  	case d == nil:
   135  		panic("jsontext: invalid nil Decoder")
   136  	case r == nil:
   137  		panic("jsontext: invalid nil io.Reader")
   138  	case d.s.Flags.Get(jsonflags.WithinArshalCall):
   139  		panic("jsontext: cannot reset Decoder passed to json.UnmarshalerFrom")
   140  	}
   141  	// If the decoder was previously aliasing a bytes.Buffer,
   142  	// invalidate the alias to avoid writing into the bytes.Buffer's
   143  	// internal buffer.
   144  	b := d.s.buf[:0]
   145  	if _, ok := d.s.rd.(*bytes.Buffer); ok {
   146  		b = nil // avoid reusing b since it aliases the previous bytes.Buffer.
   147  	}
   148  	d.s.reset(b, r, opts...)
   149  }
   150  
   151  func (d *decoderState) reset(b []byte, r io.Reader, opts ...Options) {
   152  	d.state.reset()
   153  	d.decodeBuffer = decodeBuffer{buf: b, rd: r}
   154  	opts2 := jsonopts.Struct{} // avoid mutating d.Struct in case it is part of opts
   155  	opts2.Join(opts...)
   156  	d.Struct = opts2
   157  }
   158  
// Options returns the options used to construct the decoder and
// may additionally contain semantic options passed to a
// [encoding/json/v2.UnmarshalDecode] call.
//
// If operating within
// a [encoding/json/v2.UnmarshalerFrom.UnmarshalJSONFrom] method call or
// a [encoding/json/v2.UnmarshalFromFunc] function call,
// then the returned options are only valid within the call.
func (d *Decoder) Options() Options {
	return &d.s.Struct // aliases internal state; valid only per the doc comment above
}
   170  
// errBufferWriteAfterNext is reported by fetch when more data was written to
// a bytes.Buffer while the Decoder was reading directly from it.
var errBufferWriteAfterNext = errors.New("invalid bytes.Buffer.Write call after calling bytes.Buffer.Next")
   172  
// fetch reads at least 1 byte from the underlying io.Reader.
// It returns io.ErrUnexpectedEOF if zero bytes were read and io.EOF was seen.
func (d *decoderState) fetch() error {
	if d.rd == nil {
		return io.ErrUnexpectedEOF
	}

	// Inform objectNameStack that we are about to fetch new buffer content.
	d.Names.copyQuotedBuffer(d.buf)

	// Specialize bytes.Buffer for better performance.
	if bb, ok := d.rd.(*bytes.Buffer); ok {
		switch {
		case bb.Len() == 0:
			return io.ErrUnexpectedEOF
		case len(d.buf) == 0:
			d.buf = bb.Next(bb.Len()) // "read" all data in the buffer
			return nil
		default:
			// This only occurs if a partially filled bytes.Buffer was provided
			// and more data is written to it while Decoder is reading from it.
			// This practice will lead to data corruption since future writes
			// may overwrite the contents of the current buffer.
			//
			// The user is trying to use a bytes.Buffer as a pipe,
			// but a bytes.Buffer is a poor implementation of a pipe;
			// the purpose-built io.Pipe should be used instead.
			return &ioError{action: "read", err: errBufferWriteAfterNext}
		}
	}

	// Allocate initial buffer if empty.
	if cap(d.buf) == 0 {
		d.buf = make([]byte, 0, 64)
	}

	// Check whether to grow the buffer.
	const maxBufferSize = 4 << 10
	const growthSizeFactor = 2 // higher value is faster
	const growthRateFactor = 2 // higher value is slower
	// By default, grow if below the maximum buffer size.
	grow := cap(d.buf) <= maxBufferSize/growthSizeFactor
	// Growing can be expensive, so only grow
	// if a sufficient number of bytes have been processed.
	grow = grow && int64(cap(d.buf)) < d.previousOffsetEnd()/growthRateFactor
	// If prevStart==0, then fetch was called in order to fetch more data
	// to finish consuming a large JSON value contiguously.
	// Grow if less than 25% of the remaining capacity is available.
	// Note that this may cause the input buffer to exceed maxBufferSize.
	grow = grow || (d.prevStart == 0 && len(d.buf) >= 3*cap(d.buf)/4)

	if grow {
		// Allocate a new buffer and copy the contents of the old buffer over.
		// TODO: Provide a hard limit on the maximum internal buffer size?
		buf := make([]byte, 0, cap(d.buf)*growthSizeFactor)
		d.buf = append(buf, d.buf[d.prevStart:]...)
	} else {
		// Move unread portion of the data to the front.
		n := copy(d.buf[:cap(d.buf)], d.buf[d.prevStart:])
		d.buf = d.buf[:n]
	}
	// The already-read prefix buf[:prevStart] was discarded above,
	// so rebase all relative offsets such that prevStart is zero.
	d.baseOffset += int64(d.prevStart)
	d.prevEnd -= d.prevStart
	d.prevStart = 0

	// Read more data into the internal buffer.
	for {
		n, err := d.rd.Read(d.buf[len(d.buf):cap(d.buf)])
		switch {
		case n > 0:
			d.buf = d.buf[:len(d.buf)+n]
			return nil // ignore errors if any bytes are read
		case err == io.EOF:
			return io.ErrUnexpectedEOF
		case err != nil:
			return &ioError{action: "read", err: err}
		default:
			continue // Read returned (0, nil)
		}
	}
}
   254  
   255  const invalidateBufferByte = '#' // invalid starting character for JSON grammar
   256  
   257  // invalidatePreviousRead invalidates buffers returned by Peek and Read calls
   258  // so that the first byte is an invalid character.
   259  // This Hyrum-proofs the API against faulty application code that assumes
   260  // values returned by ReadValue remain valid past subsequent Read calls.
   261  func (d *decodeBuffer) invalidatePreviousRead() {
   262  	// Avoid mutating the buffer if d.rd is nil which implies that d.buf
   263  	// is provided by the user code and may not expect mutations.
   264  	isBytesBuffer := func(r io.Reader) bool {
   265  		_, ok := r.(*bytes.Buffer)
   266  		return ok
   267  	}
   268  	if d.rd != nil && !isBytesBuffer(d.rd) && d.prevStart < d.prevEnd && uint(d.prevStart) < uint(len(d.buf)) {
   269  		d.buf[d.prevStart] = invalidateBufferByte
   270  		d.prevStart = d.prevEnd
   271  	}
   272  }
   273  
// needMore reports whether there are no more unread bytes.
func (d *decodeBuffer) needMore(pos int) bool {
	// NOTE: The arguments and logic are kept simple to keep this inlinable.
	return pos == len(d.buf)
}

// The following accessors report absolute offsets (relative to the start of
// the io.Reader stream) and views into the buffer segments; the returned
// byte slices alias buf and are invalidated by the next fetch.
func (d *decodeBuffer) offsetAt(pos int) int64     { return d.baseOffset + int64(pos) }
func (d *decodeBuffer) previousOffsetStart() int64 { return d.baseOffset + int64(d.prevStart) }
func (d *decodeBuffer) previousOffsetEnd() int64   { return d.baseOffset + int64(d.prevEnd) }
func (d *decodeBuffer) previousBuffer() []byte     { return d.buf[d.prevStart:d.prevEnd] }
func (d *decodeBuffer) unreadBuffer() []byte       { return d.buf[d.prevEnd:len(d.buf)] }
   285  
   286  // PreviousTokenOrValue returns the previously read token or value
   287  // unless it has been invalidated by a call to PeekKind.
   288  // If a token is just a delimiter, then this returns a 1-byte buffer.
   289  // This method is used for error reporting at the semantic layer.
   290  func (d *decodeBuffer) PreviousTokenOrValue() []byte {
   291  	b := d.previousBuffer()
   292  	// If peek was called, then the previous token or buffer is invalidated.
   293  	if d.peekPos > 0 || len(b) > 0 && b[0] == invalidateBufferByte {
   294  		return nil
   295  	}
   296  	// ReadToken does not preserve the buffer for null, bools, or delimiters.
   297  	// Manually re-construct that buffer.
   298  	if len(b) == 0 {
   299  		b = d.buf[:d.prevEnd] // entirety of the previous buffer
   300  		for _, tok := range []string{"null", "false", "true", "{", "}", "[", "]"} {
   301  			if len(b) >= len(tok) && string(b[len(b)-len(tok):]) == tok {
   302  				return b[len(b)-len(tok):]
   303  			}
   304  		}
   305  	}
   306  	return b
   307  }
   308  
// PeekKind retrieves the next token kind, but does not advance the read offset.
//
// It returns 0 if an error occurs. Any such error is cached until
// the next read call and it is the caller's responsibility to eventually
// follow up a PeekKind call with a read call.
func (d *Decoder) PeekKind() Kind {
	return d.s.PeekKind()
}
func (d *decoderState) PeekKind() Kind {
	// Check whether we have a cached peek result.
	if d.peekPos > 0 {
		return Kind(d.buf[d.peekPos]).normalize()
	}

	var err error
	d.invalidatePreviousRead()
	pos := d.prevEnd

	// Consume leading whitespace.
	pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
	if d.needMore(pos) {
		if pos, err = d.consumeWhitespace(pos); err != nil {
			if err == io.ErrUnexpectedEOF && d.Tokens.Depth() == 1 {
				err = io.EOF // EOF possibly if no Tokens present after top-level value
			}
			// Cache the failure; the next Read call will report it.
			d.peekPos, d.peekErr = -1, wrapSyntacticError(d, err, pos, 0)
			return invalidKind
		}
	}

	// Consume colon or comma.
	var delim byte
	if c := d.buf[pos]; c == ':' || c == ',' {
		delim = c
		pos += 1
		pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
		if d.needMore(pos) {
			if pos, err = d.consumeWhitespace(pos); err != nil {
				err = wrapSyntacticError(d, err, pos, 0)
				d.peekPos, d.peekErr = -1, d.checkDelimBeforeIOError(delim, err)
				return invalidKind
			}
		}
	}
	next := Kind(d.buf[pos]).normalize()
	if d.Tokens.needDelim(next) != delim {
		d.peekPos, d.peekErr = -1, d.checkDelim(delim, next)
		return invalidKind
	}

	// This may set peekPos to zero, which is indistinguishable from
	// the uninitialized state. While a small hit to performance, it is correct
	// since ReadValue and ReadToken will disregard the cached result and
	// recompute the next kind.
	d.peekPos, d.peekErr = pos, nil
	return next
}
   366  
   367  // checkDelimBeforeIOError checks whether the delim is even valid
   368  // before returning an IO error, which occurs after the delim.
   369  func (d *decoderState) checkDelimBeforeIOError(delim byte, err error) error {
   370  	// Since an IO error occurred, we do not know what the next kind is.
   371  	// However, knowing the next kind is necessary to validate
   372  	// whether the current delim is at least potentially valid.
   373  	// Since a JSON string is always valid as the next token,
   374  	// conservatively assume that is the next kind for validation.
   375  	const next = Kind('"')
   376  	if d.Tokens.needDelim(next) != delim {
   377  		err = d.checkDelim(delim, next)
   378  	}
   379  	return err
   380  }
   381  
   382  // CountNextDelimWhitespace counts the number of upcoming bytes of
   383  // delimiter or whitespace characters.
   384  // This method is used for error reporting at the semantic layer.
   385  func (d *decoderState) CountNextDelimWhitespace() int {
   386  	d.PeekKind() // populate unreadBuffer
   387  	return len(d.unreadBuffer()) - len(bytes.TrimLeft(d.unreadBuffer(), ",: \n\r\t"))
   388  }
   389  
   390  // checkDelim checks whether delim is valid for the given next kind.
   391  func (d *decoderState) checkDelim(delim byte, next Kind) error {
   392  	where := "at start of value"
   393  	switch d.Tokens.needDelim(next) {
   394  	case delim:
   395  		return nil
   396  	case ':':
   397  		where = "after object name (expecting ':')"
   398  	case ',':
   399  		if d.Tokens.Last.isObject() {
   400  			where = "after object value (expecting ',' or '}')"
   401  		} else {
   402  			where = "after array element (expecting ',' or ']')"
   403  		}
   404  	}
   405  	pos := d.prevEnd // restore position to right after leading whitespace
   406  	pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
   407  	err := jsonwire.NewInvalidCharacterError(d.buf[pos:], where)
   408  	return wrapSyntacticError(d, err, pos, 0)
   409  }
   410  
// SkipValue is semantically equivalent to calling [Decoder.ReadValue] and discarding
// the result except that memory is not wasted trying to hold the entire result.
func (d *Decoder) SkipValue() error {
	return d.s.SkipValue()
}
   416  func (d *decoderState) SkipValue() error {
   417  	switch d.PeekKind() {
   418  	case '{', '[':
   419  		// For JSON objects and arrays, keep skipping all tokens
   420  		// until the depth matches the starting depth.
   421  		depth := d.Tokens.Depth()
   422  		for {
   423  			if _, err := d.ReadToken(); err != nil {
   424  				return err
   425  			}
   426  			if depth >= d.Tokens.Depth() {
   427  				return nil
   428  			}
   429  		}
   430  	default:
   431  		// Trying to skip a value when the next token is a '}' or ']'
   432  		// will result in an error being returned here.
   433  		var flags jsonwire.ValueFlags
   434  		if _, err := d.ReadValue(&flags); err != nil {
   435  			return err
   436  		}
   437  		return nil
   438  	}
   439  }
   440  
   441  // SkipValueRemainder skips the remainder of a value
   442  // after reading a '{' or '[' token.
   443  func (d *decoderState) SkipValueRemainder() error {
   444  	if d.Tokens.Depth()-1 > 0 && d.Tokens.Last.Length() == 0 {
   445  		for n := d.Tokens.Depth(); d.Tokens.Depth() >= n; {
   446  			if _, err := d.ReadToken(); err != nil {
   447  				return err
   448  			}
   449  		}
   450  	}
   451  	return nil
   452  }
   453  
   454  // SkipUntil skips all tokens until the state machine
   455  // is at or past the specified depth and length.
   456  func (d *decoderState) SkipUntil(depth int, length int64) error {
   457  	for d.Tokens.Depth() > depth || (d.Tokens.Depth() == depth && d.Tokens.Last.Length() < length) {
   458  		if _, err := d.ReadToken(); err != nil {
   459  			return err
   460  		}
   461  	}
   462  	return nil
   463  }
   464  
// ReadToken reads the next [Token], advancing the read offset.
// The returned token is only valid until the next Peek, Read, or Skip call.
// It returns [io.EOF] if there are no more tokens.
func (d *Decoder) ReadToken() (Token, error) {
	return d.s.ReadToken()
}
func (d *decoderState) ReadToken() (Token, error) {
	// Determine the next kind.
	var err error
	var next Kind
	pos := d.peekPos
	if pos != 0 {
		// Use cached peek result.
		if d.peekErr != nil {
			err := d.peekErr
			d.peekPos, d.peekErr = 0, nil // possibly a transient I/O error
			return Token{}, err
		}
		next = Kind(d.buf[pos]).normalize()
		d.peekPos = 0 // reset cache
	} else {
		d.invalidatePreviousRead()
		pos = d.prevEnd

		// Consume leading whitespace.
		pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
		if d.needMore(pos) {
			if pos, err = d.consumeWhitespace(pos); err != nil {
				if err == io.ErrUnexpectedEOF && d.Tokens.Depth() == 1 {
					err = io.EOF // EOF possibly if no Tokens present after top-level value
				}
				return Token{}, wrapSyntacticError(d, err, pos, 0)
			}
		}

		// Consume colon or comma.
		var delim byte
		if c := d.buf[pos]; c == ':' || c == ',' {
			delim = c
			pos += 1
			pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
			if d.needMore(pos) {
				if pos, err = d.consumeWhitespace(pos); err != nil {
					err = wrapSyntacticError(d, err, pos, 0)
					return Token{}, d.checkDelimBeforeIOError(delim, err)
				}
			}
		}
		next = Kind(d.buf[pos]).normalize()
		if d.Tokens.needDelim(next) != delim {
			return Token{}, d.checkDelim(delim, next)
		}
	}

	// Handle the next token.
	// Each case tries the inlinable consumeXXX fast path first and only
	// falls back to the slower Decoder method if the buffer was truncated.
	var n int
	switch next {
	case 'n':
		if jsonwire.ConsumeNull(d.buf[pos:]) == 0 {
			pos, err = d.consumeLiteral(pos, "null")
			if err != nil {
				return Token{}, wrapSyntacticError(d, err, pos, +1)
			}
		} else {
			pos += len("null")
		}
		if err = d.Tokens.appendLiteral(); err != nil {
			return Token{}, wrapSyntacticError(d, err, pos-len("null"), +1) // report position at start of literal
		}
		d.prevStart, d.prevEnd = pos, pos
		return Null, nil

	case 'f':
		if jsonwire.ConsumeFalse(d.buf[pos:]) == 0 {
			pos, err = d.consumeLiteral(pos, "false")
			if err != nil {
				return Token{}, wrapSyntacticError(d, err, pos, +1)
			}
		} else {
			pos += len("false")
		}
		if err = d.Tokens.appendLiteral(); err != nil {
			return Token{}, wrapSyntacticError(d, err, pos-len("false"), +1) // report position at start of literal
		}
		d.prevStart, d.prevEnd = pos, pos
		return False, nil

	case 't':
		if jsonwire.ConsumeTrue(d.buf[pos:]) == 0 {
			pos, err = d.consumeLiteral(pos, "true")
			if err != nil {
				return Token{}, wrapSyntacticError(d, err, pos, +1)
			}
		} else {
			pos += len("true")
		}
		if err = d.Tokens.appendLiteral(); err != nil {
			return Token{}, wrapSyntacticError(d, err, pos-len("true"), +1) // report position at start of literal
		}
		d.prevStart, d.prevEnd = pos, pos
		return True, nil

	case '"':
		var flags jsonwire.ValueFlags // TODO: Preserve this in Token?
		if n = jsonwire.ConsumeSimpleString(d.buf[pos:]); n == 0 {
			// Track the length via absolute offsets since the slow path
			// may refill the buffer, shifting relative positions.
			oldAbsPos := d.baseOffset + int64(pos)
			pos, err = d.consumeString(&flags, pos)
			newAbsPos := d.baseOffset + int64(pos)
			n = int(newAbsPos - oldAbsPos)
			if err != nil {
				return Token{}, wrapSyntacticError(d, err, pos, +1)
			}
		} else {
			pos += n
		}
		if d.Tokens.Last.NeedObjectName() {
			if !d.Flags.Get(jsonflags.AllowDuplicateNames) {
				if !d.Tokens.Last.isValidNamespace() {
					return Token{}, wrapSyntacticError(d, errInvalidNamespace, pos-n, +1)
				}
				if d.Tokens.Last.isActiveNamespace() && !d.Namespaces.Last().insertQuoted(d.buf[pos-n:pos], flags.IsVerbatim()) {
					err = wrapWithObjectName(ErrDuplicateName, d.buf[pos-n:pos])
					return Token{}, wrapSyntacticError(d, err, pos-n, +1) // report position at start of string
				}
			}
			d.Names.ReplaceLastQuotedOffset(pos - n) // only replace if insertQuoted succeeds
		}
		if err = d.Tokens.appendString(); err != nil {
			return Token{}, wrapSyntacticError(d, err, pos-n, +1) // report position at start of string
		}
		d.prevStart, d.prevEnd = pos-n, pos
		return Token{raw: &d.decodeBuffer, num: uint64(d.previousOffsetStart())}, nil

	case '0':
		// NOTE: Since JSON numbers are not self-terminating,
		// we need to make sure that the next byte is not part of a number.
		if n = jsonwire.ConsumeSimpleNumber(d.buf[pos:]); n == 0 || d.needMore(pos+n) {
			oldAbsPos := d.baseOffset + int64(pos)
			pos, err = d.consumeNumber(pos)
			newAbsPos := d.baseOffset + int64(pos)
			n = int(newAbsPos - oldAbsPos)
			if err != nil {
				return Token{}, wrapSyntacticError(d, err, pos, +1)
			}
		} else {
			pos += n
		}
		if err = d.Tokens.appendNumber(); err != nil {
			return Token{}, wrapSyntacticError(d, err, pos-n, +1) // report position at start of number
		}
		d.prevStart, d.prevEnd = pos-n, pos
		return Token{raw: &d.decodeBuffer, num: uint64(d.previousOffsetStart())}, nil

	case '{':
		if err = d.Tokens.pushObject(); err != nil {
			return Token{}, wrapSyntacticError(d, err, pos, +1)
		}
		d.Names.push()
		if !d.Flags.Get(jsonflags.AllowDuplicateNames) {
			d.Namespaces.push()
		}
		pos += 1
		d.prevStart, d.prevEnd = pos, pos
		return BeginObject, nil

	case '}':
		if err = d.Tokens.popObject(); err != nil {
			return Token{}, wrapSyntacticError(d, err, pos, +1)
		}
		d.Names.pop()
		if !d.Flags.Get(jsonflags.AllowDuplicateNames) {
			d.Namespaces.pop()
		}
		pos += 1
		d.prevStart, d.prevEnd = pos, pos
		return EndObject, nil

	case '[':
		if err = d.Tokens.pushArray(); err != nil {
			return Token{}, wrapSyntacticError(d, err, pos, +1)
		}
		pos += 1
		d.prevStart, d.prevEnd = pos, pos
		return BeginArray, nil

	case ']':
		if err = d.Tokens.popArray(); err != nil {
			return Token{}, wrapSyntacticError(d, err, pos, +1)
		}
		pos += 1
		d.prevStart, d.prevEnd = pos, pos
		return EndArray, nil

	default:
		err = jsonwire.NewInvalidCharacterError(d.buf[pos:], "at start of value")
		return Token{}, wrapSyntacticError(d, err, pos, +1)
	}
}
   663  
// ReadValue returns the next raw JSON value, advancing the read offset.
// The value is stripped of any leading or trailing whitespace and
// contains the exact bytes of the input, which may contain invalid UTF-8
// if [AllowInvalidUTF8] is specified.
//
// The returned value is only valid until the next Peek, Read, or Skip call and
// may not be mutated while the Decoder remains in use.
// If the decoder is currently at the end token for an object or array,
// then it reports a [SyntacticError] and the internal state remains unchanged.
// It returns [io.EOF] if there are no more values.
func (d *Decoder) ReadValue() (Value, error) {
	var flags jsonwire.ValueFlags
	return d.s.ReadValue(&flags)
}
func (d *decoderState) ReadValue(flags *jsonwire.ValueFlags) (Value, error) {
	// Determine the next kind.
	var err error
	var next Kind
	pos := d.peekPos
	if pos != 0 {
		// Use cached peek result.
		if d.peekErr != nil {
			err := d.peekErr
			d.peekPos, d.peekErr = 0, nil // possibly a transient I/O error
			return nil, err
		}
		next = Kind(d.buf[pos]).normalize()
		d.peekPos = 0 // reset cache
	} else {
		d.invalidatePreviousRead()
		pos = d.prevEnd

		// Consume leading whitespace.
		pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
		if d.needMore(pos) {
			if pos, err = d.consumeWhitespace(pos); err != nil {
				if err == io.ErrUnexpectedEOF && d.Tokens.Depth() == 1 {
					err = io.EOF // EOF possibly if no Tokens present after top-level value
				}
				return nil, wrapSyntacticError(d, err, pos, 0)
			}
		}

		// Consume colon or comma.
		var delim byte
		if c := d.buf[pos]; c == ':' || c == ',' {
			delim = c
			pos += 1
			pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
			if d.needMore(pos) {
				if pos, err = d.consumeWhitespace(pos); err != nil {
					err = wrapSyntacticError(d, err, pos, 0)
					return nil, d.checkDelimBeforeIOError(delim, err)
				}
			}
		}
		next = Kind(d.buf[pos]).normalize()
		if d.Tokens.needDelim(next) != delim {
			return nil, d.checkDelim(delim, next)
		}
	}

	// Handle the next value.
	// Track the value's length via absolute offsets since consumeValue
	// may refill the buffer, shifting relative positions.
	oldAbsPos := d.baseOffset + int64(pos)
	pos, err = d.consumeValue(flags, pos, d.Tokens.Depth())
	newAbsPos := d.baseOffset + int64(pos)
	n := int(newAbsPos - oldAbsPos)
	if err != nil {
		return nil, wrapSyntacticError(d, err, pos, +1)
	}
	// Update the state machine to reflect the value just consumed.
	switch next {
	case 'n', 't', 'f':
		err = d.Tokens.appendLiteral()
	case '"':
		if d.Tokens.Last.NeedObjectName() {
			if !d.Flags.Get(jsonflags.AllowDuplicateNames) {
				if !d.Tokens.Last.isValidNamespace() {
					err = errInvalidNamespace
					break
				}
				if d.Tokens.Last.isActiveNamespace() && !d.Namespaces.Last().insertQuoted(d.buf[pos-n:pos], flags.IsVerbatim()) {
					err = wrapWithObjectName(ErrDuplicateName, d.buf[pos-n:pos])
					break
				}
			}
			d.Names.ReplaceLastQuotedOffset(pos - n) // only replace if insertQuoted succeeds
		}
		err = d.Tokens.appendString()
	case '0':
		err = d.Tokens.appendNumber()
	case '{':
		if err = d.Tokens.pushObject(); err != nil {
			break
		}
		if err = d.Tokens.popObject(); err != nil {
			panic("BUG: popObject should never fail immediately after pushObject: " + err.Error())
		}
	case '[':
		if err = d.Tokens.pushArray(); err != nil {
			break
		}
		if err = d.Tokens.popArray(); err != nil {
			panic("BUG: popArray should never fail immediately after pushArray: " + err.Error())
		}
	}
	if err != nil {
		return nil, wrapSyntacticError(d, err, pos-n, +1) // report position at start of value
	}
	d.prevEnd = pos
	d.prevStart = pos - n
	// Full slice expression prevents callers from appending into the buffer.
	return d.buf[pos-n : pos : pos], nil
}
   776  
// CheckNextValue checks whether the next value is syntactically valid,
// but does not advance the read offset.
// If last, it verifies that the stream cleanly terminates with [io.EOF].
func (d *decoderState) CheckNextValue(last bool) error {
	d.PeekKind() // populates d.peekPos and d.peekErr
	pos, err := d.peekPos, d.peekErr
	// Clear the cached peek state so that a subsequent ReadToken or
	// ReadValue re-parses from scratch rather than reusing stale results.
	d.peekPos, d.peekErr = 0, nil
	if err != nil {
		return err
	}

	var flags jsonwire.ValueFlags
	// NOTE: The inner pos deliberately shadows the outer one:
	// the outer pos marks where the value starts, while the inner pos
	// marks where parsing stopped (used for error reporting and EOF check).
	if pos, err := d.consumeValue(&flags, pos, d.Tokens.Depth()); err != nil {
		return wrapSyntacticError(d, err, pos, +1)
	} else if last {
		return d.checkEOF(pos)
	}
	return nil
}
   796  
// CheckEOF verifies that the input has no more data.
func (d *decoderState) CheckEOF() error {
	// Start scanning at the end of the most recently read token or value.
	return d.checkEOF(d.prevEnd)
}
   801  func (d *decoderState) checkEOF(pos int) error {
   802  	switch pos, err := d.consumeWhitespace(pos); err {
   803  	case nil:
   804  		err := jsonwire.NewInvalidCharacterError(d.buf[pos:], "after top-level value")
   805  		return wrapSyntacticError(d, err, pos, 0)
   806  	case io.ErrUnexpectedEOF:
   807  		return nil
   808  	default:
   809  		return err
   810  	}
   811  }
   812  
// consumeWhitespace consumes all whitespace starting at d.buf[pos:].
// It returns the new position in d.buf immediately after the last whitespace.
// If it returns nil, there is guaranteed to at least be one unread byte.
//
// The following pattern is common in this implementation:
//
//	pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
//	if d.needMore(pos) {
//		if pos, err = d.consumeWhitespace(pos); err != nil {
//			return ...
//		}
//	}
//
// It is difficult to simplify this without sacrificing performance since
// consumeWhitespace must be inlined. The body of the if statement is
// executed only in rare situations where we need to fetch more data.
// Since fetching may return an error, we also need to check the error.
func (d *decoderState) consumeWhitespace(pos int) (newPos int, err error) {
	for {
		pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
		if d.needMore(pos) {
			// Convert pos to an absolute stream offset so it can be
			// remapped after fetch shifts or reallocates d.buf.
			absPos := d.baseOffset + int64(pos)
			err = d.fetch() // will mutate d.buf and invalidate pos
			pos = int(absPos - d.baseOffset)
			if err != nil {
				return pos, err
			}
			continue // retry with the newly fetched data
		}
		return pos, nil
	}
}
   845  
// consumeValue consumes a single JSON value starting at d.buf[pos:].
// It returns the new position in d.buf immediately after the value.
func (d *decoderState) consumeValue(flags *jsonwire.ValueFlags, pos, depth int) (newPos int, err error) {
	for {
		var n int
		var err error
		// Dispatch on the normalized first byte of the value.
		// For simple kinds, try the fast non-resumable parser first and
		// fall back to a slower (or resumable) path only if it fails.
		switch next := Kind(d.buf[pos]).normalize(); next {
		case 'n':
			if n = jsonwire.ConsumeNull(d.buf[pos:]); n == 0 {
				n, err = jsonwire.ConsumeLiteral(d.buf[pos:], "null")
			}
		case 'f':
			if n = jsonwire.ConsumeFalse(d.buf[pos:]); n == 0 {
				n, err = jsonwire.ConsumeLiteral(d.buf[pos:], "false")
			}
		case 't':
			if n = jsonwire.ConsumeTrue(d.buf[pos:]); n == 0 {
				n, err = jsonwire.ConsumeLiteral(d.buf[pos:], "true")
			}
		case '"':
			if n = jsonwire.ConsumeSimpleString(d.buf[pos:]); n == 0 {
				// String needs escape handling or more input.
				return d.consumeString(flags, pos)
			}
		case '0':
			// NOTE: Since JSON numbers are not self-terminating,
			// we need to make sure that the next byte is not part of a number.
			if n = jsonwire.ConsumeSimpleNumber(d.buf[pos:]); n == 0 || d.needMore(pos+n) {
				return d.consumeNumber(pos)
			}
		case '{':
			return d.consumeObject(flags, pos, depth)
		case '[':
			return d.consumeArray(flags, pos, depth)
		default:
			// A closing delimiter that does not match the surrounding
			// container gets a dedicated error for better diagnostics.
			if (d.Tokens.Last.isObject() && next == ']') || (d.Tokens.Last.isArray() && next == '}') {
				return pos, errMismatchDelim
			}
			return pos, jsonwire.NewInvalidCharacterError(d.buf[pos:], "at start of value")
		}
		if err == io.ErrUnexpectedEOF {
			// The literal may merely be truncated; fetch more input and
			// retry, remapping pos across the buffer mutation.
			absPos := d.baseOffset + int64(pos)
			err = d.fetch() // will mutate d.buf and invalidate pos
			pos = int(absPos - d.baseOffset)
			if err != nil {
				return pos + n, err
			}
			continue
		}
		return pos + n, err
	}
}
   897  
   898  // consumeLiteral consumes a single JSON literal starting at d.buf[pos:].
   899  // It returns the new position in d.buf immediately after the literal.
   900  func (d *decoderState) consumeLiteral(pos int, lit string) (newPos int, err error) {
   901  	for {
   902  		n, err := jsonwire.ConsumeLiteral(d.buf[pos:], lit)
   903  		if err == io.ErrUnexpectedEOF {
   904  			absPos := d.baseOffset + int64(pos)
   905  			err = d.fetch() // will mutate d.buf and invalidate pos
   906  			pos = int(absPos - d.baseOffset)
   907  			if err != nil {
   908  				return pos + n, err
   909  			}
   910  			continue
   911  		}
   912  		return pos + n, err
   913  	}
   914  }
   915  
// consumeString consumes a single JSON string starting at d.buf[pos:].
// It returns the new position in d.buf immediately after the string.
func (d *decoderState) consumeString(flags *jsonwire.ValueFlags, pos int) (newPos int, err error) {
	// n counts the bytes already validated, letting the resumable parser
	// pick up where it left off after each fetch of more input.
	var n int
	for {
		n, err = jsonwire.ConsumeStringResumable(flags, d.buf[pos:], n, !d.Flags.Get(jsonflags.AllowInvalidUTF8))
		if err == io.ErrUnexpectedEOF {
			// The string may merely be truncated; fetch more input and
			// retry, remapping pos across the buffer mutation.
			absPos := d.baseOffset + int64(pos)
			err = d.fetch() // will mutate d.buf and invalidate pos
			pos = int(absPos - d.baseOffset)
			if err != nil {
				return pos + n, err
			}
			continue
		}
		return pos + n, err
	}
}
   934  
// consumeNumber consumes a single JSON number starting at d.buf[pos:].
// It returns the new position in d.buf immediately after the number.
func (d *decoderState) consumeNumber(pos int) (newPos int, err error) {
	// n and state carry the progress of the resumable parser across fetches.
	var n int
	var state jsonwire.ConsumeNumberState
	for {
		n, state, err = jsonwire.ConsumeNumberResumable(d.buf[pos:], n, state)
		// NOTE: Since JSON numbers are not self-terminating,
		// we need to make sure that the next byte is not part of a number.
		if err == io.ErrUnexpectedEOF || d.needMore(pos+n) {
			// mayTerminate records whether the bytes seen so far already
			// form a valid number, in which case hitting EOF on the next
			// fetch legitimately terminates it rather than being an error.
			mayTerminate := err == nil
			absPos := d.baseOffset + int64(pos)
			err = d.fetch() // will mutate d.buf and invalidate pos
			pos = int(absPos - d.baseOffset)
			if err != nil {
				if mayTerminate && err == io.ErrUnexpectedEOF {
					return pos + n, nil
				}
				return pos, err
			}
			continue
		}
		return pos + n, err
	}
}
   960  
// consumeObject consumes a single JSON object starting at d.buf[pos:].
// It returns the new position in d.buf immediately after the object.
func (d *decoderState) consumeObject(flags *jsonwire.ValueFlags, pos, depth int) (newPos int, err error) {
	var n int
	var names *objectNamespace
	if !d.Flags.Get(jsonflags.AllowDuplicateNames) {
		// Track member names in a fresh namespace scoped to this object
		// so that duplicate names within it can be rejected.
		d.Namespaces.push()
		defer d.Namespaces.pop()
		names = d.Namespaces.Last()
	}

	// Handle before start.
	if uint(pos) >= uint(len(d.buf)) || d.buf[pos] != '{' {
		panic("BUG: consumeObject must be called with a buffer that starts with '{'")
	} else if depth == maxNestingDepth+1 {
		return pos, errMaxDepth
	}
	pos++

	// Handle after start.
	pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
	if d.needMore(pos) {
		if pos, err = d.consumeWhitespace(pos); err != nil {
			return pos, err
		}
	}
	if d.buf[pos] == '}' {
		pos++
		return pos, nil // empty object
	}

	depth++
	for {
		// Handle before name.
		pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
		if d.needMore(pos) {
			if pos, err = d.consumeWhitespace(pos); err != nil {
				return pos, err
			}
		}
		var flags2 jsonwire.ValueFlags
		if n = jsonwire.ConsumeSimpleString(d.buf[pos:]); n == 0 {
			// Slow path: the name contains escapes or is truncated.
			// Compute its length via absolute offsets since consumeString
			// may fetch and thereby shift d.buf under us.
			oldAbsPos := d.baseOffset + int64(pos)
			pos, err = d.consumeString(&flags2, pos)
			newAbsPos := d.baseOffset + int64(pos)
			n = int(newAbsPos - oldAbsPos)
			flags.Join(flags2)
			if err != nil {
				return pos, err
			}
		} else {
			pos += n
		}
		quotedName := d.buf[pos-n : pos]
		if !d.Flags.Get(jsonflags.AllowDuplicateNames) && !names.insertQuoted(quotedName, flags2.IsVerbatim()) {
			// Report the error at the start of the duplicate name.
			return pos - n, wrapWithObjectName(ErrDuplicateName, quotedName)
		}

		// Handle after name.
		pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
		if d.needMore(pos) {
			if pos, err = d.consumeWhitespace(pos); err != nil {
				return pos, wrapWithObjectName(err, quotedName)
			}
		}
		if d.buf[pos] != ':' {
			err := jsonwire.NewInvalidCharacterError(d.buf[pos:], "after object name (expecting ':')")
			return pos, wrapWithObjectName(err, quotedName)
		}
		pos++

		// Handle before value.
		pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
		if d.needMore(pos) {
			if pos, err = d.consumeWhitespace(pos); err != nil {
				return pos, wrapWithObjectName(err, quotedName)
			}
		}
		pos, err = d.consumeValue(flags, pos, depth)
		if err != nil {
			// Errors within the member value are attributed to the name
			// to produce a more precise JSON Pointer in the error.
			return pos, wrapWithObjectName(err, quotedName)
		}

		// Handle after value.
		pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
		if d.needMore(pos) {
			if pos, err = d.consumeWhitespace(pos); err != nil {
				return pos, err
			}
		}
		switch d.buf[pos] {
		case ',':
			pos++
			continue // next member
		case '}':
			pos++
			return pos, nil
		default:
			return pos, jsonwire.NewInvalidCharacterError(d.buf[pos:], "after object value (expecting ',' or '}')")
		}
	}
}
  1063  
// consumeArray consumes a single JSON array starting at d.buf[pos:].
// It returns the new position in d.buf immediately after the array.
func (d *decoderState) consumeArray(flags *jsonwire.ValueFlags, pos, depth int) (newPos int, err error) {
	// Handle before start.
	if uint(pos) >= uint(len(d.buf)) || d.buf[pos] != '[' {
		panic("BUG: consumeArray must be called with a buffer that starts with '['")
	} else if depth == maxNestingDepth+1 {
		return pos, errMaxDepth
	}
	pos++

	// Handle after start.
	pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
	if d.needMore(pos) {
		if pos, err = d.consumeWhitespace(pos); err != nil {
			return pos, err
		}
	}
	if d.buf[pos] == ']' {
		pos++
		return pos, nil // empty array
	}

	// idx tracks the current element index for error attribution.
	var idx int64
	depth++
	for {
		// Handle before value.
		pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
		if d.needMore(pos) {
			if pos, err = d.consumeWhitespace(pos); err != nil {
				return pos, err
			}
		}
		pos, err = d.consumeValue(flags, pos, depth)
		if err != nil {
			// Attribute errors within the element to its index
			// to produce a more precise JSON Pointer in the error.
			return pos, wrapWithArrayIndex(err, idx)
		}

		// Handle after value.
		pos += jsonwire.ConsumeWhitespace(d.buf[pos:])
		if d.needMore(pos) {
			if pos, err = d.consumeWhitespace(pos); err != nil {
				return pos, err
			}
		}
		switch d.buf[pos] {
		case ',':
			pos++
			idx++
			continue // next element
		case ']':
			pos++
			return pos, nil
		default:
			return pos, jsonwire.NewInvalidCharacterError(d.buf[pos:], "after array element (expecting ',' or ']')")
		}
	}
}
  1122  
// InputOffset returns the current input byte offset. It gives the location
// of the next byte immediately after the most recently returned token or value.
// The number of bytes actually read from the underlying [io.Reader] may be more
// than this offset due to internal buffering effects.
func (d *Decoder) InputOffset() int64 {
	// previousOffsetEnd is an absolute offset into the stream, so it is
	// unaffected by internal buffer shifting.
	return d.s.previousOffsetEnd()
}
  1130  
// UnreadBuffer returns the data remaining in the unread buffer,
// which may contain zero or more bytes.
// The returned buffer must not be mutated while Decoder continues to be used.
// The buffer contents are valid until the next Peek, Read, or Skip call.
func (d *Decoder) UnreadBuffer() []byte {
	// Delegates to the underlying decoder state; the returned slice
	// aliases the internal buffer, hence the aliasing caveats above.
	return d.s.unreadBuffer()
}
  1138  
// StackDepth returns the depth of the state machine for read JSON data.
// Each level on the stack represents a nested JSON object or array.
// It is incremented whenever an [BeginObject] or [BeginArray] token is encountered
// and decremented whenever an [EndObject] or [EndArray] token is encountered.
// The depth is zero-indexed, where zero represents the top-level JSON value.
func (d *Decoder) StackDepth() int {
	// NOTE: Keep in sync with Encoder.StackDepth.
	// The internal depth reserves a level for the top-level value,
	// so subtract one to make the reported depth zero-indexed.
	return d.s.Tokens.Depth() - 1
}
  1148  
  1149  // StackIndex returns information about the specified stack level.
  1150  // It must be a number between 0 and [Decoder.StackDepth], inclusive.
  1151  // For each level, it reports the kind:
  1152  //
  1153  //   - 0 for a level of zero,
  1154  //   - '{' for a level representing a JSON object, and
  1155  //   - '[' for a level representing a JSON array.
  1156  //
  1157  // It also reports the length of that JSON object or array.
  1158  // Each name and value in a JSON object is counted separately,
  1159  // so the effective number of members would be half the length.
  1160  // A complete JSON object must have an even length.
  1161  func (d *Decoder) StackIndex(i int) (Kind, int64) {
  1162  	// NOTE: Keep in sync with Encoder.StackIndex.
  1163  	switch s := d.s.Tokens.index(i); {
  1164  	case i > 0 && s.isObject():
  1165  		return '{', s.Length()
  1166  	case i > 0 && s.isArray():
  1167  		return '[', s.Length()
  1168  	default:
  1169  		return 0, s.Length()
  1170  	}
  1171  }
  1172  
// StackPointer returns a JSON Pointer (RFC 6901) to the most recently read value.
func (d *Decoder) StackPointer() Pointer {
	// A where argument of -1 selects the most recently read value.
	return Pointer(d.s.AppendStackPointer(nil, -1))
}
  1177  
// AppendStackPointer appends a JSON Pointer for the current stack state to b
// and returns the extended buffer; where selects which value the pointer
// refers to (e.g., -1 for the most recently read value).
func (d *decoderState) AppendStackPointer(b []byte, where int) []byte {
	// Quoted names may still alias d.buf, which later reads can invalidate;
	// copy them into stable storage before constructing the pointer.
	d.Names.copyQuotedBuffer(d.buf)
	return d.state.appendStackPointer(b, where)
}
  1182  

View as plain text