Update vendor/ to match go.mod (#33)

Eric Duncan 2020-02-14 09:30:51 -05:00 committed by GitHub
parent 4b460c48ee
commit e053d69769
14 changed files with 179 additions and 228 deletions


@@ -1,6 +1,10 @@
 on: [push]
 name: go-cicd
+# TODO: add go module publishing
+# maybe, curl https://sum.golang.org/lookup/github.com/eduncan911/podcast@v1.4.2
+#
 jobs:
   lint:


@@ -2,7 +2,7 @@ ISC License
 Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
-Permission to use, copy, modify, and/or distribute this software for any
+Permission to use, copy, modify, and distribute this software for any
 purpose with or without fee is hereby granted, provided that the above
 copyright notice and this permission notice appear in all copies.


@@ -16,9 +16,7 @@
 // when the code is not running on Google App Engine, compiled by GopherJS, and
 // "-tags safe" is not added to the go build command line. The "disableunsafe"
 // tag is deprecated and thus should not be used.
-// Go versions prior to 1.4 are disabled because they use a different layout
-// for interfaces which make the implementation of unsafeReflectValue more complex.
-// +build !js,!appengine,!safe,!disableunsafe,go1.4
+// +build !js,!appengine,!safe,!disableunsafe

 package spew
@@ -36,49 +34,80 @@ const (
 	ptrSize = unsafe.Sizeof((*byte)(nil))
 )

-type flag uintptr
-
 var (
-	// flagRO indicates whether the value field of a reflect.Value
-	// is read-only.
-	flagRO flag
-
-	// flagAddr indicates whether the address of the reflect.Value's
-	// value may be taken.
-	flagAddr flag
+	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+	// internal reflect.Value fields. These values are valid before golang
+	// commit ecccf07e7f9d which changed the format. The are also valid
+	// after commit 82f48826c6c7 which changed the format again to mirror
+	// the original format. Code in the init function updates these offsets
+	// as necessary.
+	offsetPtr = uintptr(ptrSize)
+	offsetScalar = uintptr(0)
+	offsetFlag = uintptr(ptrSize * 2)
+
+	// flagKindWidth and flagKindShift indicate various bits that the
+	// reflect package uses internally to track kind information.
+	//
+	// flagRO indicates whether or not the value field of a reflect.Value is
+	// read-only.
+	//
+	// flagIndir indicates whether the value field of a reflect.Value is
+	// the actual data or a pointer to the data.
+	//
+	// These values are valid before golang commit 90a7c3c86944 which
+	// changed their positions. Code in the init function updates these
+	// flags as necessary.
+	flagKindWidth = uintptr(5)
+	flagKindShift = uintptr(flagKindWidth - 1)
+	flagRO = uintptr(1 << 0)
+	flagIndir = uintptr(1 << 1)
 )

-// flagKindMask holds the bits that make up the kind
-// part of the flags field. In all the supported versions,
-// it is in the lower 5 bits.
-const flagKindMask = flag(0x1f)
-
-// Different versions of Go have used different
-// bit layouts for the flags type. This table
-// records the known combinations.
-var okFlags = []struct {
-	ro, addr flag
-}{{
-	// From Go 1.4 to 1.5
-	ro: 1 << 5,
-	addr: 1 << 7,
-}, {
-	// Up to Go tip.
-	ro: 1<<5 | 1<<6,
-	addr: 1 << 8,
-}}
-
-var flagValOffset = func() uintptr {
-	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
-	if !ok {
-		panic("reflect.Value has no flag field")
-	}
-	return field.Offset
-}()
-
-// flagField returns a pointer to the flag field of a reflect.Value.
-func flagField(v *reflect.Value) *flag {
-	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
-}
+func init() {
+	// Older versions of reflect.Value stored small integers directly in the
+	// ptr field (which is named val in the older versions). Versions
+	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+	// scalar for this purpose which unfortunately came before the flag
+	// field, so the offset of the flag field is different for those
+	// versions.
+	//
+	// This code constructs a new reflect.Value from a known small integer
+	// and checks if the size of the reflect.Value struct indicates it has
+	// the scalar field. When it does, the offsets are updated accordingly.
+	vv := reflect.ValueOf(0xf00)
+	if unsafe.Sizeof(vv) == (ptrSize * 4) {
+		offsetScalar = ptrSize * 2
+		offsetFlag = ptrSize * 3
+	}
+
+	// Commit 90a7c3c86944 changed the flag positions such that the low
+	// order bits are the kind. This code extracts the kind from the flags
+	// field and ensures it's the correct type. When it's not, the flag
+	// order has been changed to the newer format, so the flags are updated
+	// accordingly.
+	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+	upfv := *(*uintptr)(upf)
+	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+		flagKindShift = 0
+		flagRO = 1 << 5
+		flagIndir = 1 << 6
+
+		// Commit adf9b30e5594 modified the flags to separate the
+		// flagRO flag into two bits which specifies whether or not the
+		// field is embedded. This causes flagIndir to move over a bit
+		// and means that flagRO is the combination of either of the
+		// original flagRO bit and the new bit.
+		//
+		// This code detects the change by extracting what used to be
+		// the indirect bit to ensure it's set. When it's not, the flag
+		// order has been changed to the newer format, so the flags are
+		// updated accordingly.
+		if upfv&flagIndir == 0 {
+			flagRO = 3 << 5
+			flagIndir = 1 << 7
+		}
+	}
+}

 // unsafeReflectValue converts the passed reflect.Value into a one that bypasses
@@ -90,56 +119,34 @@ func flagField(v *reflect.Value) *flag {
 // This allows us to check for implementations of the Stringer and error
 // interfaces to be used for pretty printing ordinarily unaddressable and
 // inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) reflect.Value {
-	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
-		return v
-	}
-	flagFieldPtr := flagField(&v)
-	*flagFieldPtr &^= flagRO
-	*flagFieldPtr |= flagAddr
-	return v
-}
-
-// Sanity checks against future reflect package changes
-// to the type or semantics of the Value.flag field.
-func init() {
-	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
-	if !ok {
-		panic("reflect.Value has no flag field")
-	}
-	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
-		panic("reflect.Value flag field has changed kind")
-	}
-	type t0 int
-	var t struct {
-		A t0
-		// t0 will have flagEmbedRO set.
-		t0
-		// a will have flagStickyRO set
-		a t0
-	}
-	vA := reflect.ValueOf(t).FieldByName("A")
-	va := reflect.ValueOf(t).FieldByName("a")
-	vt0 := reflect.ValueOf(t).FieldByName("t0")
-
-	// Infer flagRO from the difference between the flags
-	// for the (otherwise identical) fields in t.
-	flagPublic := *flagField(&vA)
-	flagWithRO := *flagField(&va) | *flagField(&vt0)
-	flagRO = flagPublic ^ flagWithRO
-
-	// Infer flagAddr from the difference between a value
-	// taken from a pointer and not.
-	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
-	flagNoPtr := *flagField(&vA)
-	flagPtr := *flagField(&vPtrA)
-	flagAddr = flagNoPtr ^ flagPtr
-
-	// Check that the inferred flags tally with one of the known versions.
-	for _, f := range okFlags {
-		if flagRO == f.ro && flagAddr == f.addr {
-			return
-		}
-	}
-	panic("reflect.Value read-only flag has changed semantics")
-}
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+	indirects := 1
+	vt := v.Type()
+	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+	if rvf&flagIndir != 0 {
+		vt = reflect.PtrTo(v.Type())
+		indirects++
+	} else if offsetScalar != 0 {
+		// The value is in the scalar field when it's not one of the
+		// reference types.
+		switch vt.Kind() {
+		case reflect.Uintptr:
+		case reflect.Chan:
+		case reflect.Func:
+		case reflect.Map:
+		case reflect.Ptr:
+		case reflect.UnsafePointer:
+		default:
+			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+				offsetScalar)
+		}
+	}
+
+	pv := reflect.NewAt(vt, upv)
+	rv = pv
+	for i := 0; i < indirects; i++ {
+		rv = rv.Elem()
+	}
+	return rv
 }

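Both versions of the bypass code above serve the same purpose described in the hunk comments: letting spew call the Stringer and error interfaces on values that ordinary reflection refuses to Interface(), such as unexported struct fields. A minimal sketch of what that enables through the public spew API (the type and field names below are illustrative, not taken from the diff):

    package main

    import "github.com/davecgh/go-spew/spew"

    type secret struct{ n int }

    // String makes secret a fmt.Stringer.
    func (s secret) String() string { return "secret(n is set)" }

    type wrapper struct {
        s secret // unexported field: plain reflection cannot Interface() it
    }

    func main() {
        // Per the comment in the hunk above, the unsafe bypass is what lets
        // spew invoke the Stringer on the unexported field instead of
        // falling back to a raw field-by-field dump.
        spew.Dump(wrapper{s: secret{n: 7}})
    }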

@@ -16,7 +16,7 @@
 // when the code is running on Google App Engine, compiled by GopherJS, or
 // "-tags safe" is added to the go build command line. The "disableunsafe"
 // tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe !go1.4
+// +build js appengine safe disableunsafe

 package spew


@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
 	w.Write(closeParenBytes)
 }

-// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
 // prefix to Writer w.
 func printHexPtr(w io.Writer, p uintptr) {
 	// Null pointer.


@@ -35,16 +35,16 @@ var (
 	// cCharRE is a regular expression that matches a cgo char.
 	// It is used to detect character arrays to hexdump them.
-	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")

 	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
 	// char. It is used to detect unsigned character arrays to hexdump
 	// them.
-	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")

 	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
 	// It is used to detect uint8_t arrays to hexdump them.
-	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
 )
// dumpState contains information about the state of a dump operation. // dumpState contains information about the state of a dump operation.
@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
 	// Display dereferenced value.
 	d.w.Write(openParenBytes)
 	switch {
-	case nilFound:
+	case nilFound == true:
 		d.w.Write(nilAngleBytes)
-	case cycleFound:
+	case cycleFound == true:
 		d.w.Write(circularBytes)
 	default:

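The nilFound/cycleFound branches in the dumpPtr hunk above are how the dumper reports nil pointers and already-visited pointers instead of recursing forever. A small illustrative use (the exact marker text printed for a cycle depends on the go-spew version):

    package main

    import "github.com/davecgh/go-spew/spew"

    type node struct {
        name string
        next *node
    }

    func main() {
        n := &node{name: "a"}
        n.next = n // deliberate cycle: the node points back at itself
        // Dump tracks visited pointers, takes the cycleFound branch above,
        // and prints a circular-reference marker rather than looping.
        spew.Dump(n)
    }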

@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
 	// Display dereferenced value.
 	switch {
-	case nilFound:
+	case nilFound == true:
 		f.fs.Write(nilAngleBytes)
-	case cycleFound:
+	case cycleFound == true:
 		f.fs.Write(circularShortBytes)
 	default:

vendor/gopkg.in/yaml.v2/.travis.yml (generated, vendored): 18 changes

@@ -1,16 +1,12 @@
 language: go

 go:
-    - "1.4.x"
-    - "1.5.x"
-    - "1.6.x"
-    - "1.7.x"
-    - "1.8.x"
-    - "1.9.x"
-    - "1.10.x"
-    - "1.11.x"
-    - "1.12.x"
-    - "1.13.x"
-    - "tip"
+    - 1.4
+    - 1.5
+    - 1.6
+    - 1.7
+    - 1.8
+    - 1.9
+    - tip

 go_import_path: gopkg.in/yaml.v2

vendor/gopkg.in/yaml.v2/decode.go (generated, vendored): 48 changes

@@ -229,10 +229,6 @@ type decoder struct {
 	mapType reflect.Type
 	terrors []string
 	strict bool
-	decodeCount int
-	aliasCount int
-	aliasDepth int
 }

 var (
@@ -318,43 +314,7 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm
 	return out, false, false
 }

-const (
-	// 400,000 decode operations is ~500kb of dense object declarations, or
-	// ~5kb of dense object declarations with 10000% alias expansion
-	alias_ratio_range_low = 400000
-	// 4,000,000 decode operations is ~5MB of dense object declarations, or
-	// ~4.5MB of dense object declarations with 10% alias expansion
-	alias_ratio_range_high = 4000000
-	// alias_ratio_range is the range over which we scale allowed alias ratios
-	alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
-)
-
-func allowedAliasRatio(decodeCount int) float64 {
-	switch {
-	case decodeCount <= alias_ratio_range_low:
-		// allow 99% to come from alias expansion for small-to-medium documents
-		return 0.99
-	case decodeCount >= alias_ratio_range_high:
-		// allow 10% to come from alias expansion for very large documents
-		return 0.10
-	default:
-		// scale smoothly from 99% down to 10% over the range.
-		// this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
-		// 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
-		return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
-	}
-}
-
 func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
-	d.decodeCount++
-	if d.aliasDepth > 0 {
-		d.aliasCount++
-	}
-	if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
-		failf("document contains excessive aliasing")
-	}
 	switch n.kind {
 	case documentNode:
 		return d.document(n, out)
@@ -393,9 +353,7 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
 		failf("anchor '%s' value contains itself", n.value)
 	}
 	d.aliases[n] = true
-	d.aliasDepth++
 	good = d.unmarshal(n.alias, out)
-	d.aliasDepth--
 	delete(d.aliases, n)
 	return good
 }
@@ -788,7 +746,8 @@ func (d *decoder) merge(n *node, out reflect.Value) {
 	case mappingNode:
 		d.unmarshal(n, out)
 	case aliasNode:
-		if n.alias != nil && n.alias.kind != mappingNode {
+		an, ok := d.doc.anchors[n.value]
+		if ok && an.kind != mappingNode {
 			failWantMap()
 		}
 		d.unmarshal(n, out)
@@ -797,7 +756,8 @@ func (d *decoder) merge(n *node, out reflect.Value) {
 		for i := len(n.children) - 1; i >= 0; i-- {
 			ni := n.children[i]
 			if ni.kind == aliasNode {
-				if ni.alias != nil && ni.alias.kind != mappingNode {
+				an, ok := d.doc.anchors[ni.value]
+				if ok && an.kind != mappingNode {
 					failWantMap()
 				}
 			} else if ni.kind != mappingNode {

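The lines removed from decode.go above are yaml.v2's guard against documents that blow up through alias expansion ("billion laughs"-style YAML): decode operations and alias-driven operations are counted, and decoding fails once aliases account for more than allowedAliasRatio of the work. A rough sketch of how that ratio scales with document size, reusing the removed function body (the sample counts are illustrative):

    package main

    import "fmt"

    // allowedAliasRatio is copied from the removed hunk above: the fraction
    // of decode operations that may come from alias expansion, by size.
    func allowedAliasRatio(decodeCount int) float64 {
        switch {
        case decodeCount <= 400000:
            return 0.99 // small-to-medium documents
        case decodeCount >= 4000000:
            return 0.10 // very large documents
        default:
            // scale smoothly from 99% down to 10% over the range
            return 0.99 - 0.89*(float64(decodeCount-400000)/float64(4000000-400000))
        }
    }

    func main() {
        for _, n := range []int{1000, 400000, 2200000, 4000000} {
            fmt.Printf("%7d decodes -> allow %.2f alias ratio\n", n, allowedAliasRatio(n))
        }
    }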
vendor/gopkg.in/yaml.v2/resolve.go (generated, vendored): 2 changes

@@ -81,7 +81,7 @@ func resolvableTag(tag string) bool {
 	return false
 }

-var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)

 func resolve(tag string, in string) (rtag string, out interface{}) {
 	if !resolvableTag(tag) {

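The two yamlStyleFloat patterns in the hunk above accept slightly different spellings: the pattern being removed also matches exponents without an explicit sign and numbers with a trailing dot, while the pattern being added back requires a signed exponent and trailing digits. A quick comparison, with both regexes copied from the hunk and a few illustrative inputs:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // removed pattern (left side of the hunk) and restored pattern (right side)
        removed := regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
        restored := regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)

        for _, s := range []string{"685230.15", "1e3", "5.", ".5", "+12e+03"} {
            fmt.Printf("%-12q removed=%-5v restored=%v\n",
                s, removed.MatchString(s), restored.MatchString(s))
        }
    }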
vendor/gopkg.in/yaml.v2/scannerc.go (generated, vendored): 121 changes

@@ -626,17 +626,30 @@ func trace(args ...interface{}) func() {
 func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
 	// While we need more tokens to fetch, do it.
 	for {
-		if parser.tokens_head != len(parser.tokens) {
-			// If queue is non-empty, check if any potential simple key may
-			// occupy the head position.
-			head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
-			if !ok {
-				break
-			} else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
-				return false
-			} else if !valid {
-				break
-			}
-		}
+		// Check if we really need to fetch more tokens.
+		need_more_tokens := false
+
+		if parser.tokens_head == len(parser.tokens) {
+			// Queue is empty.
+			need_more_tokens = true
+		} else {
+			// Check if any potential simple key may occupy the head position.
+			if !yaml_parser_stale_simple_keys(parser) {
+				return false
+			}
+
+			for i := range parser.simple_keys {
+				simple_key := &parser.simple_keys[i]
+				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+					need_more_tokens = true
+					break
+				}
+			}
+		}
+
+		// We are finished.
+		if !need_more_tokens {
+			break
+		}

 		// Fetch the next token.
 		if !yaml_parser_fetch_next_token(parser) {
@@ -665,6 +678,11 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
 		return false
 	}

+	// Remove obsolete potential simple keys.
+	if !yaml_parser_stale_simple_keys(parser) {
+		return false
+	}
+
 	// Check the indentation level against the current column.
 	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
 		return false
@@ -819,30 +837,29 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
 		"found character that cannot start any token")
 }

-func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
-	if !simple_key.possible {
-		return false, true
-	}
-
-	// The 1.2 specification says:
-	//
-	// "If the ? indicator is omitted, parsing needs to see past the
-	// implicit key to recognize it as such. To limit the amount of
-	// lookahead required, the “:” indicator must appear at most 1024
-	// Unicode characters beyond the start of the key. In addition, the key
-	// is restricted to a single line."
-	//
-	if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
-		// Check if the potential simple key to be removed is required.
-		if simple_key.required {
-			return false, yaml_parser_set_scanner_error(parser,
-				"while scanning a simple key", simple_key.mark,
-				"could not find expected ':'")
-		}
-		simple_key.possible = false
-		return false, true
-	}
-	return true, true
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+	// Check for a potential simple key for each flow level.
+	for i := range parser.simple_keys {
+		simple_key := &parser.simple_keys[i]
+
+		// The specification requires that a simple key
+		//
+		// - is limited to a single line,
+		// - is shorter than 1024 characters.
+		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+			// Check if the potential simple key to be removed is required.
+			if simple_key.required {
+				return yaml_parser_set_scanner_error(parser,
+					"while scanning a simple key", simple_key.mark,
+					"could not find expected ':'")
+			}
+			simple_key.possible = false
+		}
+	}
+	return true
 }

 // Check if a simple key may start at the current position and add it if
@@ -862,14 +879,13 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
 			possible: true,
 			required: required,
 			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
-			mark: parser.mark,
 		}
+		simple_key.mark = parser.mark

 		if !yaml_parser_remove_simple_key(parser) {
 			return false
 		}
 		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
-		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
 	}
 	return true
 }
@@ -884,33 +900,19 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
 				"while scanning a simple key", parser.simple_keys[i].mark,
 				"could not find expected ':'")
 		}
-		// Remove the key from the stack.
-		parser.simple_keys[i].possible = false
-		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
 	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
 	return true
 }

-// max_flow_level limits the flow_level
-const max_flow_level = 10000
-
 // Increase the flow level and resize the simple key list if needed.
 func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
 	// Reset the simple key on the next level.
-	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
-		possible: false,
-		required: false,
-		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
-		mark: parser.mark,
-	})
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})

 	// Increase the flow level.
 	parser.flow_level++
-	if parser.flow_level > max_flow_level {
-		return yaml_parser_set_scanner_error(parser,
-			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
-			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
-	}
 	return true
 }
@@ -918,16 +920,11 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
 func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
 	if parser.flow_level > 0 {
 		parser.flow_level--
-		last := len(parser.simple_keys) - 1
-		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
-		parser.simple_keys = parser.simple_keys[:last]
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
 	}
 	return true
 }

-// max_indents limits the indents stack size
-const max_indents = 10000
-
 // Push the current indentation level to the stack and set the new level
 // the current column is greater than the indentation level. In this case,
 // append or insert the specified token into the token queue.
@@ -942,11 +939,6 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml
 		// indentation level.
 		parser.indents = append(parser.indents, parser.indent)
 		parser.indent = column
-		if len(parser.indents) > max_indents {
-			return yaml_parser_set_scanner_error(parser,
-				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
-				fmt.Sprintf("exceeded max depth of %d", max_indents))
-		}

 		// Create a token and insert it into the queue.
 		token := yaml_token_t{
@@ -997,8 +989,6 @@ func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
 	// Initialize the simple key stack.
 	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
-	parser.simple_keys_by_tok = make(map[int]int)

 	// A simple key is allowed at the beginning of the stream.
 	parser.simple_key_allowed = true
@@ -1280,11 +1270,7 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
 	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]

 	// Have we found a simple key?
-	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
-		return false
-	} else if valid {
+	if simple_key.possible {
 		// Create the KEY token and insert it into the queue.
 		token := yaml_token_t{
 			typ: yaml_KEY_TOKEN,
@@ -1302,7 +1288,6 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
 		// Remove the simple key.
 		simple_key.possible = false
-		delete(parser.simple_keys_by_tok, simple_key.token_number)

 		// A simple key cannot follow another simple key.
 		parser.simple_key_allowed = false

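Both the removed yaml_simple_key_is_valid and the restored yaml_parser_stale_simple_keys enforce the same rule quoted in their comments: an implicit ("simple") key must sit on a single line and its ':' must appear within 1024 characters. A hedged illustration of the user-visible effect, assuming the yaml.v2 versions shown in this commit (the error text comes from the scanner messages above):

    package main

    import (
        "fmt"
        "strings"

        "gopkg.in/yaml.v2"
    )

    func main() {
        var out map[string]int

        // A short implicit key parses fine.
        fmt.Println(yaml.Unmarshal([]byte("title: 1"), &out)) // <nil>

        // A plain key longer than 1024 characters should trip the simple-key
        // limit enforced above and yield a "could not find expected ':'" error.
        long := strings.Repeat("k", 2000) + ": 1"
        fmt.Println(yaml.Unmarshal([]byte(long), &out))
    }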
vendor/gopkg.in/yaml.v2/yaml.go (generated, vendored): 2 changes

@@ -89,7 +89,7 @@ func UnmarshalStrict(in []byte, out interface{}) (err error) {
 	return unmarshal(in, out, true)
 }

-// A Decoder reads and decodes YAML values from an input stream.
+// A Decorder reads and decodes YAML values from an input stream.
 type Decoder struct {
 	strict bool
 	parser *parser

vendor/gopkg.in/yaml.v2/yamlh.go (generated, vendored): 1 change

@@ -579,7 +579,6 @@ type yaml_parser_t struct {
 	simple_key_allowed bool // May a simple key occur at the current position?
 	simple_keys []yaml_simple_key_t // The stack of simple keys.
-	simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number

 	// Parser stuff

vendor/modules.txt (vendored): 4 changes

@@ -1,4 +1,4 @@
-# github.com/davecgh/go-spew v1.1.1
+# github.com/davecgh/go-spew v1.1.0
 github.com/davecgh/go-spew/spew
 # github.com/pkg/errors v0.9.1
 github.com/pkg/errors
@@ -6,5 +6,5 @@ github.com/pkg/errors
 github.com/pmezard/go-difflib/difflib
 # github.com/stretchr/testify v1.4.0
 github.com/stretchr/testify/assert
-# gopkg.in/yaml.v2 v2.2.8
+# gopkg.in/yaml.v2 v2.2.2
 gopkg.in/yaml.v2
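For context, vendor/modules.txt is the manifest that go mod vendor writes from go.mod, so the version changes above (go-spew v1.1.1 to v1.1.0, yaml.v2 v2.2.8 to v2.2.2) presumably reflect re-running go mod vendor after go.mod pinned those versions, which is what the commit title describes.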