Diffstat (limited to 'libgo/go/golang.org')
-rw-r--r--  libgo/go/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go | 6
-rw-r--r--  libgo/go/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go | 2
-rw-r--r--  libgo/go/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go | 2
-rw-r--r--  libgo/go/golang.org/x/crypto/cryptobyte/asn1.go | 7
-rw-r--r--  libgo/go/golang.org/x/crypto/curve25519/curve25519.go | 52
-rw-r--r--  libgo/go/golang.org/x/crypto/curve25519/internal/field/fe.go | 416
-rw-r--r--  libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go | 14
-rw-r--r--  libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go | 12
-rw-r--r--  libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go | 16
-rw-r--r--  libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go | 12
-rw-r--r--  libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_generic.go | 264
-rw-r--r--  libgo/go/golang.org/x/crypto/internal/poly1305/bits_compat.go | 40
-rw-r--r--  libgo/go/golang.org/x/crypto/internal/poly1305/bits_go1.13.go | 22
-rw-r--r--  libgo/go/golang.org/x/crypto/internal/poly1305/mac_noasm.go | 10
-rw-r--r--  libgo/go/golang.org/x/crypto/internal/poly1305/poly1305.go | 99
-rw-r--r--  libgo/go/golang.org/x/crypto/internal/poly1305/sum_amd64.go | 48
-rw-r--r--  libgo/go/golang.org/x/crypto/internal/poly1305/sum_generic.go | 310
-rw-r--r--  libgo/go/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go | 48
-rw-r--r--  libgo/go/golang.org/x/crypto/internal/poly1305/sum_s390x.go | 76
-rw-r--r--  libgo/go/golang.org/x/mod/modfile/rule.go | 545
-rw-r--r--  libgo/go/golang.org/x/mod/modfile/work.go | 234
-rw-r--r--  libgo/go/golang.org/x/mod/module/module.go | 9
-rw-r--r--  libgo/go/golang.org/x/mod/semver/semver.go | 10
-rw-r--r--  libgo/go/golang.org/x/mod/zip/zip.go | 143
-rw-r--r--  libgo/go/golang.org/x/net/dns/dnsmessage/message.go | 22
-rw-r--r--  libgo/go/golang.org/x/net/http/httpproxy/proxy.go | 4
-rw-r--r--  libgo/go/golang.org/x/net/http2/hpack/huffman.go | 38
-rw-r--r--  libgo/go/golang.org/x/net/idna/go118.go | 14
-rw-r--r--  libgo/go/golang.org/x/net/idna/idna10.0.0.go | 6
-rw-r--r--  libgo/go/golang.org/x/net/idna/idna9.0.0.go | 4
-rw-r--r--  libgo/go/golang.org/x/net/idna/pre_go118.go | 12
-rw-r--r--  libgo/go/golang.org/x/net/idna/punycode.go | 36
-rw-r--r--  libgo/go/golang.org/x/net/lif/address.go | 1
-rw-r--r--  libgo/go/golang.org/x/net/lif/binary.go | 1
-rw-r--r--  libgo/go/golang.org/x/net/lif/lif.go | 1
-rw-r--r--  libgo/go/golang.org/x/net/lif/link.go | 1
-rw-r--r--  libgo/go/golang.org/x/net/lif/sys.go | 1
-rw-r--r--  libgo/go/golang.org/x/net/lif/syscall.go | 1
-rw-r--r--  libgo/go/golang.org/x/net/nettest/nettest.go | 14
-rw-r--r--  libgo/go/golang.org/x/net/route/address.go | 4
-rw-r--r--  libgo/go/golang.org/x/net/route/message.go | 2
-rw-r--r--  libgo/go/golang.org/x/net/route/sys_freebsd.go | 3
-rw-r--r--  libgo/go/golang.org/x/net/route/syscall.go | 26
-rw-r--r--  libgo/go/golang.org/x/sync/AUTHORS | 3
-rw-r--r--  libgo/go/golang.org/x/sync/CONTRIBUTORS | 3
-rw-r--r--  libgo/go/golang.org/x/sync/LICENSE | 27
-rw-r--r--  libgo/go/golang.org/x/sync/PATENTS | 22
-rw-r--r--  libgo/go/golang.org/x/sync/semaphore/semaphore.go | 136
-rw-r--r--  libgo/go/golang.org/x/sys/cpu/cpu.go | 1
-rw-r--r--  libgo/go/golang.org/x/sys/cpu/cpu_gc_x86.go | 4
-rw-r--r--  libgo/go/golang.org/x/sys/cpu/cpu_x86.go | 9
-rw-r--r--  libgo/go/golang.org/x/text/unicode/bidi/core.go | 6
-rw-r--r--  libgo/go/golang.org/x/tools/cover/profile.go | 11
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/internal/facts/imports.go | 33
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go | 37
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/composite/composite.go | 73
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/copylock/copylock.go | 75
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go | 6
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go | 6
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go | 112
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go | 7
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/printf/printf.go | 76
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/printf/types.go | 213
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/shift/shift.go | 36
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go | 6
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/stringintconv/string.go | 149
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go | 42
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/tests/tests.go | 70
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go | 3
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go | 6
-rw-r--r--  libgo/go/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go | 3
-rw-r--r--  libgo/go/golang.org/x/tools/go/ast/astutil/enclosing.go | 20
-rw-r--r--  libgo/go/golang.org/x/tools/go/ast/astutil/rewrite.go | 6
-rw-r--r--  libgo/go/golang.org/x/tools/go/ast/inspector/typeof.go | 9
-rw-r--r--  libgo/go/golang.org/x/tools/go/types/objectpath/objectpath.go | 153
-rw-r--r--  libgo/go/golang.org/x/tools/go/types/typeutil/callee.go | 29
-rw-r--r--  libgo/go/golang.org/x/tools/go/types/typeutil/map.go | 138
-rw-r--r--  libgo/go/golang.org/x/tools/internal/lsp/fuzzy/input.go | 37
-rw-r--r--  libgo/go/golang.org/x/tools/internal/lsp/fuzzy/matcher.go | 23
-rw-r--r--  libgo/go/golang.org/x/tools/internal/lsp/fuzzy/symbol.go | 236
-rw-r--r--  libgo/go/golang.org/x/tools/internal/typeparams/common.go | 79
-rw-r--r--  libgo/go/golang.org/x/tools/internal/typeparams/enabled_go117.go | 12
-rw-r--r--  libgo/go/golang.org/x/tools/internal/typeparams/enabled_go118.go | 15
-rw-r--r--  libgo/go/golang.org/x/tools/internal/typeparams/normalize.go | 216
-rw-r--r--  libgo/go/golang.org/x/tools/internal/typeparams/termlist.go | 172
-rw-r--r--  libgo/go/golang.org/x/tools/internal/typeparams/typeparams_go117.go | 192
-rw-r--r--  libgo/go/golang.org/x/tools/internal/typeparams/typeparams_go118.go | 146
-rw-r--r--  libgo/go/golang.org/x/tools/internal/typeparams/typeterm.go | 170
-rw-r--r--  libgo/go/golang.org/x/tools/txtar/archive.go | 140
89 files changed, 4984 insertions, 572 deletions
diff --git a/libgo/go/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/libgo/go/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
index 0d7bac3f7db..93da7322bc4 100644
--- a/libgo/go/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
+++ b/libgo/go/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
@@ -26,6 +26,10 @@ const (
// NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305
// variant of this AEAD, in bytes.
NonceSizeX = 24
+
+ // Overhead is the size of the Poly1305 authentication tag, and the
+ // difference between a ciphertext length and its plaintext.
+ Overhead = 16
)
type chacha20poly1305 struct {
@@ -47,7 +51,7 @@ func (c *chacha20poly1305) NonceSize() int {
}
func (c *chacha20poly1305) Overhead() int {
- return 16
+ return Overhead
}
func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
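
The newly exported Overhead constant lets callers size buffers without constructing an AEAD first. A minimal sketch of that use, assuming the public golang.org/x/crypto/chacha20poly1305 API (the all-zero nonce is for illustration only and is not part of this change):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

func main() {
	key := make([]byte, chacha20poly1305.KeySize)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	aead, err := chacha20poly1305.New(key)
	if err != nil {
		panic(err)
	}

	plaintext := []byte("hello")
	nonce := make([]byte, chacha20poly1305.NonceSize) // zero nonce, demo only

	// Pre-allocate room for the Poly1305 tag using the exported constant.
	dst := make([]byte, 0, len(plaintext)+chacha20poly1305.Overhead)
	ct := aead.Seal(dst, nonce, plaintext, nil)

	fmt.Println(len(ct) == len(plaintext)+chacha20poly1305.Overhead) // true
}
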
diff --git a/libgo/go/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/libgo/go/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
index fe191d395d5..96b2fd898bb 100644
--- a/libgo/go/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
+++ b/libgo/go/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
@@ -8,8 +8,8 @@ import (
"encoding/binary"
"golang.org/x/crypto/chacha20"
+ "golang.org/x/crypto/internal/poly1305"
"golang.org/x/crypto/internal/subtle"
- "golang.org/x/crypto/poly1305"
)
func writeWithPadding(p *poly1305.MAC, b []byte) {
diff --git a/libgo/go/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/libgo/go/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
index d9d46b96396..1cebfe946f4 100644
--- a/libgo/go/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
+++ b/libgo/go/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
@@ -35,7 +35,7 @@ func (*xchacha20poly1305) NonceSize() int {
}
func (*xchacha20poly1305) Overhead() int {
- return 16
+ return Overhead
}
func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
diff --git a/libgo/go/golang.org/x/crypto/cryptobyte/asn1.go b/libgo/go/golang.org/x/crypto/cryptobyte/asn1.go
index 83c776de083..3a1674a1e57 100644
--- a/libgo/go/golang.org/x/crypto/cryptobyte/asn1.go
+++ b/libgo/go/golang.org/x/crypto/cryptobyte/asn1.go
@@ -407,7 +407,12 @@ func (s *String) ReadASN1Enum(out *int) bool {
func (s *String) readBase128Int(out *int) bool {
ret := 0
for i := 0; len(*s) > 0; i++ {
- if i == 4 {
+ if i == 5 {
+ return false
+ }
+ // Avoid overflowing int on a 32-bit platform.
+ // We don't want different behavior based on the architecture.
+ if ret >= 1<<(31-7) {
return false
}
ret <<= 7
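
The new guard bounds both the number of base-128 groups and the accumulator itself, so parsing behaves the same on 32-bit and 64-bit platforms. A standalone sketch of the same idea, using hypothetical names rather than the cryptobyte internals:

package main

import "fmt"

// readBase128 decodes one base-128 integer (7 bits per byte, high bit set on
// continuation bytes), rejecting inputs with more than 5 groups or values
// that would not fit in a 32-bit int, mirroring the checks added above.
func readBase128(data []byte) (value, n int, ok bool) {
	ret := 0
	for i := 0; i < len(data); i++ {
		if i == 5 {
			return 0, 0, false // too many groups for 32 bits
		}
		if ret >= 1<<(31-7) {
			return 0, 0, false // next shift would overflow a 32-bit int
		}
		b := data[i]
		ret <<= 7
		ret |= int(b & 0x7f)
		if b&0x80 == 0 {
			return ret, i + 1, true
		}
	}
	return 0, 0, false // ran out of input mid-value
}

func main() {
	v, n, ok := readBase128([]byte{0x81, 0x00}) // 1<<7 = 128
	fmt.Println(v, n, ok)                       // 128 2 true

	_, _, ok = readBase128([]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0x7f})
	fmt.Println(ok) // false: rejected by the bounds checks
}
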
diff --git a/libgo/go/golang.org/x/crypto/curve25519/curve25519.go b/libgo/go/golang.org/x/crypto/curve25519/curve25519.go
index 4b9a655d1b5..cda3fdd3540 100644
--- a/libgo/go/golang.org/x/crypto/curve25519/curve25519.go
+++ b/libgo/go/golang.org/x/crypto/curve25519/curve25519.go
@@ -10,6 +10,8 @@ package curve25519 // import "golang.org/x/crypto/curve25519"
import (
"crypto/subtle"
"fmt"
+
+ "golang.org/x/crypto/curve25519/internal/field"
)
// ScalarMult sets dst to the product scalar * point.
@@ -18,7 +20,55 @@ import (
// zeroes, irrespective of the scalar. Instead, use the X25519 function, which
// will return an error.
func ScalarMult(dst, scalar, point *[32]byte) {
- scalarMult(dst, scalar, point)
+ var e [32]byte
+
+ copy(e[:], scalar[:])
+ e[0] &= 248
+ e[31] &= 127
+ e[31] |= 64
+
+ var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element
+ x1.SetBytes(point[:])
+ x2.One()
+ x3.Set(&x1)
+ z3.One()
+
+ swap := 0
+ for pos := 254; pos >= 0; pos-- {
+ b := e[pos/8] >> uint(pos&7)
+ b &= 1
+ swap ^= int(b)
+ x2.Swap(&x3, swap)
+ z2.Swap(&z3, swap)
+ swap = int(b)
+
+ tmp0.Subtract(&x3, &z3)
+ tmp1.Subtract(&x2, &z2)
+ x2.Add(&x2, &z2)
+ z2.Add(&x3, &z3)
+ z3.Multiply(&tmp0, &x2)
+ z2.Multiply(&z2, &tmp1)
+ tmp0.Square(&tmp1)
+ tmp1.Square(&x2)
+ x3.Add(&z3, &z2)
+ z2.Subtract(&z3, &z2)
+ x2.Multiply(&tmp1, &tmp0)
+ tmp1.Subtract(&tmp1, &tmp0)
+ z2.Square(&z2)
+
+ z3.Mult32(&tmp1, 121666)
+ x3.Square(&x3)
+ tmp0.Add(&tmp0, &z3)
+ z3.Multiply(&x1, &z2)
+ z2.Multiply(&tmp1, &tmp0)
+ }
+
+ x2.Swap(&x3, swap)
+ z2.Swap(&z3, swap)
+
+ z2.Invert(&z2)
+ x2.Multiply(&x2, &z2)
+ copy(dst[:], x2.Bytes())
}
// ScalarBaseMult sets dst to the product scalar * base where base is the
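
With the ladder now written against the internal field package, the recommended entry point for callers remains the package-level X25519 function, which layers error checking on top of this routine. A small sketch of a Diffie-Hellman exchange through that public API:

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	alice := make([]byte, curve25519.ScalarSize)
	bob := make([]byte, curve25519.ScalarSize)
	if _, err := rand.Read(alice); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bob); err != nil {
		panic(err)
	}

	// Public keys: scalar multiplication by the base point.
	alicePub, err := curve25519.X25519(alice, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}
	bobPub, err := curve25519.X25519(bob, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}

	// Both sides derive the same shared secret. Unlike ScalarMult, X25519
	// returns an error instead of an all-zero output for low-order points.
	s1, _ := curve25519.X25519(alice, bobPub)
	s2, _ := curve25519.X25519(bob, alicePub)
	fmt.Println(bytes.Equal(s1, s2)) // true
}
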
diff --git a/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe.go b/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe.go
new file mode 100644
index 00000000000..ca841ad99e3
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe.go
@@ -0,0 +1,416 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package field implements fast arithmetic modulo 2^255-19.
+package field
+
+import (
+ "crypto/subtle"
+ "encoding/binary"
+ "math/bits"
+)
+
+// Element represents an element of the field GF(2^255-19). Note that this
+// is not a cryptographically secure group, and should only be used to interact
+// with edwards25519.Point coordinates.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Element struct {
+ // An element t represents the integer
+ // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
+ //
+ // Between operations, all limbs are expected to be lower than 2^52.
+ l0 uint64
+ l1 uint64
+ l2 uint64
+ l3 uint64
+ l4 uint64
+}
+
+const maskLow51Bits uint64 = (1 << 51) - 1
+
+var feZero = &Element{0, 0, 0, 0, 0}
+
+// Zero sets v = 0, and returns v.
+func (v *Element) Zero() *Element {
+ *v = *feZero
+ return v
+}
+
+var feOne = &Element{1, 0, 0, 0, 0}
+
+// One sets v = 1, and returns v.
+func (v *Element) One() *Element {
+ *v = *feOne
+ return v
+}
+
+// reduce reduces v modulo 2^255 - 19 and returns it.
+func (v *Element) reduce() *Element {
+ v.carryPropagate()
+
+ // After the light reduction we now have a field element representation
+ // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
+
+ // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
+ // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
+ c := (v.l0 + 19) >> 51
+ c = (v.l1 + c) >> 51
+ c = (v.l2 + c) >> 51
+ c = (v.l3 + c) >> 51
+ c = (v.l4 + c) >> 51
+
+ // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
+ // effectively applying the reduction identity to the carry.
+ v.l0 += 19 * c
+
+ v.l1 += v.l0 >> 51
+ v.l0 = v.l0 & maskLow51Bits
+ v.l2 += v.l1 >> 51
+ v.l1 = v.l1 & maskLow51Bits
+ v.l3 += v.l2 >> 51
+ v.l2 = v.l2 & maskLow51Bits
+ v.l4 += v.l3 >> 51
+ v.l3 = v.l3 & maskLow51Bits
+ // no additional carry
+ v.l4 = v.l4 & maskLow51Bits
+
+ return v
+}
+
+// Add sets v = a + b, and returns v.
+func (v *Element) Add(a, b *Element) *Element {
+ v.l0 = a.l0 + b.l0
+ v.l1 = a.l1 + b.l1
+ v.l2 = a.l2 + b.l2
+ v.l3 = a.l3 + b.l3
+ v.l4 = a.l4 + b.l4
+ // Using the generic implementation here is actually faster than the
+ // assembly. Probably because the body of this function is so simple that
+ // the compiler can figure out better optimizations by inlining the carry
+ // propagation. TODO
+ return v.carryPropagateGeneric()
+}
+
+// Subtract sets v = a - b, and returns v.
+func (v *Element) Subtract(a, b *Element) *Element {
+ // We first add 2 * p, to guarantee the subtraction won't underflow, and
+ // then subtract b (which can be up to 2^255 + 2^13 * 19).
+ v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
+ v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
+ v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
+ v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
+ v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
+ return v.carryPropagate()
+}
+
+// Negate sets v = -a, and returns v.
+func (v *Element) Negate(a *Element) *Element {
+ return v.Subtract(feZero, a)
+}
+
+// Invert sets v = 1/z mod p, and returns v.
+//
+// If z == 0, Invert returns v = 0.
+func (v *Element) Invert(z *Element) *Element {
+ // Inversion is implemented as exponentiation with exponent p - 2. It uses the
+ // same sequence of 255 squarings and 11 multiplications as [Curve25519].
+ var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
+
+ z2.Square(z) // 2
+ t.Square(&z2) // 4
+ t.Square(&t) // 8
+ z9.Multiply(&t, z) // 9
+ z11.Multiply(&z9, &z2) // 11
+ t.Square(&z11) // 22
+ z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
+
+ t.Square(&z2_5_0) // 2^6 - 2^1
+ for i := 0; i < 4; i++ {
+ t.Square(&t) // 2^10 - 2^5
+ }
+ z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
+
+ t.Square(&z2_10_0) // 2^11 - 2^1
+ for i := 0; i < 9; i++ {
+ t.Square(&t) // 2^20 - 2^10
+ }
+ z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
+
+ t.Square(&z2_20_0) // 2^21 - 2^1
+ for i := 0; i < 19; i++ {
+ t.Square(&t) // 2^40 - 2^20
+ }
+ t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
+
+ t.Square(&t) // 2^41 - 2^1
+ for i := 0; i < 9; i++ {
+ t.Square(&t) // 2^50 - 2^10
+ }
+ z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
+
+ t.Square(&z2_50_0) // 2^51 - 2^1
+ for i := 0; i < 49; i++ {
+ t.Square(&t) // 2^100 - 2^50
+ }
+ z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
+
+ t.Square(&z2_100_0) // 2^101 - 2^1
+ for i := 0; i < 99; i++ {
+ t.Square(&t) // 2^200 - 2^100
+ }
+ t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
+
+ t.Square(&t) // 2^201 - 2^1
+ for i := 0; i < 49; i++ {
+ t.Square(&t) // 2^250 - 2^50
+ }
+ t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
+
+ t.Square(&t) // 2^251 - 2^1
+ t.Square(&t) // 2^252 - 2^2
+ t.Square(&t) // 2^253 - 2^3
+ t.Square(&t) // 2^254 - 2^4
+ t.Square(&t) // 2^255 - 2^5
+
+ return v.Multiply(&t, &z11) // 2^255 - 21
+}
+
+// Set sets v = a, and returns v.
+func (v *Element) Set(a *Element) *Element {
+ *v = *a
+ return v
+}
+
+// SetBytes sets v to x, which must be a 32-byte little-endian encoding.
+//
+// Consistent with RFC 7748, the most significant bit (the high bit of the
+// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
+// are accepted. Note that this is laxer than specified by RFC 8032.
+func (v *Element) SetBytes(x []byte) *Element {
+ if len(x) != 32 {
+ panic("edwards25519: invalid field element input size")
+ }
+
+ // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
+ v.l0 = binary.LittleEndian.Uint64(x[0:8])
+ v.l0 &= maskLow51Bits
+ // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
+ v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
+ v.l1 &= maskLow51Bits
+ // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
+ v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
+ v.l2 &= maskLow51Bits
+ // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
+ v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
+ v.l3 &= maskLow51Bits
+ // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51).
+ // Note: not bytes 25:33, shift 4, to avoid overread.
+ v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
+ v.l4 &= maskLow51Bits
+
+ return v
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of v.
+func (v *Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [32]byte
+ return v.bytes(&out)
+}
+
+func (v *Element) bytes(out *[32]byte) []byte {
+ t := *v
+ t.reduce()
+
+ var buf [8]byte
+ for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
+ bitsOffset := i * 51
+ binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
+ for i, bb := range buf {
+ off := bitsOffset/8 + i
+ if off >= len(out) {
+ break
+ }
+ out[off] |= bb
+ }
+ }
+
+ return out[:]
+}
+
+// Equal returns 1 if v and u are equal, and 0 otherwise.
+func (v *Element) Equal(u *Element) int {
+ sa, sv := u.Bytes(), v.Bytes()
+ return subtle.ConstantTimeCompare(sa, sv)
+}
+
+// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise.
+func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *Element) Select(a, b *Element, cond int) *Element {
+ m := mask64Bits(cond)
+ v.l0 = (m & a.l0) | (^m & b.l0)
+ v.l1 = (m & a.l1) | (^m & b.l1)
+ v.l2 = (m & a.l2) | (^m & b.l2)
+ v.l3 = (m & a.l3) | (^m & b.l3)
+ v.l4 = (m & a.l4) | (^m & b.l4)
+ return v
+}
+
+// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v.
+func (v *Element) Swap(u *Element, cond int) {
+ m := mask64Bits(cond)
+ t := m & (v.l0 ^ u.l0)
+ v.l0 ^= t
+ u.l0 ^= t
+ t = m & (v.l1 ^ u.l1)
+ v.l1 ^= t
+ u.l1 ^= t
+ t = m & (v.l2 ^ u.l2)
+ v.l2 ^= t
+ u.l2 ^= t
+ t = m & (v.l3 ^ u.l3)
+ v.l3 ^= t
+ u.l3 ^= t
+ t = m & (v.l4 ^ u.l4)
+ v.l4 ^= t
+ u.l4 ^= t
+}
+
+// IsNegative returns 1 if v is negative, and 0 otherwise.
+func (v *Element) IsNegative() int {
+ return int(v.Bytes()[0] & 1)
+}
+
+// Absolute sets v to |u|, and returns v.
+func (v *Element) Absolute(u *Element) *Element {
+ return v.Select(new(Element).Negate(u), u, u.IsNegative())
+}
+
+// Multiply sets v = x * y, and returns v.
+func (v *Element) Multiply(x, y *Element) *Element {
+ feMul(v, x, y)
+ return v
+}
+
+// Square sets v = x * x, and returns v.
+func (v *Element) Square(x *Element) *Element {
+ feSquare(v, x)
+ return v
+}
+
+// Mult32 sets v = x * y, and returns v.
+func (v *Element) Mult32(x *Element, y uint32) *Element {
+ x0lo, x0hi := mul51(x.l0, y)
+ x1lo, x1hi := mul51(x.l1, y)
+ x2lo, x2hi := mul51(x.l2, y)
+ x3lo, x3hi := mul51(x.l3, y)
+ x4lo, x4hi := mul51(x.l4, y)
+ v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
+ v.l1 = x1lo + x0hi
+ v.l2 = x2lo + x1hi
+ v.l3 = x3lo + x2hi
+ v.l4 = x4lo + x3hi
+ // The hi portions are going to be only 32 bits, plus any previous excess,
+ // so we can skip the carry propagation.
+ return v
+}
+
+// mul51 returns lo + hi * 2⁵¹ = a * b.
+func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
+ mh, ml := bits.Mul64(a, uint64(b))
+ lo = ml & maskLow51Bits
+ hi = (mh << 13) | (ml >> 51)
+ return
+}
+
+// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
+func (v *Element) Pow22523(x *Element) *Element {
+ var t0, t1, t2 Element
+
+ t0.Square(x) // x^2
+ t1.Square(&t0) // x^4
+ t1.Square(&t1) // x^8
+ t1.Multiply(x, &t1) // x^9
+ t0.Multiply(&t0, &t1) // x^11
+ t0.Square(&t0) // x^22
+ t0.Multiply(&t1, &t0) // x^31
+ t1.Square(&t0) // x^62
+ for i := 1; i < 5; i++ { // x^992
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
+ t1.Square(&t0) // 2^11 - 2
+ for i := 1; i < 10; i++ { // 2^20 - 2^10
+ t1.Square(&t1)
+ }
+ t1.Multiply(&t1, &t0) // 2^20 - 1
+ t2.Square(&t1) // 2^21 - 2
+ for i := 1; i < 20; i++ { // 2^40 - 2^20
+ t2.Square(&t2)
+ }
+ t1.Multiply(&t2, &t1) // 2^40 - 1
+ t1.Square(&t1) // 2^41 - 2
+ for i := 1; i < 10; i++ { // 2^50 - 2^10
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // 2^50 - 1
+ t1.Square(&t0) // 2^51 - 2
+ for i := 1; i < 50; i++ { // 2^100 - 2^50
+ t1.Square(&t1)
+ }
+ t1.Multiply(&t1, &t0) // 2^100 - 1
+ t2.Square(&t1) // 2^101 - 2
+ for i := 1; i < 100; i++ { // 2^200 - 2^100
+ t2.Square(&t2)
+ }
+ t1.Multiply(&t2, &t1) // 2^200 - 1
+ t1.Square(&t1) // 2^201 - 2
+ for i := 1; i < 50; i++ { // 2^250 - 2^50
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // 2^250 - 1
+ t0.Square(&t0) // 2^251 - 2
+ t0.Square(&t0) // 2^252 - 4
+ return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
+}
+
+// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
+var sqrtM1 = &Element{1718705420411056, 234908883556509,
+ 2233514472574048, 2117202627021982, 765476049583133}
+
+// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
+//
+// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
+// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
+// and returns r and 0.
+func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) {
+ var a, b Element
+
+ // r = (u * v3) * (u * v7)^((p-5)/8)
+ v2 := a.Square(v)
+ uv3 := b.Multiply(u, b.Multiply(v2, v))
+ uv7 := a.Multiply(uv3, a.Square(v2))
+ r.Multiply(uv3, r.Pow22523(uv7))
+
+ check := a.Multiply(v, a.Square(r)) // check = v * r^2
+
+ uNeg := b.Negate(u)
+ correctSignSqrt := check.Equal(u)
+ flippedSignSqrt := check.Equal(uNeg)
+ flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1))
+
+ rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r
+ // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
+ r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI)
+
+ r.Absolute(r) // Choose the nonnegative square root.
+ return r, correctSignSqrt | flippedSignSqrt
+}
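
The Element API above is the building block for the new ScalarMult; it lives in an internal package, so only code under golang.org/x/crypto can import it. The sketch below is written the way the package's own tests would be, and assumes only the methods shown in this file (SetBytes, Invert, Multiply, One, Equal):

package field_test

import (
	"testing"

	"golang.org/x/crypto/curve25519/internal/field"
)

// TestInvertRoundTrip checks that x * x^-1 reduces to the canonical 1,
// exercising SetBytes, Invert, Multiply, One and Equal from fe.go.
func TestInvertRoundTrip(t *testing.T) {
	b := make([]byte, 32)
	b[0] = 9 // the u-coordinate of the Curve25519 base point, as an element

	var x, inv, prod field.Element
	x.SetBytes(b)
	inv.Invert(&x)
	prod.Multiply(&x, &inv)

	if prod.Equal(new(field.Element).One()) != 1 {
		t.Errorf("x * x^-1 != 1")
	}
}
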
diff --git a/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
new file mode 100644
index 00000000000..8fe583939f1
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
@@ -0,0 +1,14 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+// +build amd64,gc,!purego
+
+package field
+
+// feMul sets out = a * b. It works like feMulGeneric.
+//go:noescape
+func feMul(out *Element, a *Element, b *Element)
+
+// feSquare sets out = a * a. It works like feSquareGeneric.
+//go:noescape
+func feSquare(out *Element, a *Element)
diff --git a/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go
new file mode 100644
index 00000000000..ddb6c9b8f7f
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || !gc || purego
+// +build !amd64 !gc purego
+
+package field
+
+func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }
+
+func feSquare(v, x *Element) { feSquareGeneric(v, x) }
diff --git a/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go
new file mode 100644
index 00000000000..af459ef5154
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+// +build arm64,gc,!purego
+
+package field
+
+//go:noescape
+func carryPropagate(v *Element)
+
+func (v *Element) carryPropagate() *Element {
+ carryPropagate(v)
+ return v
+}
diff --git a/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go
new file mode 100644
index 00000000000..234a5b2e5d1
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !arm64 || !gc || purego
+// +build !arm64 !gc purego
+
+package field
+
+func (v *Element) carryPropagate() *Element {
+ return v.carryPropagateGeneric()
+}
diff --git a/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
new file mode 100644
index 00000000000..7b5b78cbd6d
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
@@ -0,0 +1,264 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "math/bits"
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
+type uint128 struct {
+ lo, hi uint64
+}
+
+// mul64 returns a * b.
+func mul64(a, b uint64) uint128 {
+ hi, lo := bits.Mul64(a, b)
+ return uint128{lo, hi}
+}
+
+// addMul64 returns v + a * b.
+func addMul64(v uint128, a, b uint64) uint128 {
+ hi, lo := bits.Mul64(a, b)
+ lo, c := bits.Add64(lo, v.lo, 0)
+ hi, _ = bits.Add64(hi, v.hi, c)
+ return uint128{lo, hi}
+}
+
+// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
+func shiftRightBy51(a uint128) uint64 {
+ return (a.hi << (64 - 51)) | (a.lo >> 51)
+}
+
+func feMulGeneric(v, a, b *Element) {
+ a0 := a.l0
+ a1 := a.l1
+ a2 := a.l2
+ a3 := a.l3
+ a4 := a.l4
+
+ b0 := b.l0
+ b1 := b.l1
+ b2 := b.l2
+ b3 := b.l3
+ b4 := b.l4
+
+ // Limb multiplication works like pen-and-paper columnar multiplication, but
+ // with 51-bit limbs instead of digits.
+ //
+ // a4 a3 a2 a1 a0 x
+ // b4 b3 b2 b1 b0 =
+ // ------------------------
+ // a4b0 a3b0 a2b0 a1b0 a0b0 +
+ // a4b1 a3b1 a2b1 a1b1 a0b1 +
+ // a4b2 a3b2 a2b2 a1b2 a0b2 +
+ // a4b3 a3b3 a2b3 a1b3 a0b3 +
+ // a4b4 a3b4 a2b4 a1b4 a0b4 =
+ // ----------------------------------------------
+ // r8 r7 r6 r5 r4 r3 r2 r1 r0
+ //
+ // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
+ // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
+ // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
+ //
+ // Reduction can be carried out simultaneously to multiplication. For
+ // example, we do not compute r5: whenever the result of a multiplication
+ // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
+ //
+ // a4b0 a3b0 a2b0 a1b0 a0b0 +
+ // a3b1 a2b1 a1b1 a0b1 19×a4b1 +
+ // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
+ // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
+ // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
+ // --------------------------------------
+ // r4 r3 r2 r1 r0
+ //
+ // Finally we add up the columns into wide, overlapping limbs.
+
+ a1_19 := a1 * 19
+ a2_19 := a2 * 19
+ a3_19 := a3 * 19
+ a4_19 := a4 * 19
+
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ r0 := mul64(a0, b0)
+ r0 = addMul64(r0, a1_19, b4)
+ r0 = addMul64(r0, a2_19, b3)
+ r0 = addMul64(r0, a3_19, b2)
+ r0 = addMul64(r0, a4_19, b1)
+
+ // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
+ r1 := mul64(a0, b1)
+ r1 = addMul64(r1, a1, b0)
+ r1 = addMul64(r1, a2_19, b4)
+ r1 = addMul64(r1, a3_19, b3)
+ r1 = addMul64(r1, a4_19, b2)
+
+ // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
+ r2 := mul64(a0, b2)
+ r2 = addMul64(r2, a1, b1)
+ r2 = addMul64(r2, a2, b0)
+ r2 = addMul64(r2, a3_19, b4)
+ r2 = addMul64(r2, a4_19, b3)
+
+ // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
+ r3 := mul64(a0, b3)
+ r3 = addMul64(r3, a1, b2)
+ r3 = addMul64(r3, a2, b1)
+ r3 = addMul64(r3, a3, b0)
+ r3 = addMul64(r3, a4_19, b4)
+
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ r4 := mul64(a0, b4)
+ r4 = addMul64(r4, a1, b3)
+ r4 = addMul64(r4, a2, b2)
+ r4 = addMul64(r4, a3, b1)
+ r4 = addMul64(r4, a4, b0)
+
+ // After the multiplication, we need to reduce (carry) the five coefficients
+ // to obtain a result with limbs that are at most slightly larger than 2⁵¹,
+ // to respect the Element invariant.
+ //
+ // Overall, the reduction works the same as carryPropagate, except with
+ // wider inputs: we take the carry for each coefficient by shifting it right
+ // by 51, and add it to the limb above it. The top carry is multiplied by 19
+ // according to the reduction identity and added to the lowest limb.
+ //
+ // The largest coefficient (r0) will be at most 111 bits, which guarantees
+ // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
+ //
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
+ // r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
+ // r0 < 2⁷ × 2⁵² × 2⁵²
+ // r0 < 2¹¹¹
+ //
+ // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
+ // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
+ // allows us to easily apply the reduction identity.
+ //
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ // r4 < 5 × 2⁵² × 2⁵²
+ // r4 < 2¹⁰⁷
+ //
+
+ c0 := shiftRightBy51(r0)
+ c1 := shiftRightBy51(r1)
+ c2 := shiftRightBy51(r2)
+ c3 := shiftRightBy51(r3)
+ c4 := shiftRightBy51(r4)
+
+ rr0 := r0.lo&maskLow51Bits + c4*19
+ rr1 := r1.lo&maskLow51Bits + c0
+ rr2 := r2.lo&maskLow51Bits + c1
+ rr3 := r3.lo&maskLow51Bits + c2
+ rr4 := r4.lo&maskLow51Bits + c3
+
+ // Now all coefficients fit into 64-bit registers but are still too large to
+ // be passed around as a Element. We therefore do one last carry chain,
+ // where the carries will be small enough to fit in the wiggle room above 2⁵¹.
+ *v = Element{rr0, rr1, rr2, rr3, rr4}
+ v.carryPropagate()
+}
+
+func feSquareGeneric(v, a *Element) {
+ l0 := a.l0
+ l1 := a.l1
+ l2 := a.l2
+ l3 := a.l3
+ l4 := a.l4
+
+ // Squaring works precisely like multiplication above, but thanks to its
+ // symmetry we get to group a few terms together.
+ //
+ // l4 l3 l2 l1 l0 x
+ // l4 l3 l2 l1 l0 =
+ // ------------------------
+ // l4l0 l3l0 l2l0 l1l0 l0l0 +
+ // l4l1 l3l1 l2l1 l1l1 l0l1 +
+ // l4l2 l3l2 l2l2 l1l2 l0l2 +
+ // l4l3 l3l3 l2l3 l1l3 l0l3 +
+ // l4l4 l3l4 l2l4 l1l4 l0l4 =
+ // ----------------------------------------------
+ // r8 r7 r6 r5 r4 r3 r2 r1 r0
+ //
+ // l4l0 l3l0 l2l0 l1l0 l0l0 +
+ // l3l1 l2l1 l1l1 l0l1 19×l4l1 +
+ // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
+ // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
+ // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
+ // --------------------------------------
+ // r4 r3 r2 r1 r0
+ //
+ // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
+ // only three Mul64 and four Add64, instead of five and eight.
+
+ l0_2 := l0 * 2
+ l1_2 := l1 * 2
+
+ l1_38 := l1 * 38
+ l2_38 := l2 * 38
+ l3_38 := l3 * 38
+
+ l3_19 := l3 * 19
+ l4_19 := l4 * 19
+
+ // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
+ r0 := mul64(l0, l0)
+ r0 = addMul64(r0, l1_38, l4)
+ r0 = addMul64(r0, l2_38, l3)
+
+ // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
+ r1 := mul64(l0_2, l1)
+ r1 = addMul64(r1, l2_38, l4)
+ r1 = addMul64(r1, l3_19, l3)
+
+ // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
+ r2 := mul64(l0_2, l2)
+ r2 = addMul64(r2, l1, l1)
+ r2 = addMul64(r2, l3_38, l4)
+
+ // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
+ r3 := mul64(l0_2, l3)
+ r3 = addMul64(r3, l1_2, l2)
+ r3 = addMul64(r3, l4_19, l4)
+
+ // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
+ r4 := mul64(l0_2, l4)
+ r4 = addMul64(r4, l1_2, l3)
+ r4 = addMul64(r4, l2, l2)
+
+ c0 := shiftRightBy51(r0)
+ c1 := shiftRightBy51(r1)
+ c2 := shiftRightBy51(r2)
+ c3 := shiftRightBy51(r3)
+ c4 := shiftRightBy51(r4)
+
+ rr0 := r0.lo&maskLow51Bits + c4*19
+ rr1 := r1.lo&maskLow51Bits + c0
+ rr2 := r2.lo&maskLow51Bits + c1
+ rr3 := r3.lo&maskLow51Bits + c2
+ rr4 := r4.lo&maskLow51Bits + c3
+
+ *v = Element{rr0, rr1, rr2, rr3, rr4}
+ v.carryPropagate()
+}
+
+// carryPropagate brings the limbs below 52 bits by applying the reduction
+// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline
+func (v *Element) carryPropagateGeneric() *Element {
+ c0 := v.l0 >> 51
+ c1 := v.l1 >> 51
+ c2 := v.l2 >> 51
+ c3 := v.l3 >> 51
+ c4 := v.l4 >> 51
+
+ v.l0 = v.l0&maskLow51Bits + c4*19
+ v.l1 = v.l1&maskLow51Bits + c0
+ v.l2 = v.l2&maskLow51Bits + c1
+ v.l3 = v.l3&maskLow51Bits + c2
+ v.l4 = v.l4&maskLow51Bits + c3
+
+ return v
+}
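
The whole file leans on the reduction identity a * 2^255 + b = 19 * a + b (mod 2^255 - 19) and on carries between 51-bit limbs. A standalone sketch of that carry step on plain uint64 limbs (the names are illustrative, not the package's own):

package main

import "fmt"

const maskLow51Bits = (1 << 51) - 1

// carry51 is a standalone sketch of the carry propagation used above: each
// limb's bits above 2^51 move up one limb, and the top carry wraps around
// multiplied by 19, because 2^255 = 19 (mod 2^255 - 19).
func carry51(l [5]uint64) [5]uint64 {
	c0 := l[0] >> 51
	c1 := l[1] >> 51
	c2 := l[2] >> 51
	c3 := l[3] >> 51
	c4 := l[4] >> 51

	return [5]uint64{
		l[0]&maskLow51Bits + c4*19,
		l[1]&maskLow51Bits + c0,
		l[2]&maskLow51Bits + c1,
		l[3]&maskLow51Bits + c2,
		l[4]&maskLow51Bits + c3,
	}
}

func main() {
	// 2^255 in 51-bit limbs is limb 4 equal to 2^51; after reduction it
	// becomes 19 in limb 0.
	fmt.Println(carry51([5]uint64{0, 0, 0, 0, 1 << 51})) // [19 0 0 0 0]
}
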
diff --git a/libgo/go/golang.org/x/crypto/internal/poly1305/bits_compat.go b/libgo/go/golang.org/x/crypto/internal/poly1305/bits_compat.go
new file mode 100644
index 00000000000..45b5c966b2b
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/internal/poly1305/bits_compat.go
@@ -0,0 +1,40 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.13
+// +build !go1.13
+
+package poly1305
+
+// Generic fallbacks for the math/bits intrinsics, copied from
+// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sub64 had
+// variable time fallbacks until Go 1.13.
+
+func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
+ sum = x + y + carry
+ carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
+ return
+}
+
+func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
+ diff = x - y - borrow
+ borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63
+ return
+}
+
+func bitsMul64(x, y uint64) (hi, lo uint64) {
+ const mask32 = 1<<32 - 1
+ x0 := x & mask32
+ x1 := x >> 32
+ y0 := y & mask32
+ y1 := y >> 32
+ w0 := x0 * y0
+ t := x1*y0 + w0>>32
+ w1 := t & mask32
+ w2 := t >> 32
+ w1 += x0 * y1
+ hi = x1*y1 + w2 + w1>>32
+ lo = x * y
+ return
+}
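
The pre-Go 1.13 fallback reproduces math/bits.Mul64 with 32-bit schoolbook multiplication. A quick standalone comparison against the intrinsic (this sketch assumes Go 1.12 or later, where bits.Mul64 exists):

package main

import (
	"fmt"
	"math/bits"
)

// mul64Compat mirrors the fallback above: split each operand into 32-bit
// halves and recombine the partial products to recover the high 64 bits.
func mul64Compat(x, y uint64) (hi, lo uint64) {
	const mask32 = 1<<32 - 1
	x0, x1 := x&mask32, x>>32
	y0, y1 := y&mask32, y>>32
	w0 := x0 * y0
	t := x1*y0 + w0>>32
	w1 := t&mask32 + x0*y1
	hi = x1*y1 + t>>32 + w1>>32
	lo = x * y
	return
}

func main() {
	x, y := uint64(0xdeadbeefcafebabe), uint64(0x0123456789abcdef)
	h1, l1 := mul64Compat(x, y)
	h2, l2 := bits.Mul64(x, y) // the intrinsic used on Go 1.13+
	fmt.Println(h1 == h2 && l1 == l2) // true
}
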
diff --git a/libgo/go/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/libgo/go/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
new file mode 100644
index 00000000000..ed52b3418ab
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.13
+// +build go1.13
+
+package poly1305
+
+import "math/bits"
+
+func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
+ return bits.Add64(x, y, carry)
+}
+
+func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
+ return bits.Sub64(x, y, borrow)
+}
+
+func bitsMul64(x, y uint64) (hi, lo uint64) {
+ return bits.Mul64(x, y)
+}
diff --git a/libgo/go/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/libgo/go/golang.org/x/crypto/internal/poly1305/mac_noasm.go
new file mode 100644
index 00000000000..f184b67d98d
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/internal/poly1305/mac_noasm.go
@@ -0,0 +1,10 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
+// +build !amd64,!ppc64le,!s390x !gc purego
+
+package poly1305
+
+type mac struct{ macGeneric }
diff --git a/libgo/go/golang.org/x/crypto/internal/poly1305/poly1305.go b/libgo/go/golang.org/x/crypto/internal/poly1305/poly1305.go
new file mode 100644
index 00000000000..4aaea810a26
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/internal/poly1305/poly1305.go
@@ -0,0 +1,99 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package poly1305 implements Poly1305 one-time message authentication code as
+// specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
+//
+// Poly1305 is a fast, one-time authentication function. It is infeasible for an
+// attacker to generate an authenticator for a message without the key. However, a
+// key must only be used for a single message. Authenticating two different
+// messages with the same key allows an attacker to forge authenticators for other
+// messages with the same key.
+//
+// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
+// used with a fixed key in order to generate one-time keys from a nonce.
+// However, in this package AES isn't used and the one-time key is specified
+// directly.
+package poly1305
+
+import "crypto/subtle"
+
+// TagSize is the size, in bytes, of a poly1305 authenticator.
+const TagSize = 16
+
+// Sum generates an authenticator for msg using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+ h := New(key)
+ h.Write(m)
+ h.Sum(out[:0])
+}
+
+// Verify returns true if mac is a valid authenticator for m with the given key.
+func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
+ var tmp [16]byte
+ Sum(&tmp, m, key)
+ return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
+}
+
+// New returns a new MAC computing an authentication
+// tag of all data written to it with the given key.
+// This allows writing the message progressively instead
+// of passing it as a single slice. Common users should use
+// the Sum function instead.
+//
+// The key must be unique for each message, as authenticating
+// two different messages with the same key allows an attacker
+// to forge messages at will.
+func New(key *[32]byte) *MAC {
+ m := &MAC{}
+ initialize(key, &m.macState)
+ return m
+}
+
+// MAC is an io.Writer computing an authentication tag
+// of the data written to it.
+//
+// MAC cannot be used like common hash.Hash implementations,
+// because using a poly1305 key twice breaks its security.
+// Therefore writing data to a running MAC after calling
+// Sum or Verify causes it to panic.
+type MAC struct {
+ mac // platform-dependent implementation
+
+ finalized bool
+}
+
+// Size returns the number of bytes Sum will return.
+func (h *MAC) Size() int { return TagSize }
+
+// Write adds more data to the running message authentication code.
+// It never returns an error.
+//
+// It must not be called after the first call of Sum or Verify.
+func (h *MAC) Write(p []byte) (n int, err error) {
+ if h.finalized {
+ panic("poly1305: write to MAC after Sum or Verify")
+ }
+ return h.mac.Write(p)
+}
+
+// Sum computes the authenticator of all data written to the
+// message authentication code.
+func (h *MAC) Sum(b []byte) []byte {
+ var mac [TagSize]byte
+ h.mac.Sum(&mac)
+ h.finalized = true
+ return append(b, mac[:]...)
+}
+
+// Verify returns whether the authenticator of all data written to
+// the message authentication code matches the expected value.
+func (h *MAC) Verify(expected []byte) bool {
+ var mac [TagSize]byte
+ h.mac.Sum(&mac)
+ h.finalized = true
+ return subtle.ConstantTimeCompare(expected, mac[:]) == 1
+}
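
For one-shot use the package-level Sum and Verify cover the common case; the streaming MAC type exists for callers that cannot hold the whole message in memory. A sketch of the one-shot API (note the import path is internal, so only packages under golang.org/x/crypto can actually import it):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/internal/poly1305"
)

func main() {
	// The 32-byte one-time key must be used for a single message only.
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}

	msg := []byte("a single message per key")

	var tag [16]byte
	poly1305.Sum(&tag, msg, &key)
	fmt.Println(poly1305.Verify(&tag, msg, &key)) // true

	msg[0] ^= 1 // any change to the message invalidates the tag
	fmt.Println(poly1305.Verify(&tag, msg, &key)) // false
}
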
diff --git a/libgo/go/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/libgo/go/golang.org/x/crypto/internal/poly1305/sum_amd64.go
new file mode 100644
index 00000000000..6d522333f29
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/internal/poly1305/sum_amd64.go
@@ -0,0 +1,48 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+// +build gc,!purego
+
+package poly1305
+
+//go:noescape
+func update(state *macState, msg []byte)
+
+// mac is a wrapper for macGeneric that redirects calls that would have gone to
+// updateGeneric to update.
+//
+// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
+// using function pointers would carry a major performance cost.
+type mac struct{ macGeneric }
+
+func (h *mac) Write(p []byte) (int, error) {
+ nn := len(p)
+ if h.offset > 0 {
+ n := copy(h.buffer[h.offset:], p)
+ if h.offset+n < TagSize {
+ h.offset += n
+ return nn, nil
+ }
+ p = p[n:]
+ h.offset = 0
+ update(&h.macState, h.buffer[:])
+ }
+ if n := len(p) - (len(p) % TagSize); n > 0 {
+ update(&h.macState, p[:n])
+ p = p[n:]
+ }
+ if len(p) > 0 {
+ h.offset += copy(h.buffer[h.offset:], p)
+ }
+ return nn, nil
+}
+
+func (h *mac) Sum(out *[16]byte) {
+ state := h.macState
+ if h.offset > 0 {
+ update(&state, h.buffer[:h.offset])
+ }
+ finalize(out, &state.h, &state.s)
+}
diff --git a/libgo/go/golang.org/x/crypto/internal/poly1305/sum_generic.go b/libgo/go/golang.org/x/crypto/internal/poly1305/sum_generic.go
new file mode 100644
index 00000000000..c942a65904f
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/internal/poly1305/sum_generic.go
@@ -0,0 +1,310 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides the generic implementation of Sum and MAC. Other files
+// might provide optimized assembly implementations of some of this code.
+
+package poly1305
+
+import "encoding/binary"
+
+// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
+// for a 64 bytes message is approximately
+//
+// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5
+//
+// for some secret r and s. It can be computed sequentially like
+//
+// for len(msg) > 0:
+// h += read(msg, 16)
+// h *= r
+// h %= 2¹³⁰ - 5
+// return h + s
+//
+// All the complexity is about doing performant constant-time math on numbers
+// larger than any available numeric type.
+
+func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
+ h := newMACGeneric(key)
+ h.Write(msg)
+ h.Sum(out)
+}
+
+func newMACGeneric(key *[32]byte) macGeneric {
+ m := macGeneric{}
+ initialize(key, &m.macState)
+ return m
+}
+
+// macState holds numbers in saturated 64-bit little-endian limbs. That is,
+// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
+type macState struct {
+ // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
+ // can grow larger during and after rounds. It must, however, remain below
+ // 2 * (2¹³⁰ - 5).
+ h [3]uint64
+ // r and s are the private key components.
+ r [2]uint64
+ s [2]uint64
+}
+
+type macGeneric struct {
+ macState
+
+ buffer [TagSize]byte
+ offset int
+}
+
+// Write splits the incoming message into TagSize chunks, and passes them to
+// update. It buffers incomplete chunks.
+func (h *macGeneric) Write(p []byte) (int, error) {
+ nn := len(p)
+ if h.offset > 0 {
+ n := copy(h.buffer[h.offset:], p)
+ if h.offset+n < TagSize {
+ h.offset += n
+ return nn, nil
+ }
+ p = p[n:]
+ h.offset = 0
+ updateGeneric(&h.macState, h.buffer[:])
+ }
+ if n := len(p) - (len(p) % TagSize); n > 0 {
+ updateGeneric(&h.macState, p[:n])
+ p = p[n:]
+ }
+ if len(p) > 0 {
+ h.offset += copy(h.buffer[h.offset:], p)
+ }
+ return nn, nil
+}
+
+// Sum flushes the last incomplete chunk from the buffer, if any, and generates
+// the MAC output. It does not modify its state, in order to allow for multiple
+// calls to Sum, even if no Write is allowed after Sum.
+func (h *macGeneric) Sum(out *[TagSize]byte) {
+ state := h.macState
+ if h.offset > 0 {
+ updateGeneric(&state, h.buffer[:h.offset])
+ }
+ finalize(out, &state.h, &state.s)
+}
+
+// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
+// clears some bits of the secret coefficient to make it possible to implement
+// multiplication more efficiently.
+const (
+ rMask0 = 0x0FFFFFFC0FFFFFFF
+ rMask1 = 0x0FFFFFFC0FFFFFFC
+)
+
+// initialize loads the 256-bit key into the two 128-bit secret values r and s.
+func initialize(key *[32]byte, m *macState) {
+ m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
+ m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
+ m.s[0] = binary.LittleEndian.Uint64(key[16:24])
+ m.s[1] = binary.LittleEndian.Uint64(key[24:32])
+}
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
+type uint128 struct {
+ lo, hi uint64
+}
+
+func mul64(a, b uint64) uint128 {
+ hi, lo := bitsMul64(a, b)
+ return uint128{lo, hi}
+}
+
+func add128(a, b uint128) uint128 {
+ lo, c := bitsAdd64(a.lo, b.lo, 0)
+ hi, c := bitsAdd64(a.hi, b.hi, c)
+ if c != 0 {
+ panic("poly1305: unexpected overflow")
+ }
+ return uint128{lo, hi}
+}
+
+func shiftRightBy2(a uint128) uint128 {
+ a.lo = a.lo>>2 | (a.hi&3)<<62
+ a.hi = a.hi >> 2
+ return a
+}
+
+// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
+// 128 bits of message, it computes
+//
+// h₊ = (h + m) * r mod 2¹³⁰ - 5
+//
+// If the msg length is not a multiple of TagSize, it assumes the last
+// incomplete chunk is the final one.
+func updateGeneric(state *macState, msg []byte) {
+ h0, h1, h2 := state.h[0], state.h[1], state.h[2]
+ r0, r1 := state.r[0], state.r[1]
+
+ for len(msg) > 0 {
+ var c uint64
+
+ // For the first step, h + m, we use a chain of bits.Add64 intrinsics.
+ // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially
+ // reduced at the end of the multiplication below.
+ //
+ // The spec requires us to set a bit just above the message size, not to
+ // hide leading zeroes. For full chunks, that's 1 << 128, so we can just
+ // add 1 to the most significant (2¹²⁸) limb, h2.
+ if len(msg) >= TagSize {
+ h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
+ h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
+ h2 += c + 1
+
+ msg = msg[TagSize:]
+ } else {
+ var buf [TagSize]byte
+ copy(buf[:], msg)
+ buf[len(msg)] = 1
+
+ h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
+ h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
+ h2 += c
+
+ msg = nil
+ }
+
+ // Multiplication of big number limbs is similar to elementary school
+ // columnar multiplication. Instead of digits, there are 64-bit limbs.
+ //
+ // We are multiplying a 3 limbs number, h, by a 2 limbs number, r.
+ //
+ // h2 h1 h0 x
+ // r1 r0 =
+ // ----------------
+ // h2r0 h1r0 h0r0 <-- individual 128-bit products
+ // + h2r1 h1r1 h0r1
+ // ------------------------
+ // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs
+ // ------------------------
+ // m3.hi m2.hi m1.hi m0.hi <-- carry propagation
+ // + m3.lo m2.lo m1.lo m0.lo
+ // -------------------------------
+ // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs
+ //
+ // The main difference from pen-and-paper multiplication is that we do
+ // carry propagation in a separate step, as if we wrote two digit sums
+ // at first (the 128-bit limbs), and then carried the tens all at once.
+
+ h0r0 := mul64(h0, r0)
+ h1r0 := mul64(h1, r0)
+ h2r0 := mul64(h2, r0)
+ h0r1 := mul64(h0, r1)
+ h1r1 := mul64(h1, r1)
+ h2r1 := mul64(h2, r1)
+
+ // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their
+ // top 4 bits cleared by rMask{0,1}, we know that their product is not going
+ // to overflow 64 bits, so we can ignore the high part of the products.
+ //
+ // This also means that the product doesn't have a fifth limb (t4).
+ if h2r0.hi != 0 {
+ panic("poly1305: unexpected overflow")
+ }
+ if h2r1.hi != 0 {
+ panic("poly1305: unexpected overflow")
+ }
+
+ m0 := h0r0
+ m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again
+ m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1.
+ m3 := h2r1
+
+ t0 := m0.lo
+ t1, c := bitsAdd64(m1.lo, m0.hi, 0)
+ t2, c := bitsAdd64(m2.lo, m1.hi, c)
+ t3, _ := bitsAdd64(m3.lo, m2.hi, c)
+
+ // Now we have the result as 4 64-bit limbs, and we need to reduce it
+ // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
+ // a cheap partial reduction according to the reduction identity
+ //
+ // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5
+ //
+ // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is
+ // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the
+ // assumptions we make about h in the rest of the code.
+ //
+ // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23
+
+ // We split the final result at the 2¹³⁰ mark into h and cc, the carry.
+ // Note that the carry bits are effectively shifted left by 2, in other
+ // words, cc = c * 4 for the c in the reduction identity.
+ h0, h1, h2 = t0, t1, t2&maskLow2Bits
+ cc := uint128{t2 & maskNotLow2Bits, t3}
+
+ // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
+
+ h0, c = bitsAdd64(h0, cc.lo, 0)
+ h1, c = bitsAdd64(h1, cc.hi, c)
+ h2 += c
+
+ cc = shiftRightBy2(cc)
+
+ h0, c = bitsAdd64(h0, cc.lo, 0)
+ h1, c = bitsAdd64(h1, cc.hi, c)
+ h2 += c
+
+ // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
+ //
+ // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1
+ }
+
+ state.h[0], state.h[1], state.h[2] = h0, h1, h2
+}
+
+const (
+ maskLow2Bits uint64 = 0x0000000000000003
+ maskNotLow2Bits uint64 = ^maskLow2Bits
+)
+
+// select64 returns x if v == 1 and y if v == 0, in constant time.
+func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }
+
+// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order.
+const (
+ p0 = 0xFFFFFFFFFFFFFFFB
+ p1 = 0xFFFFFFFFFFFFFFFF
+ p2 = 0x0000000000000003
+)
+
+// finalize completes the modular reduction of h and computes
+//
+// out = h + s mod 2¹²⁸
+//
+func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
+ h0, h1, h2 := h[0], h[1], h[2]
+
+ // After the partial reduction in updateGeneric, h might be more than
+ // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction
+ // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
+ // result if the subtraction underflows, and t otherwise.
+
+ hMinusP0, b := bitsSub64(h0, p0, 0)
+ hMinusP1, b := bitsSub64(h1, p1, b)
+ _, b = bitsSub64(h2, p2, b)
+
+ // h = h if h < p else h - p
+ h0 = select64(b, h0, hMinusP0)
+ h1 = select64(b, h1, hMinusP1)
+
+ // Finally, we compute the last Poly1305 step
+ //
+ // tag = h + s mod 2¹²⁸
+ //
+ // by just doing a wide addition with the 128 low bits of h and discarding
+ // the overflow.
+ h0, c := bitsAdd64(h0, s[0], 0)
+ h1, _ = bitsAdd64(h1, s[1], c)
+
+ binary.LittleEndian.PutUint64(out[0:8], h0)
+ binary.LittleEndian.PutUint64(out[8:16], h1)
+}
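
The sequential definition quoted in the file comment can be transcribed almost literally with math/big, which makes a useful cross-check even though it is variable-time and therefore unusable for real MACs. A sketch of that transcription; the key and message are the RFC 8439 Section 2.5.2 example, and the expected tag is noted in a comment:

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

// leInt reads b as a little-endian integer (big.Int.SetBytes is big-endian,
// so reverse into a copy first).
func leInt(b []byte) *big.Int {
	rb := make([]byte, len(b))
	for i, v := range b {
		rb[len(b)-1-i] = v
	}
	return new(big.Int).SetBytes(rb)
}

// poly1305Big is a variable-time transcription of the definition above: per
// 16-byte chunk, h = ((h + m) * r) mod 2^130 - 5 with a padding bit set just
// above the chunk, then tag = (h + s) mod 2^128. Illustration only; real
// callers must use the constant-time code in this package.
func poly1305Big(msg, key []byte) []byte {
	p := new(big.Int).Lsh(big.NewInt(1), 130)
	p.Sub(p, big.NewInt(5))

	// Clamp r exactly as initialize() does (rMask0, rMask1 in little-endian).
	clamp := leInt([]byte{
		0xff, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f,
		0xfc, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f,
	})
	r := leInt(key[:16])
	r.And(r, clamp)
	s := leInt(key[16:32])

	h := new(big.Int)
	for len(msg) > 0 {
		n := 16
		if len(msg) < n {
			n = len(msg)
		}
		m := leInt(msg[:n])
		m.SetBit(m, 8*n, 1) // the 0x01 byte appended after the chunk
		h.Add(h, m).Mul(h, r).Mod(h, p)
		msg = msg[n:]
	}
	h.Add(h, s)
	h.Mod(h, new(big.Int).Lsh(big.NewInt(1), 128))

	be := make([]byte, 16)
	h.FillBytes(be)
	tag := make([]byte, 16)
	for i, v := range be {
		tag[15-i] = v // back to little-endian
	}
	return tag
}

func main() {
	key, _ := hex.DecodeString(
		"85d6be7857556d337f4452fe42d506a80103808afb0db2fd4abff6af4149f51b")
	msg := []byte("Cryptographic Forum Research Group")
	fmt.Printf("%x\n", poly1305Big(msg, key))
	// Expected: a8061dc1305136c6c22b8baf0c0127a9 (the RFC 8439 example tag).
}
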
diff --git a/libgo/go/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/libgo/go/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
new file mode 100644
index 00000000000..4a069941a6e
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
@@ -0,0 +1,48 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+// +build gc,!purego
+
+package poly1305
+
+//go:noescape
+func update(state *macState, msg []byte)
+
+// mac is a wrapper for macGeneric that redirects calls that would have gone to
+// updateGeneric to update.
+//
+// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
+// using function pointers would carry a major performance cost.
+type mac struct{ macGeneric }
+
+func (h *mac) Write(p []byte) (int, error) {
+ nn := len(p)
+ if h.offset > 0 {
+ n := copy(h.buffer[h.offset:], p)
+ if h.offset+n < TagSize {
+ h.offset += n
+ return nn, nil
+ }
+ p = p[n:]
+ h.offset = 0
+ update(&h.macState, h.buffer[:])
+ }
+ if n := len(p) - (len(p) % TagSize); n > 0 {
+ update(&h.macState, p[:n])
+ p = p[n:]
+ }
+ if len(p) > 0 {
+ h.offset += copy(h.buffer[h.offset:], p)
+ }
+ return nn, nil
+}
+
+func (h *mac) Sum(out *[16]byte) {
+ state := h.macState
+ if h.offset > 0 {
+ update(&state, h.buffer[:h.offset])
+ }
+ finalize(out, &state.h, &state.s)
+}
diff --git a/libgo/go/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/libgo/go/golang.org/x/crypto/internal/poly1305/sum_s390x.go
new file mode 100644
index 00000000000..62cc9f84709
--- /dev/null
+++ b/libgo/go/golang.org/x/crypto/internal/poly1305/sum_s390x.go
@@ -0,0 +1,76 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+// +build gc,!purego
+
+package poly1305
+
+import (
+ "golang.org/x/sys/cpu"
+)
+
+// updateVX is an assembly implementation of Poly1305 that uses vector
+// instructions. It must only be called if the vector facility (vx) is
+// available.
+//go:noescape
+func updateVX(state *macState, msg []byte)
+
+// mac is a replacement for macGeneric that uses a larger buffer and redirects
+// calls that would have gone to updateGeneric to updateVX if the vector
+// facility is installed.
+//
+// A larger buffer is required for good performance because the vector
+// implementation has a higher fixed cost per call than the generic
+// implementation.
+type mac struct {
+ macState
+
+ buffer [16 * TagSize]byte // size must be a multiple of block size (16)
+ offset int
+}
+
+func (h *mac) Write(p []byte) (int, error) {
+ nn := len(p)
+ if h.offset > 0 {
+ n := copy(h.buffer[h.offset:], p)
+ if h.offset+n < len(h.buffer) {
+ h.offset += n
+ return nn, nil
+ }
+ p = p[n:]
+ h.offset = 0
+ if cpu.S390X.HasVX {
+ updateVX(&h.macState, h.buffer[:])
+ } else {
+ updateGeneric(&h.macState, h.buffer[:])
+ }
+ }
+
+ tail := len(p) % len(h.buffer) // number of bytes to copy into buffer
+ body := len(p) - tail // number of bytes to process now
+ if body > 0 {
+ if cpu.S390X.HasVX {
+ updateVX(&h.macState, p[:body])
+ } else {
+ updateGeneric(&h.macState, p[:body])
+ }
+ }
+ h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0
+ return nn, nil
+}
+
+func (h *mac) Sum(out *[TagSize]byte) {
+ state := h.macState
+ remainder := h.buffer[:h.offset]
+
+ // Use the generic implementation if we have 2 or fewer blocks left
+ // to sum. The vector implementation has a higher startup time.
+ if cpu.S390X.HasVX && len(remainder) > 2*TagSize {
+ updateVX(&state, remainder)
+ } else if len(remainder) > 0 {
+ updateGeneric(&state, remainder)
+ }
+ finalize(out, &state.h, &state.s)
+}
diff --git a/libgo/go/golang.org/x/mod/modfile/rule.go b/libgo/go/golang.org/x/mod/modfile/rule.go
index 78f83fa7144..ed2f31aa70e 100644
--- a/libgo/go/golang.org/x/mod/modfile/rule.go
+++ b/libgo/go/golang.org/x/mod/modfile/rule.go
@@ -423,68 +423,12 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a
}
case "replace":
- arrow := 2
- if len(args) >= 2 && args[1] == "=>" {
- arrow = 1
- }
- if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" {
- errorf("usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory", verb, verb)
- return
- }
- s, err := parseString(&args[0])
- if err != nil {
- errorf("invalid quoted string: %v", err)
- return
- }
- pathMajor, err := modulePathMajor(s)
- if err != nil {
- wrapModPathError(s, err)
- return
- }
- var v string
- if arrow == 2 {
- v, err = parseVersion(verb, s, &args[1], fix)
- if err != nil {
- wrapError(err)
- return
- }
- if err := module.CheckPathMajor(v, pathMajor); err != nil {
- wrapModPathError(s, err)
- return
- }
- }
- ns, err := parseString(&args[arrow+1])
- if err != nil {
- errorf("invalid quoted string: %v", err)
+ replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix)
+ if wrappederr != nil {
+ *errs = append(*errs, *wrappederr)
return
}
- nv := ""
- if len(args) == arrow+2 {
- if !IsDirectoryPath(ns) {
- errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)")
- return
- }
- if filepath.Separator == '/' && strings.Contains(ns, `\`) {
- errorf("replacement directory appears to be Windows path (on a non-windows system)")
- return
- }
- }
- if len(args) == arrow+3 {
- nv, err = parseVersion(verb, ns, &args[arrow+2], fix)
- if err != nil {
- wrapError(err)
- return
- }
- if IsDirectoryPath(ns) {
- errorf("replacement module directory path %q cannot have version", ns)
- return
- }
- }
- f.Replace = append(f.Replace, &Replace{
- Old: module.Version{Path: s, Version: v},
- New: module.Version{Path: ns, Version: nv},
- Syntax: line,
- })
+ f.Replace = append(f.Replace, replace)
case "retract":
rationale := parseDirectiveComment(block, line)
@@ -515,6 +459,83 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a
}
}
+func parseReplace(filename string, line *Line, verb string, args []string, fix VersionFixer) (*Replace, *Error) {
+ wrapModPathError := func(modPath string, err error) *Error {
+ return &Error{
+ Filename: filename,
+ Pos: line.Start,
+ ModPath: modPath,
+ Verb: verb,
+ Err: err,
+ }
+ }
+ wrapError := func(err error) *Error {
+ return &Error{
+ Filename: filename,
+ Pos: line.Start,
+ Err: err,
+ }
+ }
+ errorf := func(format string, args ...interface{}) *Error {
+ return wrapError(fmt.Errorf(format, args...))
+ }
+
+ arrow := 2
+ if len(args) >= 2 && args[1] == "=>" {
+ arrow = 1
+ }
+ if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" {
+ return nil, errorf("usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory", verb, verb)
+ }
+ s, err := parseString(&args[0])
+ if err != nil {
+ return nil, errorf("invalid quoted string: %v", err)
+ }
+ pathMajor, err := modulePathMajor(s)
+ if err != nil {
+ return nil, wrapModPathError(s, err)
+ }
+ var v string
+ if arrow == 2 {
+ v, err = parseVersion(verb, s, &args[1], fix)
+ if err != nil {
+ return nil, wrapError(err)
+ }
+ if err := module.CheckPathMajor(v, pathMajor); err != nil {
+ return nil, wrapModPathError(s, err)
+ }
+ }
+ ns, err := parseString(&args[arrow+1])
+ if err != nil {
+ return nil, errorf("invalid quoted string: %v", err)
+ }
+ nv := ""
+ if len(args) == arrow+2 {
+ if !IsDirectoryPath(ns) {
+ return nil, errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)")
+ }
+ if filepath.Separator == '/' && strings.Contains(ns, `\`) {
+ return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)")
+ }
+ }
+ if len(args) == arrow+3 {
+ nv, err = parseVersion(verb, ns, &args[arrow+2], fix)
+ if err != nil {
+ return nil, wrapError(err)
+ }
+ if IsDirectoryPath(ns) {
+ return nil, errorf("replacement module directory path %q cannot have version", ns)
+
+ }
+ }
+ return &Replace{
+ Old: module.Version{Path: s, Version: v},
+ New: module.Version{Path: ns, Version: nv},
+ Syntax: line,
+ }, nil
+}
+
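
For reference, the two argument shapes parseReplace accepts correspond to go.mod replace directives like the following (hypothetical module paths, not taken from this patch); whether a version follows the old path is what the arrow-position check above detects:

replace example.com/original v1.2.3 => example.com/fork v1.4.0
replace example.com/original => ../local/fork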
// fixRetract applies fix to each retract directive in f, appending any errors
// to errs.
//
@@ -556,6 +577,63 @@ func (f *File) fixRetract(fix VersionFixer, errs *ErrorList) {
}
}
+func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, fix VersionFixer) {
+ wrapError := func(err error) {
+ *errs = append(*errs, Error{
+ Filename: f.Syntax.Name,
+ Pos: line.Start,
+ Err: err,
+ })
+ }
+ errorf := func(format string, args ...interface{}) {
+ wrapError(fmt.Errorf(format, args...))
+ }
+
+ switch verb {
+ default:
+ errorf("unknown directive: %s", verb)
+
+ case "go":
+ if f.Go != nil {
+ errorf("repeated go statement")
+ return
+ }
+ if len(args) != 1 {
+ errorf("go directive expects exactly one argument")
+ return
+ } else if !GoVersionRE.MatchString(args[0]) {
+ errorf("invalid go version '%s': must match format 1.23", args[0])
+ return
+ }
+
+ f.Go = &Go{Syntax: line}
+ f.Go.Version = args[0]
+
+ case "use":
+ if len(args) != 1 {
+ errorf("usage: %s local/dir", verb)
+ return
+ }
+ s, err := parseString(&args[0])
+ if err != nil {
+ errorf("invalid quoted string: %v", err)
+ return
+ }
+ f.Use = append(f.Use, &Use{
+ Path: s,
+ Syntax: line,
+ })
+
+ case "replace":
+ replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix)
+ if wrappederr != nil {
+ *errs = append(*errs, *wrappederr)
+ return
+ }
+ f.Replace = append(f.Replace, replace)
+ }
+}
+
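
Taken together, the directives handled by WorkFile.add correspond to a go.work file along these lines (hypothetical paths): go takes a single version, use takes a single directory, and replace reuses the go.mod syntax via parseReplace.

go 1.18

use ./tools
use ./service

replace example.com/original v1.2.3 => ../local/fork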
// IsDirectoryPath reports whether the given path should be interpreted
// as a directory path. Just like on the go command line, relative paths
// and rooted paths are directory paths; the rest are module paths.
@@ -956,170 +1034,217 @@ func (f *File) SetRequire(req []*Require) {
// SetRequireSeparateIndirect updates the requirements of f to contain the given
// requirements. Comment contents (except for 'indirect' markings) are retained
-// from the first existing requirement for each module path, and block structure
-// is maintained as long as the indirect markings match.
+// from the first existing requirement for each module path. Like SetRequire,
+// SetRequireSeparateIndirect adds requirements for new paths in req,
+// updates the version and "// indirect" comment on existing requirements,
+// and deletes requirements on paths not in req. Existing duplicate requirements
+// are deleted.
//
-// Any requirements on paths not already present in the file are added. Direct
-// requirements are added to the last block containing *any* other direct
-// requirement. Indirect requirements are added to the last block containing
-// *only* other indirect requirements. If no suitable block exists, a new one is
-// added, with the last block containing a direct dependency (if any)
-// immediately before the first block containing only indirect dependencies.
+// As its name suggests, SetRequireSeparateIndirect puts direct and indirect
+// requirements into two separate blocks, one containing only direct
+// requirements, and the other containing only indirect requirements.
+// SetRequireSeparateIndirect may move requirements between these two blocks
+// when their indirect markings change. However, SetRequireSeparateIndirect
+// won't move requirements from other blocks, especially blocks with comments.
//
-// The Syntax field is ignored for requirements in the given blocks.
+// If the file initially has one uncommented block of requirements,
+// SetRequireSeparateIndirect will split it into a direct-only and indirect-only
+// block. This aids in the transition to separate blocks.
func (f *File) SetRequireSeparateIndirect(req []*Require) {
- type modKey struct {
- path string
- indirect bool
- }
- need := make(map[modKey]string)
- for _, r := range req {
- need[modKey{r.Mod.Path, r.Indirect}] = r.Mod.Version
+ // hasComments returns whether a line or block has comments
+ // other than "indirect".
+ hasComments := func(c Comments) bool {
+ return len(c.Before) > 0 || len(c.After) > 0 || len(c.Suffix) > 1 ||
+ (len(c.Suffix) == 1 &&
+ strings.TrimSpace(strings.TrimPrefix(c.Suffix[0].Token, string(slashSlash))) != "indirect")
}
- comments := make(map[string]Comments)
- for _, r := range f.Require {
- v, ok := need[modKey{r.Mod.Path, r.Indirect}]
- if !ok {
- if _, ok := need[modKey{r.Mod.Path, !r.Indirect}]; ok {
- if _, dup := comments[r.Mod.Path]; !dup {
- comments[r.Mod.Path] = r.Syntax.Comments
- }
+ // moveReq adds r to block. If r was in another block, moveReq deletes
+ // it from that block and transfers its comments.
+ moveReq := func(r *Require, block *LineBlock) {
+ var line *Line
+ if r.Syntax == nil {
+ line = &Line{Token: []string{AutoQuote(r.Mod.Path), r.Mod.Version}}
+ r.Syntax = line
+ if r.Indirect {
+ r.setIndirect(true)
}
- r.markRemoved()
- continue
+ } else {
+ line = new(Line)
+ *line = *r.Syntax
+ if !line.InBlock && len(line.Token) > 0 && line.Token[0] == "require" {
+ line.Token = line.Token[1:]
+ }
+ r.Syntax.Token = nil // Cleanup will delete the old line.
+ r.Syntax = line
}
- r.setVersion(v)
- delete(need, modKey{r.Mod.Path, r.Indirect})
+ line.InBlock = true
+ block.Line = append(block.Line, line)
}
+ // Examine existing require lines and blocks.
var (
- lastDirectOrMixedBlock Expr
- firstIndirectOnlyBlock Expr
- lastIndirectOnlyBlock Expr
+ // We may insert new requirements into the last uncommented
+ // direct-only and indirect-only blocks. We may also move requirements
+ // to the opposite block if their indirect markings change.
+ lastDirectIndex = -1
+ lastIndirectIndex = -1
+
+ // If there are no direct-only or indirect-only blocks, a new block may
+ // be inserted after the last require line or block.
+ lastRequireIndex = -1
+
+ // If there's only one require line or block, and it's uncommented,
+ // we'll move its requirements to the direct-only or indirect-only blocks.
+ requireLineOrBlockCount = 0
+
+ // Track the block each requirement belongs to (if any) so we can
+ // move them later.
+ lineToBlock = make(map[*Line]*LineBlock)
)
- for _, stmt := range f.Syntax.Stmt {
+ for i, stmt := range f.Syntax.Stmt {
switch stmt := stmt.(type) {
case *Line:
if len(stmt.Token) == 0 || stmt.Token[0] != "require" {
continue
}
- if isIndirect(stmt) {
- lastIndirectOnlyBlock = stmt
- } else {
- lastDirectOrMixedBlock = stmt
+ lastRequireIndex = i
+ requireLineOrBlockCount++
+ if !hasComments(stmt.Comments) {
+ if isIndirect(stmt) {
+ lastIndirectIndex = i
+ } else {
+ lastDirectIndex = i
+ }
}
+
case *LineBlock:
if len(stmt.Token) == 0 || stmt.Token[0] != "require" {
continue
}
- indirectOnly := true
+ lastRequireIndex = i
+ requireLineOrBlockCount++
+ allDirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments)
+ allIndirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments)
for _, line := range stmt.Line {
- if len(line.Token) == 0 {
- continue
- }
- if !isIndirect(line) {
- indirectOnly = false
- break
+ lineToBlock[line] = stmt
+ if hasComments(line.Comments) {
+ allDirect = false
+ allIndirect = false
+ } else if isIndirect(line) {
+ allDirect = false
+ } else {
+ allIndirect = false
}
}
- if indirectOnly {
- lastIndirectOnlyBlock = stmt
- if firstIndirectOnlyBlock == nil {
- firstIndirectOnlyBlock = stmt
- }
- } else {
- lastDirectOrMixedBlock = stmt
+ if allDirect {
+ lastDirectIndex = i
+ }
+ if allIndirect {
+ lastIndirectIndex = i
}
}
}
- isOrContainsStmt := func(stmt Expr, target Expr) bool {
- if stmt == target {
- return true
- }
- if stmt, ok := stmt.(*LineBlock); ok {
- if target, ok := target.(*Line); ok {
- for _, line := range stmt.Line {
- if line == target {
- return true
- }
- }
+ oneFlatUncommentedBlock := requireLineOrBlockCount == 1 &&
+ !hasComments(*f.Syntax.Stmt[lastRequireIndex].Comment())
+
+ // Create direct and indirect blocks if needed. Convert lines into blocks
+ // if needed. If we end up with an empty block or a one-line block,
+ // Cleanup will delete it or convert it to a line later.
+ insertBlock := func(i int) *LineBlock {
+ block := &LineBlock{Token: []string{"require"}}
+ f.Syntax.Stmt = append(f.Syntax.Stmt, nil)
+ copy(f.Syntax.Stmt[i+1:], f.Syntax.Stmt[i:])
+ f.Syntax.Stmt[i] = block
+ return block
+ }
+
+ ensureBlock := func(i int) *LineBlock {
+ switch stmt := f.Syntax.Stmt[i].(type) {
+ case *LineBlock:
+ return stmt
+ case *Line:
+ block := &LineBlock{
+ Token: []string{"require"},
+ Line: []*Line{stmt},
}
+ stmt.Token = stmt.Token[1:] // remove "require"
+ stmt.InBlock = true
+ f.Syntax.Stmt[i] = block
+ return block
+ default:
+ panic(fmt.Sprintf("unexpected statement: %v", stmt))
}
- return false
}
- addRequire := func(path, vers string, indirect bool, comments Comments) {
- var line *Line
- if indirect {
- if lastIndirectOnlyBlock != nil {
- line = f.Syntax.addLine(lastIndirectOnlyBlock, "require", path, vers)
- } else {
- // Add a new require block after the last direct-only or mixed "require"
- // block (if any).
- //
- // (f.Syntax.addLine would add the line to an existing "require" block if
- // present, but here the existing "require" blocks are all direct-only, so
- // we know we need to add a new block instead.)
- line = &Line{Token: []string{"require", path, vers}}
- lastIndirectOnlyBlock = line
- firstIndirectOnlyBlock = line // only block implies first block
- if lastDirectOrMixedBlock == nil {
- f.Syntax.Stmt = append(f.Syntax.Stmt, line)
- } else {
- for i, stmt := range f.Syntax.Stmt {
- if isOrContainsStmt(stmt, lastDirectOrMixedBlock) {
- f.Syntax.Stmt = append(f.Syntax.Stmt, nil) // increase size
- copy(f.Syntax.Stmt[i+2:], f.Syntax.Stmt[i+1:]) // shuffle elements up
- f.Syntax.Stmt[i+1] = line
- break
- }
- }
- }
- }
+ var lastDirectBlock *LineBlock
+ if lastDirectIndex < 0 {
+ if lastIndirectIndex >= 0 {
+ lastDirectIndex = lastIndirectIndex
+ lastIndirectIndex++
+ } else if lastRequireIndex >= 0 {
+ lastDirectIndex = lastRequireIndex + 1
} else {
- if lastDirectOrMixedBlock != nil {
- line = f.Syntax.addLine(lastDirectOrMixedBlock, "require", path, vers)
- } else {
- // Add a new require block before the first indirect block (if any).
- //
- // That way if the file initially contains only indirect lines,
- // the direct lines still appear before it: we preserve existing
- // structure, but only to the extent that that structure already
- // reflects the direct/indirect split.
- line = &Line{Token: []string{"require", path, vers}}
- lastDirectOrMixedBlock = line
- if firstIndirectOnlyBlock == nil {
- f.Syntax.Stmt = append(f.Syntax.Stmt, line)
- } else {
- for i, stmt := range f.Syntax.Stmt {
- if isOrContainsStmt(stmt, firstIndirectOnlyBlock) {
- f.Syntax.Stmt = append(f.Syntax.Stmt, nil) // increase size
- copy(f.Syntax.Stmt[i+1:], f.Syntax.Stmt[i:]) // shuffle elements up
- f.Syntax.Stmt[i] = line
- break
- }
- }
- }
- }
+ lastDirectIndex = len(f.Syntax.Stmt)
}
+ lastDirectBlock = insertBlock(lastDirectIndex)
+ } else {
+ lastDirectBlock = ensureBlock(lastDirectIndex)
+ }
- line.Comments.Before = commentsAdd(line.Comments.Before, comments.Before)
- line.Comments.Suffix = commentsAdd(line.Comments.Suffix, comments.Suffix)
+ var lastIndirectBlock *LineBlock
+ if lastIndirectIndex < 0 {
+ lastIndirectIndex = lastDirectIndex + 1
+ lastIndirectBlock = insertBlock(lastIndirectIndex)
+ } else {
+ lastIndirectBlock = ensureBlock(lastIndirectIndex)
+ }
- r := &Require{
- Mod: module.Version{Path: path, Version: vers},
- Indirect: indirect,
- Syntax: line,
+ // Delete requirements we don't want anymore.
+ // Update versions and indirect comments on requirements we want to keep.
+ // If a requirement is in last{Direct,Indirect}Block with the wrong
+ // indirect marking after this, or if the requirement is in a single
+ // uncommented mixed block (oneFlatUncommentedBlock), move it to the
+ // correct block.
+ //
+ // Some blocks may be empty after this. Cleanup will remove them.
+ need := make(map[string]*Require)
+ for _, r := range req {
+ need[r.Mod.Path] = r
+ }
+ have := make(map[string]*Require)
+ for _, r := range f.Require {
+ path := r.Mod.Path
+ if need[path] == nil || have[path] != nil {
+ // Requirement not needed, or duplicate requirement. Delete.
+ r.markRemoved()
+ continue
+ }
+ have[r.Mod.Path] = r
+ r.setVersion(need[path].Mod.Version)
+ r.setIndirect(need[path].Indirect)
+ if need[path].Indirect &&
+ (oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastDirectBlock) {
+ moveReq(r, lastIndirectBlock)
+ } else if !need[path].Indirect &&
+ (oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastIndirectBlock) {
+ moveReq(r, lastDirectBlock)
}
- r.setIndirect(indirect)
- f.Require = append(f.Require, r)
}
- for k, vers := range need {
- addRequire(k.path, vers, k.indirect, comments[k.path])
+ // Add new requirements.
+ for path, r := range need {
+ if have[path] == nil {
+ if r.Indirect {
+ moveReq(r, lastIndirectBlock)
+ } else {
+ moveReq(r, lastDirectBlock)
+ }
+ f.Require = append(f.Require, r)
+ }
}
+
f.SortBlocks()
}
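
As a hedged illustration of the documented splitting behaviour, a single uncommented mixed require block (hypothetical modules) would come out as a direct-only block followed by an indirect-only block:

Before:

require (
    example.com/a v1.0.0
    example.com/b v1.1.0 // indirect
)

After:

require (
    example.com/a v1.0.0
)

require (
    example.com/b v1.1.0 // indirect
)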
@@ -1165,6 +1290,10 @@ func (f *File) DropExclude(path, vers string) error {
}
func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error {
+ return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers)
+}
+
+func addReplace(syntax *FileSyntax, replace *[]*Replace, oldPath, oldVers, newPath, newVers string) error {
need := true
old := module.Version{Path: oldPath, Version: oldVers}
new := module.Version{Path: newPath, Version: newVers}
@@ -1178,12 +1307,12 @@ func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error {
}
var hint *Line
- for _, r := range f.Replace {
+ for _, r := range *replace {
if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) {
if need {
// Found replacement for old; update to use new.
r.New = new
- f.Syntax.updateLine(r.Syntax, tokens...)
+ syntax.updateLine(r.Syntax, tokens...)
need = false
continue
}
@@ -1196,7 +1325,7 @@ func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error {
}
}
if need {
- f.Replace = append(f.Replace, &Replace{Old: old, New: new, Syntax: f.Syntax.addLine(hint, tokens...)})
+ *replace = append(*replace, &Replace{Old: old, New: new, Syntax: syntax.addLine(hint, tokens...)})
}
return nil
}
@@ -1282,30 +1411,36 @@ func (f *File) SortBlocks() {
// retract directives are not de-duplicated since comments are
// meaningful, and versions may be retracted multiple times.
func (f *File) removeDups() {
+ removeDups(f.Syntax, &f.Exclude, &f.Replace)
+}
+
+func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) {
kill := make(map[*Line]bool)
// Remove duplicate excludes.
- haveExclude := make(map[module.Version]bool)
- for _, x := range f.Exclude {
- if haveExclude[x.Mod] {
- kill[x.Syntax] = true
- continue
+ if exclude != nil {
+ haveExclude := make(map[module.Version]bool)
+ for _, x := range *exclude {
+ if haveExclude[x.Mod] {
+ kill[x.Syntax] = true
+ continue
+ }
+ haveExclude[x.Mod] = true
}
- haveExclude[x.Mod] = true
- }
- var excl []*Exclude
- for _, x := range f.Exclude {
- if !kill[x.Syntax] {
- excl = append(excl, x)
+ var excl []*Exclude
+ for _, x := range *exclude {
+ if !kill[x.Syntax] {
+ excl = append(excl, x)
+ }
}
+ *exclude = excl
}
- f.Exclude = excl
// Remove duplicate replacements.
// Later replacements take priority over earlier ones.
haveReplace := make(map[module.Version]bool)
- for i := len(f.Replace) - 1; i >= 0; i-- {
- x := f.Replace[i]
+ for i := len(*replace) - 1; i >= 0; i-- {
+ x := (*replace)[i]
if haveReplace[x.Old] {
kill[x.Syntax] = true
continue
@@ -1313,18 +1448,18 @@ func (f *File) removeDups() {
haveReplace[x.Old] = true
}
var repl []*Replace
- for _, x := range f.Replace {
+ for _, x := range *replace {
if !kill[x.Syntax] {
repl = append(repl, x)
}
}
- f.Replace = repl
+ *replace = repl
// Duplicate require and retract directives are not removed.
// Drop killed statements from the syntax tree.
var stmts []Expr
- for _, stmt := range f.Syntax.Stmt {
+ for _, stmt := range syntax.Stmt {
switch stmt := stmt.(type) {
case *Line:
if kill[stmt] {
@@ -1344,7 +1479,7 @@ func (f *File) removeDups() {
}
stmts = append(stmts, stmt)
}
- f.Syntax.Stmt = stmts
+ syntax.Stmt = stmts
}
// lineLess returns whether li should be sorted before lj. It sorts
diff --git a/libgo/go/golang.org/x/mod/modfile/work.go b/libgo/go/golang.org/x/mod/modfile/work.go
new file mode 100644
index 00000000000..0c0e521525a
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/modfile/work.go
@@ -0,0 +1,234 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfile
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A WorkFile is the parsed, interpreted form of a go.work file.
+type WorkFile struct {
+ Go *Go
+ Use []*Use
+ Replace []*Replace
+
+ Syntax *FileSyntax
+}
+
+// A Use is a single directory statement.
+type Use struct {
+ Path string // Use path of module.
+ ModulePath string // Module path in the comment.
+ Syntax *Line
+}
+
+// ParseWork parses and returns a go.work file.
+//
+// file is the name of the file, used in positions and errors.
+//
+// data is the content of the file.
+//
+// fix is an optional function that canonicalizes module versions.
+// If fix is nil, all module versions must be canonical (module.CanonicalVersion
+// must return the same string).
+func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) {
+ fs, err := parse(file, data)
+ if err != nil {
+ return nil, err
+ }
+ f := &WorkFile{
+ Syntax: fs,
+ }
+ var errs ErrorList
+
+ for _, x := range fs.Stmt {
+ switch x := x.(type) {
+ case *Line:
+ f.add(&errs, x, x.Token[0], x.Token[1:], fix)
+
+ case *LineBlock:
+ if len(x.Token) > 1 {
+ errs = append(errs, Error{
+ Filename: file,
+ Pos: x.Start,
+ Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
+ })
+ continue
+ }
+ switch x.Token[0] {
+ default:
+ errs = append(errs, Error{
+ Filename: file,
+ Pos: x.Start,
+ Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
+ })
+ continue
+ case "use", "replace":
+ for _, l := range x.Line {
+ f.add(&errs, l, x.Token[0], l.Token, fix)
+ }
+ }
+ }
+ }
+
+ if len(errs) > 0 {
+ return nil, errs
+ }
+ return f, nil
+}
+
+// Cleanup cleans up the file f after any edit operations.
+// To avoid quadratic behavior, modifications like DropUse and
+// DropReplace clear the entry but do not remove it from the slice.
+// Cleanup cleans out all the cleared entries.
+func (f *WorkFile) Cleanup() {
+ w := 0
+ for _, r := range f.Use {
+ if r.Path != "" {
+ f.Use[w] = r
+ w++
+ }
+ }
+ f.Use = f.Use[:w]
+
+ w = 0
+ for _, r := range f.Replace {
+ if r.Old.Path != "" {
+ f.Replace[w] = r
+ w++
+ }
+ }
+ f.Replace = f.Replace[:w]
+
+ f.Syntax.Cleanup()
+}
+
+func (f *WorkFile) AddGoStmt(version string) error {
+ if !GoVersionRE.MatchString(version) {
+ return fmt.Errorf("invalid language version string %q", version)
+ }
+ if f.Go == nil {
+ stmt := &Line{Token: []string{"go", version}}
+ f.Go = &Go{
+ Version: version,
+ Syntax: stmt,
+ }
+ // Find the first non-comment-only block and add the go
+ // statement before it. That will keep file comments at the top.
+ i := 0
+ for i = 0; i < len(f.Syntax.Stmt); i++ {
+ if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok {
+ break
+ }
+ }
+ f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...)
+ } else {
+ f.Go.Version = version
+ f.Syntax.updateLine(f.Go.Syntax, "go", version)
+ }
+ return nil
+}
+
+func (f *WorkFile) AddUse(diskPath, modulePath string) error {
+ need := true
+ for _, d := range f.Use {
+ if d.Path == diskPath {
+ if need {
+ d.ModulePath = modulePath
+ f.Syntax.updateLine(d.Syntax, "use", AutoQuote(diskPath))
+ need = false
+ } else {
+ d.Syntax.markRemoved()
+ *d = Use{}
+ }
+ }
+ }
+
+ if need {
+ f.AddNewUse(diskPath, modulePath)
+ }
+ return nil
+}
+
+func (f *WorkFile) AddNewUse(diskPath, modulePath string) {
+ line := f.Syntax.addLine(nil, "use", AutoQuote(diskPath))
+ f.Use = append(f.Use, &Use{Path: diskPath, ModulePath: modulePath, Syntax: line})
+}
+
+func (f *WorkFile) SetUse(dirs []*Use) {
+ need := make(map[string]string)
+ for _, d := range dirs {
+ need[d.Path] = d.ModulePath
+ }
+
+ for _, d := range f.Use {
+ if modulePath, ok := need[d.Path]; ok {
+ d.ModulePath = modulePath
+ } else {
+ d.Syntax.markRemoved()
+ *d = Use{}
+ }
+ }
+
+ // TODO(#45713): Add module path to comment.
+
+ for diskPath, modulePath := range need {
+ f.AddNewUse(diskPath, modulePath)
+ }
+ f.SortBlocks()
+}
+
+func (f *WorkFile) DropUse(path string) error {
+ for _, d := range f.Use {
+ if d.Path == path {
+ d.Syntax.markRemoved()
+ *d = Use{}
+ }
+ }
+ return nil
+}
+
+func (f *WorkFile) AddReplace(oldPath, oldVers, newPath, newVers string) error {
+ return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers)
+}
+
+func (f *WorkFile) DropReplace(oldPath, oldVers string) error {
+ for _, r := range f.Replace {
+ if r.Old.Path == oldPath && r.Old.Version == oldVers {
+ r.Syntax.markRemoved()
+ *r = Replace{}
+ }
+ }
+ return nil
+}
+
+func (f *WorkFile) SortBlocks() {
+ f.removeDups() // otherwise sorting is unsafe
+
+ for _, stmt := range f.Syntax.Stmt {
+ block, ok := stmt.(*LineBlock)
+ if !ok {
+ continue
+ }
+ sort.SliceStable(block.Line, func(i, j int) bool {
+ return lineLess(block.Line[i], block.Line[j])
+ })
+ }
+}
+
+// removeDups removes duplicate replace directives.
+//
+// Later replace directives take priority.
+//
+// require directives are not de-duplicated. That's left up to higher-level
+// logic (MVS).
+//
+// retract directives are not de-duplicated since comments are
+// meaningful, and versions may be retracted multiple times.
+func (f *WorkFile) removeDups() {
+ removeDups(f.Syntax, nil, &f.Replace)
+}
diff --git a/libgo/go/golang.org/x/mod/module/module.go b/libgo/go/golang.org/x/mod/module/module.go
index ba97ac356e9..355b5a45685 100644
--- a/libgo/go/golang.org/x/mod/module/module.go
+++ b/libgo/go/golang.org/x/mod/module/module.go
@@ -286,12 +286,7 @@ func fileNameOK(r rune) bool {
if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
return true
}
- for i := 0; i < len(allowed); i++ {
- if rune(allowed[i]) == r {
- return true
- }
- }
- return false
+ return strings.ContainsRune(allowed, r)
}
// It may be OK to add more ASCII punctuation here, but only carefully.
// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
@@ -803,6 +798,7 @@ func unescapeString(escaped string) (string, bool) {
// GOPRIVATE environment variable, as described by 'go help module-private'.
//
// It ignores any empty or malformed patterns in the list.
+// Trailing slashes on patterns are ignored.
func MatchPrefixPatterns(globs, target string) bool {
for globs != "" {
// Extract next non-empty glob in comma-separated list.
@@ -812,6 +808,7 @@ func MatchPrefixPatterns(globs, target string) bool {
} else {
glob, globs = globs, ""
}
+ glob = strings.TrimSuffix(glob, "/")
if glob == "" {
continue
}
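
A hedged sketch of the effect of the new TrimSuffix call, using hypothetical patterns: a trailing slash on a GOPRIVATE-style glob no longer prevents a match.

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// Before this change the trailing slash made the pattern match nothing.
	fmt.Println(module.MatchPrefixPatterns("example.com/private/", "example.com/private/repo")) // true
	fmt.Println(module.MatchPrefixPatterns("example.com/private", "example.com/public/repo"))   // false
}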
diff --git a/libgo/go/golang.org/x/mod/semver/semver.go b/libgo/go/golang.org/x/mod/semver/semver.go
index 7be398f80d3..a30a22bf20f 100644
--- a/libgo/go/golang.org/x/mod/semver/semver.go
+++ b/libgo/go/golang.org/x/mod/semver/semver.go
@@ -32,7 +32,6 @@ type parsed struct {
short string
prerelease string
build string
- err string
}
// IsValid reports whether v is a valid semantic version string.
@@ -172,12 +171,10 @@ func Sort(list []string) {
func parse(v string) (p parsed, ok bool) {
if v == "" || v[0] != 'v' {
- p.err = "missing v prefix"
return
}
p.major, v, ok = parseInt(v[1:])
if !ok {
- p.err = "bad major version"
return
}
if v == "" {
@@ -187,13 +184,11 @@ func parse(v string) (p parsed, ok bool) {
return
}
if v[0] != '.' {
- p.err = "bad minor prefix"
ok = false
return
}
p.minor, v, ok = parseInt(v[1:])
if !ok {
- p.err = "bad minor version"
return
}
if v == "" {
@@ -202,31 +197,26 @@ func parse(v string) (p parsed, ok bool) {
return
}
if v[0] != '.' {
- p.err = "bad patch prefix"
ok = false
return
}
p.patch, v, ok = parseInt(v[1:])
if !ok {
- p.err = "bad patch version"
return
}
if len(v) > 0 && v[0] == '-' {
p.prerelease, v, ok = parsePrerelease(v)
if !ok {
- p.err = "bad prerelease"
return
}
}
if len(v) > 0 && v[0] == '+' {
p.build, v, ok = parseBuild(v)
if !ok {
- p.err = "bad build"
return
}
}
if v != "" {
- p.err = "junk on end"
ok = false
return
}
diff --git a/libgo/go/golang.org/x/mod/zip/zip.go b/libgo/go/golang.org/x/mod/zip/zip.go
index 5b401ad4d8f..ca0f7ad42f1 100644
--- a/libgo/go/golang.org/x/mod/zip/zip.go
+++ b/libgo/go/golang.org/x/mod/zip/zip.go
@@ -53,6 +53,7 @@ import (
"io"
"io/ioutil"
"os"
+ "os/exec"
"path"
"path/filepath"
"strings"
@@ -192,8 +193,10 @@ func CheckFiles(files []File) (CheckedFiles, error) {
}
// checkFiles implements CheckFiles and also returns lists of valid files and
-// their sizes, corresponding to cf.Valid. These lists are used in Crewate to
-// avoid repeated calls to File.Lstat.
+// their sizes, corresponding to cf.Valid. It omits files in submodules, files
+// in vendored packages, symlinked files, and various other unwanted files.
+//
+// The lists returned are used in Create to avoid repeated calls to File.Lstat.
func checkFiles(files []File) (cf CheckedFiles, validFiles []File, validSizes []int64) {
errPaths := make(map[string]struct{})
addError := func(path string, omitted bool, err error) {
@@ -254,10 +257,12 @@ func checkFiles(files []File) (cf CheckedFiles, validFiles []File, validSizes []
continue
}
if isVendoredPackage(p) {
+ // Skip files in vendored packages.
addError(p, true, errVendored)
continue
}
if inSubmodule(p) {
+ // Skip submodule files.
addError(p, true, errSubmoduleFile)
continue
}
@@ -551,7 +556,7 @@ func CreateFromDir(w io.Writer, m module.Version, dir string) (err error) {
if zerr, ok := err.(*zipError); ok {
zerr.path = dir
} else if err != nil {
- err = &zipError{verb: "create zip", path: dir, err: err}
+ err = &zipError{verb: "create zip from directory", path: dir, err: err}
}
}()
@@ -563,6 +568,129 @@ func CreateFromDir(w io.Writer, m module.Version, dir string) (err error) {
return Create(w, m, files)
}
+// CreateFromVCS creates a module zip file for module m from the contents of a
+// VCS repository stored locally. The zip content is written to w.
+//
+// repoRoot must be an absolute path to the base of the repository, such as
+// "/Users/some-user/some-repo".
+//
+// revision is the revision of the repository to create the zip from. Examples
+// include HEAD or SHA sums for git repositories.
+//
+// subdir must be the relative path from the base of the repository, such as
+// "sub/dir". To create a zip from the base of the repository, pass an empty
+// string.
+//
+// If CreateFromVCS returns ErrUnrecognizedVCS, consider falling back to
+// CreateFromDir.
+func CreateFromVCS(w io.Writer, m module.Version, repoRoot, revision, subdir string) (err error) {
+ defer func() {
+ if zerr, ok := err.(*zipError); ok {
+ zerr.path = repoRoot
+ } else if err != nil {
+ err = &zipError{verb: "create zip from version control system", path: repoRoot, err: err}
+ }
+ }()
+
+ var filesToCreate []File
+
+ switch {
+ case isGitRepo(repoRoot):
+ files, err := filesInGitRepo(repoRoot, revision, subdir)
+ if err != nil {
+ return err
+ }
+
+ filesToCreate = files
+ default:
+ return &UnrecognizedVCSError{RepoRoot: repoRoot}
+ }
+
+ return Create(w, m, filesToCreate)
+}
+
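
A minimal usage sketch of the new CreateFromVCS entry point, under the assumption that /home/user/src/m is a local git checkout of the hypothetical module example.com/m; it shells out to git archive, so it fails if the directory is not a recognized repository, in which case a caller could fall back to CreateFromDir as the doc comment suggests:

package main

import (
	"bytes"
	"log"

	"golang.org/x/mod/module"
	modzip "golang.org/x/mod/zip"
)

func main() {
	var buf bytes.Buffer
	m := module.Version{Path: "example.com/m", Version: "v1.0.0"}
	// Zip up the repository contents at HEAD, rooted at the repository base.
	if err := modzip.CreateFromVCS(&buf, m, "/home/user/src/m", "HEAD", ""); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d zip bytes", buf.Len())
}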
+// UnrecognizedVCSError indicates that no recognized version control system was
+// found in the given directory.
+type UnrecognizedVCSError struct {
+ RepoRoot string
+}
+
+func (e *UnrecognizedVCSError) Error() string {
+ return fmt.Sprintf("could not find a recognized version control system at %q", e.RepoRoot)
+}
+
+// filesInGitRepo returns the files committed at rev in the git repository
+// at dir, restricted to subdir if subdir is non-empty.
+func filesInGitRepo(dir, rev, subdir string) ([]File, error) {
+ stderr := bytes.Buffer{}
+ stdout := bytes.Buffer{}
+
+ // Incredibly, git produces different archives depending on whether
+ // it is running on a Windows system or not, in an attempt to normalize
+ // text file line endings. Setting -c core.autocrlf=input means only
+ // translate files on the way into the repo, not on the way out (archive).
+ // The -c core.eol=lf should be unnecessary but set it anyway.
+ //
+ // Note: We use git archive to understand which files are actually included,
+ // ignoring things like .gitignore'd files. We could also use other
+ // techniques like git ls-files, but this approach most closely matches what
+ // the Go command does, which is beneficial.
+ //
+ // Note: some of this code copied from https://go.googlesource.com/go/+/refs/tags/go1.16.5/src/cmd/go/internal/modfetch/codehost/git.go#826.
+ cmd := exec.Command("git", "-c", "core.autocrlf=input", "-c", "core.eol=lf", "archive", "--format=zip", rev)
+ if subdir != "" {
+ cmd.Args = append(cmd.Args, subdir)
+ }
+ cmd.Dir = dir
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("error running `git archive`: %w, %s", err, stderr.String())
+ }
+
+ rawReader := bytes.NewReader(stdout.Bytes())
+ zipReader, err := zip.NewReader(rawReader, int64(stdout.Len()))
+ if err != nil {
+ return nil, err
+ }
+
+ var fs []File
+ for _, zf := range zipReader.File {
+ if !strings.HasPrefix(zf.Name, subdir) || strings.HasSuffix(zf.Name, "/") {
+ continue
+ }
+
+ n := strings.TrimPrefix(zf.Name, subdir)
+ if n == "" {
+ continue
+ }
+ n = strings.TrimPrefix(n, string(filepath.Separator))
+
+ fs = append(fs, zipFile{
+ name: n,
+ f: zf,
+ })
+ }
+
+ return fs, nil
+}
+
+// isGitRepo reports whether the given directory is a git repo.
+func isGitRepo(dir string) bool {
+ stdout := &bytes.Buffer{}
+ cmd := exec.Command("git", "rev-parse", "--git-dir")
+ cmd.Dir = dir
+ cmd.Stdout = stdout
+ if err := cmd.Run(); err != nil {
+ return false
+ }
+ gitDir := strings.TrimSpace(stdout.String())
+ if !filepath.IsAbs(gitDir) {
+ gitDir = filepath.Join(dir, gitDir)
+ }
+ wantDir := filepath.Join(dir, ".git")
+ return wantDir == gitDir
+}
+
type dirFile struct {
filePath, slashPath string
info os.FileInfo
@@ -572,6 +700,15 @@ func (f dirFile) Path() string { return f.slashPath }
func (f dirFile) Lstat() (os.FileInfo, error) { return f.info, nil }
func (f dirFile) Open() (io.ReadCloser, error) { return os.Open(f.filePath) }
+type zipFile struct {
+ name string
+ f *zip.File
+}
+
+func (f zipFile) Path() string { return f.name }
+func (f zipFile) Lstat() (os.FileInfo, error) { return f.f.FileInfo(), nil }
+func (f zipFile) Open() (io.ReadCloser, error) { return f.f.Open() }
+
// isVendoredPackage attempts to report whether the given filename is contained
// in a package whose import path contains (but does not end with) the component
// "vendor".
diff --git a/libgo/go/golang.org/x/net/dns/dnsmessage/message.go b/libgo/go/golang.org/x/net/dns/dnsmessage/message.go
index 1736fc5d12e..8c24430c5ce 100644
--- a/libgo/go/golang.org/x/net/dns/dnsmessage/message.go
+++ b/libgo/go/golang.org/x/net/dns/dnsmessage/message.go
@@ -125,14 +125,14 @@ func (o OpCode) GoString() string {
// An RCode is a DNS response status code.
type RCode uint16
+// Header.RCode values.
const (
- // Message.Rcode
- RCodeSuccess RCode = 0
- RCodeFormatError RCode = 1
- RCodeServerFailure RCode = 2
- RCodeNameError RCode = 3
- RCodeNotImplemented RCode = 4
- RCodeRefused RCode = 5
+ RCodeSuccess RCode = 0 // NoError
+ RCodeFormatError RCode = 1 // FormErr
+ RCodeServerFailure RCode = 2 // ServFail
+ RCodeNameError RCode = 3 // NXDomain
+ RCodeNotImplemented RCode = 4 // NotImp
+ RCodeRefused RCode = 5 // Refused
)
var rCodeNames = map[RCode]string{
@@ -1207,8 +1207,8 @@ type Builder struct {
//
// The DNS message is appended to the provided initial buffer buf (which may be
// nil) as it is built. The final message is returned by the (*Builder).Finish
-// method, which may return the same underlying array if there was sufficient
-// capacity in the slice.
+// method, which includes buf[:len(buf)] and may return the same underlying
+// array if there was sufficient capacity in the slice.
func NewBuilder(buf []byte, h Header) Builder {
if buf == nil {
buf = make([]byte, 0, packStartingCap)
@@ -1713,7 +1713,7 @@ const (
// SetEDNS0 configures h for EDNS(0).
//
-// The provided extRCode must be an extedned RCode.
+// The provided extRCode must be an extended RCode.
func (h *ResourceHeader) SetEDNS0(udpPayloadLen int, extRCode RCode, dnssecOK bool) error {
h.Name = Name{Data: [nameLen]byte{'.'}, Length: 1} // RFC 6891 section 6.1.2
h.Type = TypeOPT
@@ -1880,7 +1880,7 @@ const nameLen = 255
// A Name is a non-encoded domain name. It is used instead of strings to avoid
// allocations.
type Name struct {
- Data [nameLen]byte
+ Data [nameLen]byte // 255 bytes
Length uint8
}
diff --git a/libgo/go/golang.org/x/net/http/httpproxy/proxy.go b/libgo/go/golang.org/x/net/http/httpproxy/proxy.go
index 1415b077912..d2c8c87eab9 100644
--- a/libgo/go/golang.org/x/net/http/httpproxy/proxy.go
+++ b/libgo/go/golang.org/x/net/http/httpproxy/proxy.go
@@ -113,8 +113,8 @@ func getEnvAny(names ...string) string {
// environment, or a proxy should not be used for the given request, as
// defined by NO_PROXY.
//
-// As a special case, if req.URL.Host is "localhost" (with or without a
-// port number), then a nil URL and nil error will be returned.
+// As a special case, if req.URL.Host is "localhost" or a loopback address
+// (with or without a port number), then a nil URL and nil error will be returned.
func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) {
// Preprocess the Config settings for more efficient evaluation.
cfg1 := &config{
diff --git a/libgo/go/golang.org/x/net/http2/hpack/huffman.go b/libgo/go/golang.org/x/net/http2/hpack/huffman.go
index a1ab2f05679..fe0b84ccd46 100644
--- a/libgo/go/golang.org/x/net/http2/hpack/huffman.go
+++ b/libgo/go/golang.org/x/net/http2/hpack/huffman.go
@@ -140,25 +140,29 @@ func buildRootHuffmanNode() {
panic("unexpected size")
}
lazyRootHuffmanNode = newInternalNode()
- for i, code := range huffmanCodes {
- addDecoderNode(byte(i), code, huffmanCodeLen[i])
- }
-}
+ // allocate a leaf node for each of the 256 symbols
+ leaves := new([256]node)
+
+ for sym, code := range huffmanCodes {
+ codeLen := huffmanCodeLen[sym]
+
+ cur := lazyRootHuffmanNode
+ for codeLen > 8 {
+ codeLen -= 8
+ i := uint8(code >> codeLen)
+ if cur.children[i] == nil {
+ cur.children[i] = newInternalNode()
+ }
+ cur = cur.children[i]
+ }
+ shift := 8 - codeLen
+ start, end := int(uint8(code<<shift)), int(1<<shift)
-func addDecoderNode(sym byte, code uint32, codeLen uint8) {
- cur := lazyRootHuffmanNode
- for codeLen > 8 {
- codeLen -= 8
- i := uint8(code >> codeLen)
- if cur.children[i] == nil {
- cur.children[i] = newInternalNode()
+ leaves[sym].sym = byte(sym)
+ leaves[sym].codeLen = codeLen
+ for i := start; i < start+end; i++ {
+ cur.children[i] = &leaves[sym]
}
- cur = cur.children[i]
- }
- shift := 8 - codeLen
- start, end := int(uint8(code<<shift)), int(1<<shift)
- for i := start; i < start+end; i++ {
- cur.children[i] = &node{sym: sym, codeLen: codeLen}
}
}
diff --git a/libgo/go/golang.org/x/net/idna/go118.go b/libgo/go/golang.org/x/net/idna/go118.go
new file mode 100644
index 00000000000..c5c4338dbed
--- /dev/null
+++ b/libgo/go/golang.org/x/net/idna/go118.go
@@ -0,0 +1,14 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package idna
+
+// Transitional processing is disabled by default in Go 1.18.
+// https://golang.org/issue/47510
+const transitionalLookup = false
diff --git a/libgo/go/golang.org/x/net/idna/idna10.0.0.go b/libgo/go/golang.org/x/net/idna/idna10.0.0.go
index 5208ba6cb88..64ccf85febb 100644
--- a/libgo/go/golang.org/x/net/idna/idna10.0.0.go
+++ b/libgo/go/golang.org/x/net/idna/idna10.0.0.go
@@ -59,10 +59,10 @@ type Option func(*options)
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
-// compatibility. It is used by most browsers when resolving domain names. This
+// compatibility. It is used by some browsers when resolving domain names. This
// option is only meaningful if combined with MapForLookup.
func Transitional(transitional bool) Option {
- return func(o *options) { o.transitional = true }
+ return func(o *options) { o.transitional = transitional }
}
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
@@ -284,7 +284,7 @@ var (
punycode = &Profile{}
lookup = &Profile{options{
- transitional: true,
+ transitional: transitionalLookup,
useSTD3Rules: true,
checkHyphens: true,
checkJoiners: true,
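
A hedged sketch of what the two hunks above change: Transitional now honours its boolean argument instead of always forcing transitional mapping, and the lookup profile takes its default from the new transitionalLookup constant (false from Go 1.18 on). The expected outputs in the comments are the standard UTS #46 example and are illustrative:

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	nontrans := idna.New(idna.MapForLookup(), idna.Transitional(false))
	trans := idna.New(idna.MapForLookup(), idna.Transitional(true))

	// Without transitional mapping "ß" is kept and punycoded;
	// with it, "ß" is folded to "ss".
	a, _ := nontrans.ToASCII("faß.de")
	b, _ := trans.ToASCII("faß.de")
	fmt.Println(a) // xn--fa-hia.de
	fmt.Println(b) // fass.de
}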
diff --git a/libgo/go/golang.org/x/net/idna/idna9.0.0.go b/libgo/go/golang.org/x/net/idna/idna9.0.0.go
index 55f718f1274..aae6aac872b 100644
--- a/libgo/go/golang.org/x/net/idna/idna9.0.0.go
+++ b/libgo/go/golang.org/x/net/idna/idna9.0.0.go
@@ -58,10 +58,10 @@ type Option func(*options)
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
-// compatibility. It is used by most browsers when resolving domain names. This
+// compatibility. It is used by some browsers when resolving domain names. This
// option is only meaningful if combined with MapForLookup.
func Transitional(transitional bool) Option {
- return func(o *options) { o.transitional = true }
+ return func(o *options) { o.transitional = transitional }
}
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
diff --git a/libgo/go/golang.org/x/net/idna/pre_go118.go b/libgo/go/golang.org/x/net/idna/pre_go118.go
new file mode 100644
index 00000000000..3aaccab1c5a
--- /dev/null
+++ b/libgo/go/golang.org/x/net/idna/pre_go118.go
@@ -0,0 +1,12 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package idna
+
+const transitionalLookup = true
diff --git a/libgo/go/golang.org/x/net/idna/punycode.go b/libgo/go/golang.org/x/net/idna/punycode.go
index 02c7d59af3b..e8e3ac11a94 100644
--- a/libgo/go/golang.org/x/net/idna/punycode.go
+++ b/libgo/go/golang.org/x/net/idna/punycode.go
@@ -49,6 +49,7 @@ func decode(encoded string) (string, error) {
}
}
i, n, bias := int32(0), initialN, initialBias
+ overflow := false
for pos < len(encoded) {
oldI, w := i, int32(1)
for k := base; ; k += base {
@@ -60,29 +61,32 @@ func decode(encoded string) (string, error) {
return "", punyError(encoded)
}
pos++
- i += digit * w
- if i < 0 {
+ i, overflow = madd(i, digit, w)
+ if overflow {
return "", punyError(encoded)
}
t := k - bias
- if t < tmin {
+ if k <= bias {
t = tmin
- } else if t > tmax {
+ } else if k >= bias+tmax {
t = tmax
}
if digit < t {
break
}
- w *= base - t
- if w >= math.MaxInt32/base {
+ w, overflow = madd(0, w, base-t)
+ if overflow {
return "", punyError(encoded)
}
}
+ if len(output) >= 1024 {
+ return "", punyError(encoded)
+ }
x := int32(len(output) + 1)
bias = adapt(i-oldI, x, oldI == 0)
n += i / x
i %= x
- if n > utf8.MaxRune || len(output) >= 1024 {
+ if n < 0 || n > utf8.MaxRune {
return "", punyError(encoded)
}
output = append(output, 0)
@@ -115,6 +119,7 @@ func encode(prefix, s string) (string, error) {
if b > 0 {
output = append(output, '-')
}
+ overflow := false
for remaining != 0 {
m := int32(0x7fffffff)
for _, r := range s {
@@ -122,8 +127,8 @@ func encode(prefix, s string) (string, error) {
m = r
}
}
- delta += (m - n) * (h + 1)
- if delta < 0 {
+ delta, overflow = madd(delta, m-n, h+1)
+ if overflow {
return "", punyError(s)
}
n = m
@@ -141,9 +146,9 @@ func encode(prefix, s string) (string, error) {
q := delta
for k := base; ; k += base {
t := k - bias
- if t < tmin {
+ if k <= bias {
t = tmin
- } else if t > tmax {
+ } else if k >= bias+tmax {
t = tmax
}
if q < t {
@@ -164,6 +169,15 @@ func encode(prefix, s string) (string, error) {
return string(output), nil
}
+// madd computes a + (b * c), detecting overflow.
+func madd(a, b, c int32) (next int32, overflow bool) {
+ p := int64(b) * int64(c)
+ if p > math.MaxInt32-int64(a) {
+ return 0, true
+ }
+ return a + int32(p), false
+}
+
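
A small worked check of the overflow guard in madd above: the product is computed in int64, so a result that would wrap int32 is reported as overflow instead of silently going negative. The values are chosen only for illustration:

package main

import (
	"fmt"
	"math"
)

// madd computes a + (b * c), detecting int32 overflow (copy of the helper above).
func madd(a, b, c int32) (next int32, overflow bool) {
	p := int64(b) * int64(c)
	if p > math.MaxInt32-int64(a) {
		return 0, true
	}
	return a + int32(p), false
}

func main() {
	fmt.Println(madd(10, 3, 4))               // 22 false
	fmt.Println(madd(math.MaxInt32-5, 3, 4))  // 0 true (would exceed MaxInt32)
	fmt.Println(madd(math.MaxInt32-12, 3, 4)) // 2147483647 false (exactly MaxInt32)
}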
func decodeDigit(x byte) (digit int32, ok bool) {
switch {
case '0' <= x && x <= '9':
diff --git a/libgo/go/golang.org/x/net/lif/address.go b/libgo/go/golang.org/x/net/lif/address.go
index afb957fd8e1..34b6432d6e2 100644
--- a/libgo/go/golang.org/x/net/lif/address.go
+++ b/libgo/go/golang.org/x/net/lif/address.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build solaris
// +build solaris
package lif
diff --git a/libgo/go/golang.org/x/net/lif/binary.go b/libgo/go/golang.org/x/net/lif/binary.go
index 738a94f4224..f31ca3ad072 100644
--- a/libgo/go/golang.org/x/net/lif/binary.go
+++ b/libgo/go/golang.org/x/net/lif/binary.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build solaris
// +build solaris
package lif
diff --git a/libgo/go/golang.org/x/net/lif/lif.go b/libgo/go/golang.org/x/net/lif/lif.go
index 6e81f81f1c2..95c7d258466 100644
--- a/libgo/go/golang.org/x/net/lif/lif.go
+++ b/libgo/go/golang.org/x/net/lif/lif.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build solaris
// +build solaris
// Package lif provides basic functions for the manipulation of
diff --git a/libgo/go/golang.org/x/net/lif/link.go b/libgo/go/golang.org/x/net/lif/link.go
index 913a53e1185..f1af1306ca3 100644
--- a/libgo/go/golang.org/x/net/lif/link.go
+++ b/libgo/go/golang.org/x/net/lif/link.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build solaris
// +build solaris
package lif
diff --git a/libgo/go/golang.org/x/net/lif/sys.go b/libgo/go/golang.org/x/net/lif/sys.go
index c896041b7b4..d0b532d9dc9 100644
--- a/libgo/go/golang.org/x/net/lif/sys.go
+++ b/libgo/go/golang.org/x/net/lif/sys.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build solaris
// +build solaris
package lif
diff --git a/libgo/go/golang.org/x/net/lif/syscall.go b/libgo/go/golang.org/x/net/lif/syscall.go
index ea7541456bd..d2e9e619547 100644
--- a/libgo/go/golang.org/x/net/lif/syscall.go
+++ b/libgo/go/golang.org/x/net/lif/syscall.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build solaris
// +build solaris
package lif
diff --git a/libgo/go/golang.org/x/net/nettest/nettest.go b/libgo/go/golang.org/x/net/nettest/nettest.go
index 83ba858e249..ae5413b23df 100644
--- a/libgo/go/golang.org/x/net/nettest/nettest.go
+++ b/libgo/go/golang.org/x/net/nettest/nettest.go
@@ -95,13 +95,8 @@ func TestableNetwork(network string) bool {
// This is an internal network name for testing on the
// package net of the standard library.
switch runtime.GOOS {
- case "android", "fuchsia", "hurd", "js", "nacl", "plan9", "windows":
+ case "android", "fuchsia", "hurd", "ios", "js", "nacl", "plan9", "windows":
return false
- case "darwin", "ios":
- // iOS doesn't support it.
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
- return false
- }
}
case "ip", "ip4", "ip6":
switch runtime.GOOS {
@@ -114,15 +109,10 @@ func TestableNetwork(network string) bool {
}
case "unix", "unixgram":
switch runtime.GOOS {
- case "android", "fuchsia", "hurd", "js", "nacl", "plan9", "windows":
+ case "android", "fuchsia", "hurd", "ios", "js", "nacl", "plan9", "windows":
return false
case "aix":
return unixStrmDgramEnabled()
- case "darwin", "ios":
- // iOS does not support unix, unixgram.
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
- return false
- }
}
case "unixpacket":
switch runtime.GOOS {
diff --git a/libgo/go/golang.org/x/net/route/address.go b/libgo/go/golang.org/x/net/route/address.go
index 4f6ad968a24..1898ed0fad5 100644
--- a/libgo/go/golang.org/x/net/route/address.go
+++ b/libgo/go/golang.org/x/net/route/address.go
@@ -422,5 +422,9 @@ func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) (
b = b[l:]
}
}
+ // The only remaining bytes in b should be alignment.
+ // However, under some circumstances DragonFly BSD appears to put
+ // more addresses in the message than are indicated in the address
+ // bitmask, so don't check for this.
return as[:], nil
}
diff --git a/libgo/go/golang.org/x/net/route/message.go b/libgo/go/golang.org/x/net/route/message.go
index d53bb7f9b13..456a8363fec 100644
--- a/libgo/go/golang.org/x/net/route/message.go
+++ b/libgo/go/golang.org/x/net/route/message.go
@@ -53,7 +53,7 @@ func ParseRIB(typ RIBType, b []byte) ([]Message, error) {
if w, ok := wireFormats[int(b[3])]; !ok {
nskips++
} else {
- m, err := w.parse(typ, b)
+ m, err := w.parse(typ, b[:l])
if err != nil {
return nil, err
}
diff --git a/libgo/go/golang.org/x/net/route/sys_freebsd.go b/libgo/go/golang.org/x/net/route/sys_freebsd.go
index fe91be1249c..3599601740c 100644
--- a/libgo/go/golang.org/x/net/route/sys_freebsd.go
+++ b/libgo/go/golang.org/x/net/route/sys_freebsd.go
@@ -134,9 +134,6 @@ func probeRoutingStack() (int, map[int]*wireFormat) {
} else {
ifm.bodyOff = sizeofIfMsghdrFreeBSD11
}
- if rel >= 1102000 { // see https://github.com/freebsd/freebsd/commit/027c7f4d66ff8d8c4a46c3665a5ee7d6d8462034#diff-ad4e5b7f1449ea3fc87bc97280de145b
- align = wordSize
- }
}
rtm.parse = rtm.parseRouteMessage
ifm.parse = ifm.parseInterfaceMessage
diff --git a/libgo/go/golang.org/x/net/route/syscall.go b/libgo/go/golang.org/x/net/route/syscall.go
index 97166dd3c4c..68d37c9621c 100644
--- a/libgo/go/golang.org/x/net/route/syscall.go
+++ b/libgo/go/golang.org/x/net/route/syscall.go
@@ -2,28 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build dragonfly || freebsd || netbsd || openbsd
-// +build dragonfly freebsd netbsd openbsd
+//go:build darwin || dragonfly || freebsd || netbsd || openbsd
+// +build darwin dragonfly freebsd netbsd openbsd
package route
-import (
- "syscall"
- "unsafe"
-)
+import _ "unsafe" // for linkname
-var zero uintptr
-
-func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
- var p unsafe.Pointer
- if len(mib) > 0 {
- p = unsafe.Pointer(&mib[0])
- } else {
- p = unsafe.Pointer(&zero)
- }
- _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), newlen)
- if errno != 0 {
- return error(errno)
- }
- return nil
-}
+//go:linkname sysctl syscall.sysctl
+func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error
diff --git a/libgo/go/golang.org/x/sync/AUTHORS b/libgo/go/golang.org/x/sync/AUTHORS
new file mode 100644
index 00000000000..15167cd746c
--- /dev/null
+++ b/libgo/go/golang.org/x/sync/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/libgo/go/golang.org/x/sync/CONTRIBUTORS b/libgo/go/golang.org/x/sync/CONTRIBUTORS
new file mode 100644
index 00000000000..1c4577e9680
--- /dev/null
+++ b/libgo/go/golang.org/x/sync/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/libgo/go/golang.org/x/sync/LICENSE b/libgo/go/golang.org/x/sync/LICENSE
new file mode 100644
index 00000000000..6a66aea5eaf
--- /dev/null
+++ b/libgo/go/golang.org/x/sync/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/libgo/go/golang.org/x/sync/PATENTS b/libgo/go/golang.org/x/sync/PATENTS
new file mode 100644
index 00000000000..733099041f8
--- /dev/null
+++ b/libgo/go/golang.org/x/sync/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/libgo/go/golang.org/x/sync/semaphore/semaphore.go b/libgo/go/golang.org/x/sync/semaphore/semaphore.go
new file mode 100644
index 00000000000..30f632c577b
--- /dev/null
+++ b/libgo/go/golang.org/x/sync/semaphore/semaphore.go
@@ -0,0 +1,136 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package semaphore provides a weighted semaphore implementation.
+package semaphore // import "golang.org/x/sync/semaphore"
+
+import (
+ "container/list"
+ "context"
+ "sync"
+)
+
+type waiter struct {
+ n int64
+ ready chan<- struct{} // Closed when semaphore acquired.
+}
+
+// NewWeighted creates a new weighted semaphore with the given
+// maximum combined weight for concurrent access.
+func NewWeighted(n int64) *Weighted {
+ w := &Weighted{size: n}
+ return w
+}
+
+// Weighted provides a way to bound concurrent access to a resource.
+// The callers can request access with a given weight.
+type Weighted struct {
+ size int64
+ cur int64
+ mu sync.Mutex
+ waiters list.List
+}
+
+// Acquire acquires the semaphore with a weight of n, blocking until resources
+// are available or ctx is done. On success, returns nil. On failure, returns
+// ctx.Err() and leaves the semaphore unchanged.
+//
+// If ctx is already done, Acquire may still succeed without blocking.
+func (s *Weighted) Acquire(ctx context.Context, n int64) error {
+ s.mu.Lock()
+ if s.size-s.cur >= n && s.waiters.Len() == 0 {
+ s.cur += n
+ s.mu.Unlock()
+ return nil
+ }
+
+ if n > s.size {
+ // Don't make other Acquire calls block on one that's doomed to fail.
+ s.mu.Unlock()
+ <-ctx.Done()
+ return ctx.Err()
+ }
+
+ ready := make(chan struct{})
+ w := waiter{n: n, ready: ready}
+ elem := s.waiters.PushBack(w)
+ s.mu.Unlock()
+
+ select {
+ case <-ctx.Done():
+ err := ctx.Err()
+ s.mu.Lock()
+ select {
+ case <-ready:
+ // Acquired the semaphore after we were canceled. Rather than trying to
+ // fix up the queue, just pretend we didn't notice the cancelation.
+ err = nil
+ default:
+ isFront := s.waiters.Front() == elem
+ s.waiters.Remove(elem)
+ // If we're at the front and there're extra tokens left, notify other waiters.
+ if isFront && s.size > s.cur {
+ s.notifyWaiters()
+ }
+ }
+ s.mu.Unlock()
+ return err
+
+ case <-ready:
+ return nil
+ }
+}
+
+// TryAcquire acquires the semaphore with a weight of n without blocking.
+// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
+func (s *Weighted) TryAcquire(n int64) bool {
+ s.mu.Lock()
+ success := s.size-s.cur >= n && s.waiters.Len() == 0
+ if success {
+ s.cur += n
+ }
+ s.mu.Unlock()
+ return success
+}
+
+// Release releases the semaphore with a weight of n.
+func (s *Weighted) Release(n int64) {
+ s.mu.Lock()
+ s.cur -= n
+ if s.cur < 0 {
+ s.mu.Unlock()
+ panic("semaphore: released more than held")
+ }
+ s.notifyWaiters()
+ s.mu.Unlock()
+}
+
+func (s *Weighted) notifyWaiters() {
+ for {
+ next := s.waiters.Front()
+ if next == nil {
+ break // No more waiters blocked.
+ }
+
+ w := next.Value.(waiter)
+ if s.size-s.cur < w.n {
+ // Not enough tokens for the next waiter. We could keep going (to try to
+ // find a waiter with a smaller request), but under load that could cause
+ // starvation for large requests; instead, we leave all remaining waiters
+ // blocked.
+ //
+ // Consider a semaphore used as a read-write lock, with N tokens, N
+ // readers, and one writer. Each reader can Acquire(1) to obtain a read
+ // lock. The writer can Acquire(N) to obtain a write lock, excluding all
+ // of the readers. If we allow the readers to jump ahead in the queue,
+ // the writer will starve: there is always one token available for every
+ // reader.
+ break
+ }
+
+ s.cur += w.n
+ s.waiters.Remove(next)
+ close(w.ready)
+ }
+}
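Not part of the patch: a minimal usage sketch of the Weighted type added above, assuming the usual golang.org/x/sync/semaphore import path; it caps the number of concurrent tasks and then drains the semaphore by acquiring its full weight.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(3) // at most 3 tasks in flight

	for i := 0; i < 10; i++ {
		// Acquire blocks until a unit of weight is free or ctx is done.
		if err := sem.Acquire(ctx, 1); err != nil {
			fmt.Println("acquire failed:", err)
			break
		}
		go func(task int) {
			defer sem.Release(1) // return the unit when the task finishes
			fmt.Println("running task", task)
		}(i)
	}

	// Acquiring the full weight waits for every outstanding task to release.
	if err := sem.Acquire(ctx, 3); err == nil {
		fmt.Println("all tasks done")
	}
}
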
diff --git a/libgo/go/golang.org/x/sys/cpu/cpu.go b/libgo/go/golang.org/x/sys/cpu/cpu.go
index abbec2d44bf..b56886f2616 100644
--- a/libgo/go/golang.org/x/sys/cpu/cpu.go
+++ b/libgo/go/golang.org/x/sys/cpu/cpu.go
@@ -56,6 +56,7 @@ var X86 struct {
HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions
HasBMI1 bool // Bit manipulation instruction set 1
HasBMI2 bool // Bit manipulation instruction set 2
+ HasCX16 bool // Compare and exchange 16 Bytes
HasERMS bool // Enhanced REP for MOVSB and STOSB
HasFMA bool // Fused-multiply-add instructions
HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers.
diff --git a/libgo/go/golang.org/x/sys/cpu/cpu_gc_x86.go b/libgo/go/golang.org/x/sys/cpu/cpu_gc_x86.go
index 3298a87e981..fa7cdb9bcd5 100644
--- a/libgo/go/golang.org/x/sys/cpu/cpu_gc_x86.go
+++ b/libgo/go/golang.org/x/sys/cpu/cpu_gc_x86.go
@@ -15,7 +15,3 @@ func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func xgetbv() (eax, edx uint32)
-
-// darwinSupportsAVX512 is implemented in cpu_x86.s for gc compiler
-// and in cpu_gccgo_x86.go for gccgo.
-func darwinSupportsAVX512() bool
diff --git a/libgo/go/golang.org/x/sys/cpu/cpu_x86.go b/libgo/go/golang.org/x/sys/cpu/cpu_x86.go
index e50ed987599..1a99e94fee0 100644
--- a/libgo/go/golang.org/x/sys/cpu/cpu_x86.go
+++ b/libgo/go/golang.org/x/sys/cpu/cpu_x86.go
@@ -39,6 +39,7 @@ func initOptions() {
{Name: "avx512bf16", Feature: &X86.HasAVX512BF16},
{Name: "bmi1", Feature: &X86.HasBMI1},
{Name: "bmi2", Feature: &X86.HasBMI2},
+ {Name: "cx16", Feature: &X86.HasCX16},
{Name: "erms", Feature: &X86.HasERMS},
{Name: "fma", Feature: &X86.HasFMA},
{Name: "osxsave", Feature: &X86.HasOSXSAVE},
@@ -73,6 +74,7 @@ func archInit() {
X86.HasPCLMULQDQ = isSet(1, ecx1)
X86.HasSSSE3 = isSet(9, ecx1)
X86.HasFMA = isSet(12, ecx1)
+ X86.HasCX16 = isSet(13, ecx1)
X86.HasSSE41 = isSet(19, ecx1)
X86.HasSSE42 = isSet(20, ecx1)
X86.HasPOPCNT = isSet(23, ecx1)
@@ -88,9 +90,10 @@ func archInit() {
osSupportsAVX = isSet(1, eax) && isSet(2, eax)
if runtime.GOOS == "darwin" {
- // Check darwin commpage for AVX512 support. Necessary because:
- // https://github.com/apple/darwin-xnu/blob/0a798f6738bc1db01281fc08ae024145e84df927/osfmk/i386/fpu.c#L175-L201
- osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
+ // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers.
+ // Since users can't rely on mask register contents, let's not advertise AVX-512 support.
+ // See issue 49233.
+ osSupportsAVX512 = false
} else {
// Check if OPMASK and ZMM registers have OS support.
osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
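For context, a small hypothetical sketch of how callers read the vendored golang.org/x/sys/cpu feature flags, including the new HasCX16 bit; per the change above, the AVX-512 flags now simply stay false on darwin/amd64.

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// The X86 feature bits are filled in by archInit at package init time.
	if cpu.X86.HasCX16 {
		fmt.Println("CMPXCHG16B (16-byte compare-and-exchange) is available")
	}
	fmt.Println("AVX-512F usable:", cpu.X86.HasAVX512F) // reported as false on darwin
}
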
diff --git a/libgo/go/golang.org/x/text/unicode/bidi/core.go b/libgo/go/golang.org/x/text/unicode/bidi/core.go
index e4c0811016c..fde188a33b0 100644
--- a/libgo/go/golang.org/x/text/unicode/bidi/core.go
+++ b/libgo/go/golang.org/x/text/unicode/bidi/core.go
@@ -495,9 +495,9 @@ func (s *isolatingRunSequence) resolveWeakTypes() {
if t == NSM {
s.types[i] = precedingCharacterType
} else {
- if t.in(LRI, RLI, FSI, PDI) {
- precedingCharacterType = ON
- }
+ // if t.in(LRI, RLI, FSI, PDI) {
+ // precedingCharacterType = ON
+ // }
precedingCharacterType = t
}
}
diff --git a/libgo/go/golang.org/x/tools/cover/profile.go b/libgo/go/golang.org/x/tools/cover/profile.go
index 57195774cea..47a9a541164 100644
--- a/libgo/go/golang.org/x/tools/cover/profile.go
+++ b/libgo/go/golang.org/x/tools/cover/profile.go
@@ -10,6 +10,7 @@ import (
"bufio"
"errors"
"fmt"
+ "io"
"math"
"os"
"sort"
@@ -45,14 +46,18 @@ func ParseProfiles(fileName string) ([]*Profile, error) {
return nil, err
}
defer pf.Close()
+ return ParseProfilesFromReader(pf)
+}
- files := make(map[string]*Profile)
- buf := bufio.NewReader(pf)
+// ParseProfilesFromReader parses profile data from the Reader and
+// returns a Profile for each source file described therein.
+func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) {
// First line is "mode: foo", where foo is "set", "count", or "atomic".
// Rest of file is in the format
// encoding/base64/base64.go:34.44,37.40 3 1
// where the fields are: name.go:line.column,line.column numberOfStatements count
- s := bufio.NewScanner(buf)
+ files := make(map[string]*Profile)
+ s := bufio.NewScanner(rd)
mode := ""
for s.Scan() {
line := s.Text()
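A hedged sketch of the new ParseProfilesFromReader entry point (field names taken from the existing cover.Profile type); it lets coverage data that is already in memory be parsed without a temporary file.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/tools/cover"
)

func main() {
	data := "mode: set\n" +
		"example.com/pkg/file.go:3.10,5.2 1 1\n"

	// Any io.Reader works; previously only a file name could be passed.
	profiles, err := cover.ParseProfilesFromReader(strings.NewReader(data))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for _, p := range profiles {
		fmt.Println(p.FileName, p.Mode, len(p.Blocks))
	}
}
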
diff --git a/libgo/go/golang.org/x/tools/go/analysis/internal/facts/imports.go b/libgo/go/golang.org/x/tools/go/analysis/internal/facts/imports.go
index 34740f48e04..ade0cc6fab4 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/internal/facts/imports.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/internal/facts/imports.go
@@ -4,7 +4,11 @@
package facts
-import "go/types"
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/typeparams"
+)
// importMap computes the import map for a package by traversing the
// entire exported API of each of its imports.
@@ -42,9 +46,20 @@ func importMap(imports []*types.Package) map[string]*types.Package {
// nop
case *types.Named:
if addObj(T.Obj()) {
+ // TODO(taking): Investigate why the Underlying type is not added here.
for i := 0; i < T.NumMethods(); i++ {
addObj(T.Method(i))
}
+ if tparams := typeparams.ForNamed(T); tparams != nil {
+ for i := 0; i < tparams.Len(); i++ {
+ addType(tparams.At(i))
+ }
+ }
+ if targs := typeparams.NamedTypeArgs(T); targs != nil {
+ for i := 0; i < targs.Len(); i++ {
+ addType(targs.At(i))
+ }
+ }
}
case *types.Pointer:
addType(T.Elem())
@@ -60,6 +75,11 @@ func importMap(imports []*types.Package) map[string]*types.Package {
case *types.Signature:
addType(T.Params())
addType(T.Results())
+ if tparams := typeparams.ForSignature(T); tparams != nil {
+ for i := 0; i < tparams.Len(); i++ {
+ addType(tparams.At(i))
+ }
+ }
case *types.Struct:
for i := 0; i < T.NumFields(); i++ {
addObj(T.Field(i))
@@ -72,6 +92,17 @@ func importMap(imports []*types.Package) map[string]*types.Package {
for i := 0; i < T.NumMethods(); i++ {
addObj(T.Method(i))
}
+ for i := 0; i < T.NumEmbeddeds(); i++ {
+ addType(T.EmbeddedType(i)) // walk Embedded for implicits
+ }
+ case *typeparams.Union:
+ for i := 0; i < T.Len(); i++ {
+ addType(T.Term(i).Type())
+ }
+ case *typeparams.TypeParam:
+ if addObj(T.Obj()) {
+ addType(T.Constraint())
+ }
}
}
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/libgo/go/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
index 34c7a36234d..3997ac861df 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
@@ -51,6 +51,11 @@ type asmArch struct {
bigEndian bool
stack string
lr bool
+ // retRegs is a list of registers used for return values under the register ABI
+ // (ABIInternal). For now, as we only check whether we write to any result, we
+ // only need to include the first integer register and the first floating-point
+ // register; accessing either of them counts as writing to a result.
+ retRegs []string
// calculated during initialization
sizes types.Sizes
intSize int
@@ -79,8 +84,8 @@ type asmVar struct {
var (
asmArch386 = asmArch{name: "386", bigEndian: false, stack: "SP", lr: false}
asmArchArm = asmArch{name: "arm", bigEndian: false, stack: "R13", lr: true}
- asmArchArm64 = asmArch{name: "arm64", bigEndian: false, stack: "RSP", lr: true}
- asmArchAmd64 = asmArch{name: "amd64", bigEndian: false, stack: "SP", lr: false}
+ asmArchArm64 = asmArch{name: "arm64", bigEndian: false, stack: "RSP", lr: true, retRegs: []string{"R0", "F0"}}
+ asmArchAmd64 = asmArch{name: "amd64", bigEndian: false, stack: "SP", lr: false, retRegs: []string{"AX", "X0"}}
asmArchMips = asmArch{name: "mips", bigEndian: true, stack: "R29", lr: true}
asmArchMipsLE = asmArch{name: "mipsle", bigEndian: false, stack: "R29", lr: true}
asmArchMips64 = asmArch{name: "mips64", bigEndian: true, stack: "R29", lr: true}
@@ -139,7 +144,7 @@ var (
asmSP = re(`[^+\-0-9](([0-9]+)\(([A-Z0-9]+)\))`)
asmOpcode = re(`^\s*(?:[A-Z0-9a-z_]+:)?\s*([A-Z]+)\s*([^,]*)(?:,\s*(.*))?`)
ppc64Suff = re(`([BHWD])(ZU|Z|U|BR)?$`)
- abiSuff = re(`^(.+)<ABI.+>$`)
+ abiSuff = re(`^(.+)<(ABI.+)>$`)
)
func run(pass *analysis.Pass) (interface{}, error) {
@@ -187,6 +192,7 @@ Files:
var (
fn *asmFunc
fnName string
+ abi string
localSize, argSize int
wroteSP bool
noframe bool
@@ -197,18 +203,22 @@ Files:
flushRet := func() {
if fn != nil && fn.vars["ret"] != nil && !haveRetArg && len(retLine) > 0 {
v := fn.vars["ret"]
+ resultStr := fmt.Sprintf("%d-byte ret+%d(FP)", v.size, v.off)
+ if abi == "ABIInternal" {
+ resultStr = "result register"
+ }
for _, line := range retLine {
- pass.Reportf(analysisutil.LineStart(tf, line), "[%s] %s: RET without writing to %d-byte ret+%d(FP)", arch, fnName, v.size, v.off)
+ pass.Reportf(analysisutil.LineStart(tf, line), "[%s] %s: RET without writing to %s", arch, fnName, resultStr)
}
}
retLine = nil
}
- trimABI := func(fnName string) string {
+ trimABI := func(fnName string) (string, string) {
m := abiSuff.FindStringSubmatch(fnName)
if m != nil {
- return m[1]
+ return m[1], m[2]
}
- return fnName
+ return fnName, ""
}
for lineno, line := range lines {
lineno++
@@ -275,11 +285,12 @@ Files:
// log.Printf("%s:%d: [%s] cannot check cross-package assembly function: %s is in package %s", fname, lineno, arch, fnName, pkgPath)
fn = nil
fnName = ""
+ abi = ""
continue
}
}
// Trim off optional ABI selector.
- fnName := trimABI(fnName)
+ fnName, abi = trimABI(fnName)
flag := m[3]
fn = knownFunc[fnName][arch]
if fn != nil {
@@ -307,6 +318,7 @@ Files:
flushRet()
fn = nil
fnName = ""
+ abi = ""
continue
}
@@ -337,6 +349,15 @@ Files:
haveRetArg = true
}
+ if abi == "ABIInternal" && !haveRetArg {
+ for _, reg := range archDef.retRegs {
+ if strings.Contains(line, reg) {
+ haveRetArg = true
+ break
+ }
+ }
+ }
+
for _, m := range asmSP.FindAllStringSubmatch(line, -1) {
if m[3] != archDef.stack || wroteSP || noframe {
continue
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/composite/composite.go b/libgo/go/golang.org/x/tools/go/analysis/passes/composite/composite.go
index 4c3ac6647f6..d3670aca97a 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/composite/composite.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/composite/composite.go
@@ -14,6 +14,7 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/typeparams"
)
const Doc = `check for unkeyed composite literals
@@ -67,41 +68,61 @@ func run(pass *analysis.Pass) (interface{}, error) {
// skip whitelisted types
return
}
- under := typ.Underlying()
- for {
- ptr, ok := under.(*types.Pointer)
- if !ok {
- break
+ var structuralTypes []types.Type
+ switch typ := typ.(type) {
+ case *typeparams.TypeParam:
+ terms, err := typeparams.StructuralTerms(typ)
+ if err != nil {
+ return // invalid type
}
- under = ptr.Elem().Underlying()
- }
- if _, ok := under.(*types.Struct); !ok {
- // skip non-struct composite literals
- return
- }
- if isLocalType(pass, typ) {
- // allow unkeyed locally defined composite literal
- return
+ for _, term := range terms {
+ structuralTypes = append(structuralTypes, term.Type())
+ }
+ default:
+ structuralTypes = append(structuralTypes, typ)
}
+ for _, typ := range structuralTypes {
+ under := deref(typ.Underlying())
+ if _, ok := under.(*types.Struct); !ok {
+ // skip non-struct composite literals
+ continue
+ }
+ if isLocalType(pass, typ) {
+ // allow unkeyed locally defined composite literal
+ continue
+ }
- // check if the CompositeLit contains an unkeyed field
- allKeyValue := true
- for _, e := range cl.Elts {
- if _, ok := e.(*ast.KeyValueExpr); !ok {
- allKeyValue = false
- break
+ // check if the CompositeLit contains an unkeyed field
+ allKeyValue := true
+ for _, e := range cl.Elts {
+ if _, ok := e.(*ast.KeyValueExpr); !ok {
+ allKeyValue = false
+ break
+ }
}
- }
- if allKeyValue {
- // all the composite literal fields are keyed
+ if allKeyValue {
+ // all the composite literal fields are keyed
+ continue
+ }
+
+ pass.ReportRangef(cl, "%s composite literal uses unkeyed fields", typeName)
return
}
-
- pass.ReportRangef(cl, "%s composite literal uses unkeyed fields", typeName)
})
return nil, nil
}
+func deref(typ types.Type) types.Type {
+ for {
+ ptr, ok := typ.(*types.Pointer)
+ if !ok {
+ break
+ }
+ typ = ptr.Elem().Underlying()
+ }
+ return typ
+}
+
func isLocalType(pass *analysis.Pass, typ types.Type) bool {
switch x := typ.(type) {
case *types.Struct:
@@ -112,6 +133,8 @@ func isLocalType(pass *analysis.Pass, typ types.Type) bool {
case *types.Named:
// names in package foo are local to foo_test too
return strings.TrimSuffix(x.Obj().Pkg().Path(), "_test") == strings.TrimSuffix(pass.Pkg.Path(), "_test")
+ case *typeparams.TypeParam:
+ return strings.TrimSuffix(x.Obj().Pkg().Path(), "_test") == strings.TrimSuffix(pass.Pkg.Path(), "_test")
}
return false
}
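An illustrative, hypothetical snippet of the generic code the updated composite check can now handle: the type parameter's structural term is the external struct token.Position, so the unkeyed literal is reported just as it would be for the named type itself.

package example

import "go/token"

// newPos builds a token.Position-like value through a type parameter.
// The unkeyed composite literal below is what the analyzer reports.
func newPos[T token.Position](filename string) T {
	return T{filename, 0, 1, 1}
}
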
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/libgo/go/golang.org/x/tools/go/analysis/passes/copylock/copylock.go
index c4ebf785710..350dc4e0fec 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/copylock/copylock.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/copylock/copylock.go
@@ -17,6 +17,7 @@ import (
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/typeparams"
)
const Doc = `check for locks erroneously passed by value
@@ -145,7 +146,7 @@ func checkCopyLocksCallExpr(pass *analysis.Pass, ce *ast.CallExpr) {
func checkCopyLocksFunc(pass *analysis.Pass, name string, recv *ast.FieldList, typ *ast.FuncType) {
if recv != nil && len(recv.List) > 0 {
expr := recv.List[0].Type
- if path := lockPath(pass.Pkg, pass.TypesInfo.Types[expr].Type); path != nil {
+ if path := lockPath(pass.Pkg, pass.TypesInfo.Types[expr].Type, nil); path != nil {
pass.ReportRangef(expr, "%s passes lock by value: %v", name, path)
}
}
@@ -153,7 +154,7 @@ func checkCopyLocksFunc(pass *analysis.Pass, name string, recv *ast.FieldList, t
if typ.Params != nil {
for _, field := range typ.Params.List {
expr := field.Type
- if path := lockPath(pass.Pkg, pass.TypesInfo.Types[expr].Type); path != nil {
+ if path := lockPath(pass.Pkg, pass.TypesInfo.Types[expr].Type, nil); path != nil {
pass.ReportRangef(expr, "%s passes lock by value: %v", name, path)
}
}
@@ -199,12 +200,12 @@ func checkCopyLocksRangeVar(pass *analysis.Pass, rtok token.Token, e ast.Expr) {
if typ == nil {
return
}
- if path := lockPath(pass.Pkg, typ); path != nil {
+ if path := lockPath(pass.Pkg, typ, nil); path != nil {
pass.Reportf(e.Pos(), "range var %s copies lock: %v", analysisutil.Format(pass.Fset, e), path)
}
}
-type typePath []types.Type
+type typePath []string
// String pretty-prints a typePath.
func (path typePath) String() string {
@@ -215,7 +216,7 @@ func (path typePath) String() string {
fmt.Fprint(&buf, " contains ")
}
// The human-readable path is in reverse order, outermost to innermost.
- fmt.Fprint(&buf, path[n-i-1].String())
+ fmt.Fprint(&buf, path[n-i-1])
}
return buf.String()
}
@@ -234,16 +235,57 @@ func lockPathRhs(pass *analysis.Pass, x ast.Expr) typePath {
return nil
}
}
- return lockPath(pass.Pkg, pass.TypesInfo.Types[x].Type)
+ return lockPath(pass.Pkg, pass.TypesInfo.Types[x].Type, nil)
}
// lockPath returns a typePath describing the location of a lock value
// contained in typ. If there is no contained lock, it returns nil.
-func lockPath(tpkg *types.Package, typ types.Type) typePath {
+//
+// The seenTParams map is used to short-circuit infinite recursion via type
+// parameters.
+func lockPath(tpkg *types.Package, typ types.Type, seenTParams map[*typeparams.TypeParam]bool) typePath {
if typ == nil {
return nil
}
+ if tpar, ok := typ.(*typeparams.TypeParam); ok {
+ if seenTParams == nil {
+ // Lazily allocate seenTParams, since the common case will not involve
+ // any type parameters.
+ seenTParams = make(map[*typeparams.TypeParam]bool)
+ }
+ if seenTParams[tpar] {
+ return nil
+ }
+ seenTParams[tpar] = true
+ terms, err := typeparams.StructuralTerms(tpar)
+ if err != nil {
+ return nil // invalid type
+ }
+ for _, term := range terms {
+ subpath := lockPath(tpkg, term.Type(), seenTParams)
+ if len(subpath) > 0 {
+ if term.Tilde() {
+ // Prepend a tilde to our lock path entry to clarify the resulting
+ // diagnostic message. Consider the following example:
+ //
+ // func _[Mutex interface{ ~sync.Mutex; M() }](m Mutex) {}
+ //
+ // Here the naive error message will be something like "passes lock
+ // by value: Mutex contains sync.Mutex". This is misleading because
+ // the local type parameter doesn't actually contain sync.Mutex,
+ // which lacks the M method.
+ //
+ // With tilde, it is clearer that the containment is via an
+ // approximation element.
+ subpath[len(subpath)-1] = "~" + subpath[len(subpath)-1]
+ }
+ return append(subpath, typ.String())
+ }
+ }
+ return nil
+ }
+
for {
atyp, ok := typ.Underlying().(*types.Array)
if !ok {
@@ -252,6 +294,17 @@ func lockPath(tpkg *types.Package, typ types.Type) typePath {
typ = atyp.Elem()
}
+ ttyp, ok := typ.Underlying().(*types.Tuple)
+ if ok {
+ for i := 0; i < ttyp.Len(); i++ {
+ subpath := lockPath(tpkg, ttyp.At(i).Type(), seenTParams)
+ if subpath != nil {
+ return append(subpath, typ.String())
+ }
+ }
+ return nil
+ }
+
// We're only interested in the case in which the underlying
// type is a struct. (Interfaces and pointers are safe to copy.)
styp, ok := typ.Underlying().(*types.Struct)
@@ -263,7 +316,7 @@ func lockPath(tpkg *types.Package, typ types.Type) typePath {
// is a sync.Locker, but a value is not. This differentiates
// embedded interfaces from embedded values.
if types.Implements(types.NewPointer(typ), lockerType) && !types.Implements(typ, lockerType) {
- return []types.Type{typ}
+ return []string{typ.String()}
}
// In go1.10, sync.noCopy did not implement Locker.
@@ -272,15 +325,15 @@ func lockPath(tpkg *types.Package, typ types.Type) typePath {
if named, ok := typ.(*types.Named); ok &&
named.Obj().Name() == "noCopy" &&
named.Obj().Pkg().Path() == "sync" {
- return []types.Type{typ}
+ return []string{typ.String()}
}
nfields := styp.NumFields()
for i := 0; i < nfields; i++ {
ftyp := styp.Field(i).Type()
- subpath := lockPath(tpkg, ftyp)
+ subpath := lockPath(tpkg, ftyp, seenTParams)
if subpath != nil {
- return append(subpath, typ)
+ return append(subpath, typ.String())
}
}
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go b/libgo/go/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go
index 51600ffc7eb..73746d6f04d 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go
@@ -187,7 +187,11 @@ func (c *CFGs) callMayReturn(call *ast.CallExpr) (r bool) {
return false // panic never returns
}
- // Is this a static call?
+ // Is this a static call? Also includes static functions
+ // parameterized by a type. Such functions may or may not
+ // return depending on the parameter type, but in some
+ // cases the answer is definite. We let ctrlflow figure
+ // that out.
fn := typeutil.StaticCallee(c.pass.TypesInfo, call)
if fn == nil {
return true // callee not statically known; be conservative
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go b/libgo/go/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go
index fd2285332cc..30130f63ea6 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go
@@ -51,6 +51,12 @@ func assertableTo(v, t types.Type) *types.Func {
if V == nil || T == nil {
return nil
}
+
+ // Mitigations for interface comparisons and generics.
+ // TODO(https://github.com/golang/go/issues/50658): Support more precise conclusion.
+ if isParameterized(V) || isParameterized(T) {
+ return nil
+ }
if f, wrongType := types.MissingMethod(V, T, false); wrongType {
return f
}
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go b/libgo/go/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go
new file mode 100644
index 00000000000..1285ecf1367
--- /dev/null
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go
@@ -0,0 +1,112 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package ifaceassert
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// isParameterized reports whether typ contains any type parameters.
+//
+// NOTE: Adapted from go/types/infer.go. If that is exported in a future release remove this copy.
+func isParameterized(typ types.Type) bool {
+ w := tpWalker{
+ seen: make(map[types.Type]bool),
+ }
+ return w.isParameterized(typ)
+}
+
+type tpWalker struct {
+ seen map[types.Type]bool
+}
+
+func (w *tpWalker) isParameterized(typ types.Type) (res bool) {
+ // detect cycles
+ if x, ok := w.seen[typ]; ok {
+ return x
+ }
+ w.seen[typ] = false
+ defer func() {
+ w.seen[typ] = res
+ }()
+
+ switch t := typ.(type) {
+ case nil, *types.Basic: // TODO(gri) should nil be handled here?
+ break
+
+ case *types.Array:
+ return w.isParameterized(t.Elem())
+
+ case *types.Slice:
+ return w.isParameterized(t.Elem())
+
+ case *types.Struct:
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ if w.isParameterized(t.Field(i).Type()) {
+ return true
+ }
+ }
+
+ case *types.Pointer:
+ return w.isParameterized(t.Elem())
+
+ case *types.Tuple:
+ n := t.Len()
+ for i := 0; i < n; i++ {
+ if w.isParameterized(t.At(i).Type()) {
+ return true
+ }
+ }
+
+ case *types.Signature:
+ // t.tparams may not be nil if we are looking at a signature
+ // of a generic function type (or an interface method) that is
+ // part of the type we're testing. We don't care about these type
+ // parameters.
+ // Similarly, the receiver of a method may declare (rather than
+ // use) type parameters; we don't care about those either.
+ // Thus, we only need to look at the input and result parameters.
+ return w.isParameterized(t.Params()) || w.isParameterized(t.Results())
+
+ case *types.Interface:
+ for i, n := 0, t.NumMethods(); i < n; i++ {
+ if w.isParameterized(t.Method(i).Type()) {
+ return true
+ }
+ }
+ terms, err := typeparams.InterfaceTermSet(t)
+ if err != nil {
+ panic(err)
+ }
+ for _, term := range terms {
+ if w.isParameterized(term.Type()) {
+ return true
+ }
+ }
+
+ case *types.Map:
+ return w.isParameterized(t.Key()) || w.isParameterized(t.Elem())
+
+ case *types.Chan:
+ return w.isParameterized(t.Elem())
+
+ case *types.Named:
+ list := typeparams.NamedTypeArgs(t)
+ for i, n := 0, list.Len(); i < n; i++ {
+ if w.isParameterized(list.At(i)) {
+ return true
+ }
+ }
+
+ case *typeparams.TypeParam:
+ return true
+
+ default:
+ panic(t) // unreachable
+ }
+
+ return false
+}
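A hypothetical example of the mitigation in ifaceassert.go above: because the asserted-from interface mentions a type parameter, isParameterized returns true and the checker stays silent instead of calling the assertion impossible.

package example

import "io"

// anyReader is no longer flagged: for T = error the Read signature matches
// io.Reader exactly, so reporting the assertion as impossible would be a
// false positive. The check now gives up when type parameters are involved.
func anyReader[T any](v interface{ Read(p []byte) (int, T) }) io.Reader {
	r, _ := v.(io.Reader)
	return r
}
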
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go b/libgo/go/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go
index cd42c9897f2..e4c66df6d6d 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go
@@ -14,6 +14,7 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/typeparams"
)
const Doc = `check for useless comparisons between functions and nil
@@ -59,6 +60,12 @@ func run(pass *analysis.Pass) (interface{}, error) {
obj = pass.TypesInfo.Uses[v]
case *ast.SelectorExpr:
obj = pass.TypesInfo.Uses[v.Sel]
+ case *ast.IndexExpr, *typeparams.IndexListExpr:
+ // Check generic functions such as "f[T1,T2]".
+ x, _, _, _ := typeparams.UnpackIndexExpr(v)
+ if id, ok := x.(*ast.Ident); ok {
+ obj = pass.TypesInfo.Uses[id]
+ }
default:
return
}
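An illustrative snippet (not from the patch) of what the extended nilfunc check now catches: comparing an instantiated generic function against nil, which is always false for ==.

package example

func identity[T any](v T) T { return v }

func alwaysFalse() bool {
	// Reported, roughly: comparison of function identity == nil is always false.
	return identity[int] == nil
}
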
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/printf/printf.go b/libgo/go/golang.org/x/tools/go/analysis/passes/printf/printf.go
index 822820f06e9..dee37d78ae0 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/printf/printf.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/printf/printf.go
@@ -25,6 +25,7 @@ import (
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typeparams"
)
func init() {
@@ -452,8 +453,15 @@ func stringConstantArg(pass *analysis.Pass, call *ast.CallExpr, idx int) (string
if idx >= len(call.Args) {
return "", false
}
- arg := call.Args[idx]
- lit := pass.TypesInfo.Types[arg].Value
+ return stringConstantExpr(pass, call.Args[idx])
+}
+
+// stringConstantExpr returns the string constant value of expr.
+//
+// It returns ("", false) if expr is not a string
+// constant.
+func stringConstantExpr(pass *analysis.Pass, expr ast.Expr) (string, bool) {
+ lit := pass.TypesInfo.Types[expr].Value
if lit != nil && lit.Kind() == constant.String {
return constant.StringVal(lit), true
}
@@ -490,7 +498,7 @@ func printfNameAndKind(pass *analysis.Pass, call *ast.CallExpr) (fn *types.Func,
_, ok = isPrint[strings.ToLower(fn.Name())]
}
if ok {
- if fn.Name() == "Errorf" {
+ if fn.FullName() == "fmt.Errorf" {
kind = KindErrorf
} else if strings.HasSuffix(fn.Name(), "f") {
kind = KindPrintf
@@ -513,7 +521,12 @@ func printfNameAndKind(pass *analysis.Pass, call *ast.CallExpr) (fn *types.Func,
func isFormatter(typ types.Type) bool {
// If the type is an interface, the value it holds might satisfy fmt.Formatter.
if _, ok := typ.Underlying().(*types.Interface); ok {
- return true
+ // Don't assume type parameters could be formatters. With the greater
+ // expressiveness of constraint interface syntax we expect more type safety
+ // when using type parameters.
+ if !typeparams.IsTypeParam(typ) {
+ return true
+ }
}
obj, _, _ := types.LookupFieldOrMethod(typ, false, nil, "Format")
fn, ok := obj.(*types.Func)
@@ -555,7 +568,7 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
format, idx := formatString(pass, call)
if idx < 0 {
if false {
- pass.Reportf(call.Lparen, "can't check non-constant format in call to %s", fn.Name())
+ pass.Reportf(call.Lparen, "can't check non-constant format in call to %s", fn.FullName())
}
return
}
@@ -563,7 +576,7 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
firstArg := idx + 1 // Arguments are immediately after format string.
if !strings.Contains(format, "%") {
if len(call.Args) > firstArg {
- pass.Reportf(call.Lparen, "%s call has arguments but no formatting directives", fn.Name())
+ pass.Reportf(call.Lparen, "%s call has arguments but no formatting directives", fn.FullName())
}
return
}
@@ -577,7 +590,7 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
if format[i] != '%' {
continue
}
- state := parsePrintfVerb(pass, call, fn.Name(), format[i:], firstArg, argNum)
+ state := parsePrintfVerb(pass, call, fn.FullName(), format[i:], firstArg, argNum)
if state == nil {
return
}
@@ -589,8 +602,9 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
anyIndex = true
}
if state.verb == 'w' {
- if kind != KindErrorf {
- pass.Reportf(call.Pos(), "%s call has error-wrapping directive %%w, which is only supported by Errorf", state.name)
+ switch kind {
+ case KindNone, KindPrint, KindPrintf:
+ pass.Reportf(call.Pos(), "%s does not support error-wrapping directive %%w", state.name)
return
}
if anyW {
@@ -621,7 +635,7 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
if maxArgNum != len(call.Args) {
expect := maxArgNum - firstArg
numArgs := len(call.Args) - firstArg
- pass.ReportRangef(call, "%s call needs %v but has %v", fn.Name(), count(expect, "arg"), count(numArgs, "arg"))
+ pass.ReportRangef(call, "%s call needs %v but has %v", fn.FullName(), count(expect, "arg"), count(numArgs, "arg"))
}
}
@@ -833,8 +847,9 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, state *formatState) (o
}
// Could current arg implement fmt.Formatter?
+ // Skip check for the %w verb, which requires an error.
formatter := false
- if state.argNum < len(call.Args) {
+ if v.typ != argError && state.argNum < len(call.Args) {
if tv, ok := pass.TypesInfo.Types[call.Args[state.argNum]]; ok {
formatter = isFormatter(tv.Type)
}
@@ -870,8 +885,12 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, state *formatState) (o
return
}
arg := call.Args[argNum]
- if !matchArgType(pass, argInt, nil, arg) {
- pass.ReportRangef(call, "%s format %s uses non-int %s as argument of *", state.name, state.format, analysisutil.Format(pass.Fset, arg))
+ if reason, ok := matchArgType(pass, argInt, arg); !ok {
+ details := ""
+ if reason != "" {
+ details = " (" + reason + ")"
+ }
+ pass.ReportRangef(call, "%s format %s uses non-int %s%s as argument of *", state.name, state.format, analysisutil.Format(pass.Fset, arg), details)
return false
}
}
@@ -888,12 +907,16 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, state *formatState) (o
pass.ReportRangef(call, "%s format %s arg %s is a func value, not called", state.name, state.format, analysisutil.Format(pass.Fset, arg))
return false
}
- if !matchArgType(pass, v.typ, nil, arg) {
+ if reason, ok := matchArgType(pass, v.typ, arg); !ok {
typeString := ""
if typ := pass.TypesInfo.Types[arg].Type; typ != nil {
typeString = typ.String()
}
- pass.ReportRangef(call, "%s format %s has arg %s of wrong type %s", state.name, state.format, analysisutil.Format(pass.Fset, arg), typeString)
+ details := ""
+ if reason != "" {
+ details = " (" + reason + ")"
+ }
+ pass.ReportRangef(call, "%s format %s has arg %s of wrong type %s%s", state.name, state.format, analysisutil.Format(pass.Fset, arg), typeString, details)
return false
}
if v.typ&argString != 0 && v.verb != 'T' && !bytes.Contains(state.flags, []byte{'#'}) {
@@ -949,7 +972,7 @@ func recursiveStringer(pass *analysis.Pass, e ast.Expr) (string, bool) {
}
if id, ok := e.(*ast.Ident); ok {
if pass.TypesInfo.Uses[id] == sig.Recv() {
- return method.Name(), true
+ return method.FullName(), true
}
}
return "", false
@@ -1044,40 +1067,39 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) {
if sel, ok := call.Args[0].(*ast.SelectorExpr); ok {
if x, ok := sel.X.(*ast.Ident); ok {
if x.Name == "os" && strings.HasPrefix(sel.Sel.Name, "Std") {
- pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", fn.Name(), analysisutil.Format(pass.Fset, call.Args[0]))
+ pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", fn.FullName(), analysisutil.Format(pass.Fset, call.Args[0]))
}
}
}
}
arg := args[0]
- if lit, ok := arg.(*ast.BasicLit); ok && lit.Kind == token.STRING {
- // Ignore trailing % character in lit.Value.
+ if s, ok := stringConstantExpr(pass, arg); ok {
+ // Ignore trailing % character
// The % in "abc 0.0%" couldn't be a formatting directive.
- s := strings.TrimSuffix(lit.Value, `%"`)
+ s = strings.TrimSuffix(s, "%")
if strings.Contains(s, "%") {
m := printFormatRE.FindStringSubmatch(s)
if m != nil {
- pass.ReportRangef(call, "%s call has possible formatting directive %s", fn.Name(), m[0])
+ pass.ReportRangef(call, "%s call has possible formatting directive %s", fn.FullName(), m[0])
}
}
}
if strings.HasSuffix(fn.Name(), "ln") {
// The last item, if a string, should not have a newline.
arg = args[len(args)-1]
- if lit, ok := arg.(*ast.BasicLit); ok && lit.Kind == token.STRING {
- str, _ := strconv.Unquote(lit.Value)
- if strings.HasSuffix(str, "\n") {
- pass.ReportRangef(call, "%s arg list ends with redundant newline", fn.Name())
+ if s, ok := stringConstantExpr(pass, arg); ok {
+ if strings.HasSuffix(s, "\n") {
+ pass.ReportRangef(call, "%s arg list ends with redundant newline", fn.FullName())
}
}
}
for _, arg := range args {
if isFunctionValue(pass, arg) {
- pass.ReportRangef(call, "%s arg %s is a func value, not called", fn.Name(), analysisutil.Format(pass.Fset, arg))
+ pass.ReportRangef(call, "%s arg %s is a func value, not called", fn.FullName(), analysisutil.Format(pass.Fset, arg))
}
if methodName, ok := recursiveStringer(pass, arg); ok {
- pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", fn.Name(), analysisutil.Format(pass.Fset, arg), methodName)
+ pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", fn.FullName(), analysisutil.Format(pass.Fset, arg), methodName)
}
}
}
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/printf/types.go b/libgo/go/golang.org/x/tools/go/analysis/passes/printf/types.go
index 6a5fae44f46..270e917c809 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/printf/types.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/printf/types.go
@@ -5,45 +5,60 @@
package printf
import (
+ "fmt"
"go/ast"
"go/types"
"golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+ "golang.org/x/tools/internal/typeparams"
)
var errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface)
-// matchArgType reports an error if printf verb t is not appropriate
-// for operand arg.
+// matchArgType reports an error if printf verb t is not appropriate for
+// operand arg.
//
-// typ is used only for recursive calls; external callers must supply nil.
-//
-// (Recursion arises from the compound types {map,chan,slice} which
-// may be printed with %d etc. if that is appropriate for their element
-// types.)
-func matchArgType(pass *analysis.Pass, t printfArgType, typ types.Type, arg ast.Expr) bool {
- return matchArgTypeInternal(pass, t, typ, arg, make(map[types.Type]bool))
-}
-
-// matchArgTypeInternal is the internal version of matchArgType. It carries a map
-// remembering what types are in progress so we don't recur when faced with recursive
-// types or mutually recursive types.
-func matchArgTypeInternal(pass *analysis.Pass, t printfArgType, typ types.Type, arg ast.Expr, inProgress map[types.Type]bool) bool {
+// If arg is a type parameter, the verb t must be appropriate for every type in
+// the type parameter type set.
+func matchArgType(pass *analysis.Pass, t printfArgType, arg ast.Expr) (reason string, ok bool) {
// %v, %T accept any argument type.
if t == anyType {
- return true
+ return "", true
}
+
+ typ := pass.TypesInfo.Types[arg].Type
if typ == nil {
- // external call
- typ = pass.TypesInfo.Types[arg].Type
- if typ == nil {
- return true // probably a type check problem
- }
+ return "", true // probably a type check problem
}
+ m := &argMatcher{t: t, seen: make(map[types.Type]bool)}
+ ok = m.match(typ, true)
+ return m.reason, ok
+}
+
+// argMatcher recursively matches types against the printfArgType t.
+//
+// To short-circuit recursion, it keeps track of types that have already been
+// matched (or are in the process of being matched) via the seen map. Recursion
+// arises from the compound types {map,chan,slice} which may be printed with %d
+// etc. if that is appropriate for their element types, as well as from type
+// parameters, which are expanded to the constituents of their type set.
+//
+// The reason field may be set to report the cause of the mismatch.
+type argMatcher struct {
+ t printfArgType
+ seen map[types.Type]bool
+ reason string
+}
+
+// match checks if typ matches m's printf arg type. If topLevel is true, typ is
+// the actual type of the printf arg, for which special rules apply. As a
+// special case, top level type parameters pass topLevel=true when checking for
+// matches among the constituents of their type set, as type arguments will
+// replace the type parameter at compile time.
+func (m *argMatcher) match(typ types.Type, topLevel bool) bool {
// %w accepts only errors.
- if t == argError {
+ if m.t == argError {
return types.ConvertibleTo(typ, errorType)
}
@@ -51,65 +66,122 @@ func matchArgTypeInternal(pass *analysis.Pass, t printfArgType, typ types.Type,
if isFormatter(typ) {
return true
}
+
// If we can use a string, might arg (dynamically) implement the Stringer or Error interface?
- if t&argString != 0 && isConvertibleToString(pass, typ) {
+ if m.t&argString != 0 && isConvertibleToString(typ) {
+ return true
+ }
+
+ if typ, _ := typ.(*typeparams.TypeParam); typ != nil {
+ // Avoid infinite recursion through type parameters.
+ if m.seen[typ] {
+ return true
+ }
+ m.seen[typ] = true
+ terms, err := typeparams.StructuralTerms(typ)
+ if err != nil {
+ return true // invalid type (possibly an empty type set)
+ }
+
+ if len(terms) == 0 {
+ // No restrictions on the underlying of typ. Type parameters implementing
+ // error, fmt.Formatter, or fmt.Stringer were handled above, and %v and
+ // %T were handled in matchArgType. We're about to check restrictions on the
+ // underlying; if the underlying type is unrestricted there must be an
+ // element of the type set that violates one of the arg type checks
+ // below, so we can safely return false here.
+
+ if m.t == anyType { // anyType must have already been handled.
+ panic("unexpected printfArgType")
+ }
+ return false
+ }
+
+ // Only report a reason if typ is the argument type, otherwise it won't
+ // make sense. Note that it is not sufficient to check topLevel here,
+ // as type parameters can have a type set consisting of other type
+ // parameters.
+ reportReason := len(m.seen) == 1
+
+ for _, term := range terms {
+ if !m.match(term.Type(), topLevel) {
+ if reportReason {
+ if term.Tilde() {
+ m.reason = fmt.Sprintf("contains ~%s", term.Type())
+ } else {
+ m.reason = fmt.Sprintf("contains %s", term.Type())
+ }
+ }
+ return false
+ }
+ }
return true
}
typ = typ.Underlying()
- if inProgress[typ] {
- // We're already looking at this type. The call that started it will take care of it.
+ if m.seen[typ] {
+ // We've already considered typ, or are in the process of considering it.
+ // In case we've already considered typ, it must have been valid (else we
+ // would have stopped matching). In case we're in the process of
+ // considering it, we must avoid infinite recursion.
+ //
+ // There are some pathological cases where returning true here is
+ // incorrect, for example `type R struct { F []R }`, but these are
+ // acceptable false negatives.
return true
}
- inProgress[typ] = true
+ m.seen[typ] = true
switch typ := typ.(type) {
case *types.Signature:
- return t == argPointer
+ return m.t == argPointer
case *types.Map:
- return t == argPointer ||
- // Recur: map[int]int matches %d.
- (matchArgTypeInternal(pass, t, typ.Key(), arg, inProgress) && matchArgTypeInternal(pass, t, typ.Elem(), arg, inProgress))
+ if m.t == argPointer {
+ return true
+ }
+ // Recur: map[int]int matches %d.
+ return m.match(typ.Key(), false) && m.match(typ.Elem(), false)
case *types.Chan:
- return t&argPointer != 0
+ return m.t&argPointer != 0
case *types.Array:
// Same as slice.
- if types.Identical(typ.Elem().Underlying(), types.Typ[types.Byte]) && t&argString != 0 {
+ if types.Identical(typ.Elem().Underlying(), types.Typ[types.Byte]) && m.t&argString != 0 {
return true // %s matches []byte
}
// Recur: []int matches %d.
- return matchArgTypeInternal(pass, t, typ.Elem(), arg, inProgress)
+ return m.match(typ.Elem(), false)
case *types.Slice:
// Same as array.
- if types.Identical(typ.Elem().Underlying(), types.Typ[types.Byte]) && t&argString != 0 {
+ if types.Identical(typ.Elem().Underlying(), types.Typ[types.Byte]) && m.t&argString != 0 {
return true // %s matches []byte
}
- if t == argPointer {
+ if m.t == argPointer {
return true // %p prints a slice's 0th element
}
// Recur: []int matches %d. But watch out for
// type T []T
// If the element is a pointer type (type T[]*T), it's handled fine by the Pointer case below.
- return matchArgTypeInternal(pass, t, typ.Elem(), arg, inProgress)
+ return m.match(typ.Elem(), false)
case *types.Pointer:
// Ugly, but dealing with an edge case: a known pointer to an invalid type,
// probably something from a failed import.
- if typ.Elem().String() == "invalid type" {
- if false {
- pass.Reportf(arg.Pos(), "printf argument %v is pointer to invalid or unknown type", analysisutil.Format(pass.Fset, arg))
- }
+ if typ.Elem() == types.Typ[types.Invalid] {
return true // special case
}
// If it's actually a pointer with %p, it prints as one.
- if t == argPointer {
+ if m.t == argPointer {
return true
}
+ if typeparams.IsTypeParam(typ.Elem()) {
+ return true // We don't know whether the logic below applies. Give up.
+ }
+
under := typ.Elem().Underlying()
switch under.(type) {
case *types.Struct: // see below
@@ -118,19 +190,31 @@ func matchArgTypeInternal(pass *analysis.Pass, t printfArgType, typ types.Type,
case *types.Map: // see below
default:
// Check whether the rest can print pointers.
- return t&argPointer != 0
+ return m.t&argPointer != 0
}
- // If it's a top-level pointer to a struct, array, slice, or
+ // If it's a top-level pointer to a struct, array, slice, type param, or
// map, that's equivalent in our analysis to whether we can
// print the type being pointed to. Pointers in nested levels
// are not supported to minimize fmt running into loops.
- if len(inProgress) > 1 {
+ if !topLevel {
return false
}
- return matchArgTypeInternal(pass, t, under, arg, inProgress)
+ return m.match(under, false)
case *types.Struct:
- return matchStructArgType(pass, t, typ, arg, inProgress)
+ // report whether all the elements of the struct match the expected type. For
+ // instance, with "%d" all the elements must be printable with the "%d" format.
+ for i := 0; i < typ.NumFields(); i++ {
+ typf := typ.Field(i)
+ if !m.match(typf.Type(), false) {
+ return false
+ }
+ if m.t&argString != 0 && !typf.Exported() && isConvertibleToString(typf.Type()) {
+ // Issue #17798: unexported Stringer or error cannot be properly formatted.
+ return false
+ }
+ }
+ return true
case *types.Interface:
// There's little we can do.
@@ -142,7 +226,7 @@ func matchArgTypeInternal(pass *analysis.Pass, t printfArgType, typ types.Type,
switch typ.Kind() {
case types.UntypedBool,
types.Bool:
- return t&argBool != 0
+ return m.t&argBool != 0
case types.UntypedInt,
types.Int,
@@ -156,35 +240,32 @@ func matchArgTypeInternal(pass *analysis.Pass, t printfArgType, typ types.Type,
types.Uint32,
types.Uint64,
types.Uintptr:
- return t&argInt != 0
+ return m.t&argInt != 0
case types.UntypedFloat,
types.Float32,
types.Float64:
- return t&argFloat != 0
+ return m.t&argFloat != 0
case types.UntypedComplex,
types.Complex64,
types.Complex128:
- return t&argComplex != 0
+ return m.t&argComplex != 0
case types.UntypedString,
types.String:
- return t&argString != 0
+ return m.t&argString != 0
case types.UnsafePointer:
- return t&(argPointer|argInt) != 0
+ return m.t&(argPointer|argInt) != 0
case types.UntypedRune:
- return t&(argInt|argRune) != 0
+ return m.t&(argInt|argRune) != 0
case types.UntypedNil:
return false
case types.Invalid:
- if false {
- pass.Reportf(arg.Pos(), "printf argument %v has invalid or unknown type", analysisutil.Format(pass.Fset, arg))
- }
return true // Probably a type check problem.
}
panic("unreachable")
@@ -193,7 +274,7 @@ func matchArgTypeInternal(pass *analysis.Pass, t printfArgType, typ types.Type,
return false
}
-func isConvertibleToString(pass *analysis.Pass, typ types.Type) bool {
+func isConvertibleToString(typ types.Type) bool {
if bt, ok := typ.(*types.Basic); ok && bt.Kind() == types.UntypedNil {
// We explicitly don't want untyped nil, which is
// convertible to both of the interfaces below, as it
@@ -228,19 +309,3 @@ func hasBasicType(pass *analysis.Pass, x ast.Expr, kind types.BasicKind) bool {
b, ok := t.(*types.Basic)
return ok && b.Kind() == kind
}
-
-// matchStructArgType reports whether all the elements of the struct match the expected
-// type. For instance, with "%d" all the elements must be printable with the "%d" format.
-func matchStructArgType(pass *analysis.Pass, t printfArgType, typ *types.Struct, arg ast.Expr, inProgress map[types.Type]bool) bool {
- for i := 0; i < typ.NumFields(); i++ {
- typf := typ.Field(i)
- if !matchArgTypeInternal(pass, t, typf.Type(), arg, inProgress) {
- return false
- }
- if t&argString != 0 && !typf.Exported() && isConvertibleToString(pass, typf.Type()) {
- // Issue #17798: unexported Stringer or error cannot be properly formatted.
- return false
- }
- }
- return true
-}
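A hypothetical pair of functions showing the new type-parameter rule in matchArgType: the verb must suit every term of the argument's type set, and the offending term is surfaced as the reason.

package example

import "fmt"

// logInt is fine: every term in the type set of T prints with %d.
func logInt[T ~int | ~int64](x T) string {
	return fmt.Sprintf("%d", x)
}

// logMixed is reported, roughly "has arg x of wrong type T (contains ~string)",
// because the ~string term does not satisfy %d.
func logMixed[T ~int | ~string](x T) string {
	return fmt.Sprintf("%d", x)
}
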
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/shift/shift.go b/libgo/go/golang.org/x/tools/go/analysis/passes/shift/shift.go
index 1f3df07ccd1..e968f27b403 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/shift/shift.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/shift/shift.go
@@ -14,11 +14,14 @@ import (
"go/ast"
"go/constant"
"go/token"
+ "go/types"
+ "math"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/typeparams"
)
const Doc = "check for shifts that equal or exceed the width of the integer"
@@ -93,9 +96,36 @@ func checkLongShift(pass *analysis.Pass, node ast.Node, x, y ast.Expr) {
if t == nil {
return
}
- size := 8 * pass.TypesSizes.Sizeof(t)
- if amt >= size {
+ var structuralTypes []types.Type
+ switch t := t.(type) {
+ case *typeparams.TypeParam:
+ terms, err := typeparams.StructuralTerms(t)
+ if err != nil {
+ return // invalid type
+ }
+ for _, term := range terms {
+ structuralTypes = append(structuralTypes, term.Type())
+ }
+ default:
+ structuralTypes = append(structuralTypes, t)
+ }
+ sizes := make(map[int64]struct{})
+ for _, t := range structuralTypes {
+ size := 8 * pass.TypesSizes.Sizeof(t)
+ sizes[size] = struct{}{}
+ }
+ minSize := int64(math.MaxInt64)
+ for size := range sizes {
+ if size < minSize {
+ minSize = size
+ }
+ }
+ if amt >= minSize {
ident := analysisutil.Format(pass.Fset, x)
- pass.ReportRangef(node, "%s (%d bits) too small for shift of %d", ident, size, amt)
+ qualifier := ""
+ if len(sizes) > 1 {
+ qualifier = "may be "
+ }
+ pass.ReportRangef(node, "%s (%s%d bits) too small for shift of %d", ident, qualifier, minSize, amt)
}
}
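An illustrative generic shift that the updated check now sizes conservatively: the smallest width in the type set decides, and the message is hedged with "may be" when the widths differ.

package example

// shift17 is reported along the lines of
// "x (may be 16 bits) too small for shift of 17",
// because T can be instantiated with the 16-bit int16.
func shift17[T int16 | int32 | int64](x T) T {
	return x << 17
}
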
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go b/libgo/go/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go
index 64a28ac0b97..cc9497179da 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go
@@ -61,7 +61,7 @@ var Analyzer = &analysis.Analyzer{
// we let it go. But if it does have a fmt.ScanState, then the
// rest has to match.
var canonicalMethods = map[string]struct{ args, results []string }{
- "As": {[]string{"interface{}"}, []string{"bool"}}, // errors.As
+ "As": {[]string{"any"}, []string{"bool"}}, // errors.As
// "Flush": {{}, {"error"}}, // http.Flusher and jpeg.writer conflict
"Format": {[]string{"=fmt.State", "rune"}, []string{}}, // fmt.Formatter
"GobDecode": {[]string{"[]byte"}, []string{"error"}}, // gob.GobDecoder
@@ -194,7 +194,9 @@ func matchParams(pass *analysis.Pass, expect []string, actual *types.Tuple, pref
func matchParamType(expect string, actual types.Type) bool {
expect = strings.TrimPrefix(expect, "=")
// Overkill but easy.
- return typeString(actual) == expect
+ t := typeString(actual)
+ return t == expect ||
+ (t == "any" || t == "interface{}") && (expect == "any" || expect == "interface{}")
}
var errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface)
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/libgo/go/golang.org/x/tools/go/analysis/passes/stringintconv/string.go
index 7a005901e84..e41de809de1 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/stringintconv/string.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/stringintconv/string.go
@@ -10,10 +10,12 @@ import (
"fmt"
"go/ast"
"go/types"
+ "strings"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/typeparams"
)
const Doc = `check for string(int) conversions
@@ -36,6 +38,35 @@ var Analyzer = &analysis.Analyzer{
Run: run,
}
+// describe returns a string describing the type typ contained within the type
+// set of inType. If non-empty, inName is used as the name of inType (this is
+// necessary so that we can use alias type names that may not be reachable from
+// inType itself).
+func describe(typ, inType types.Type, inName string) string {
+ name := inName
+ if typ != inType {
+ name = typeName(typ)
+ }
+ if name == "" {
+ return ""
+ }
+
+ var parentheticals []string
+ if underName := typeName(typ.Underlying()); underName != "" && underName != name {
+ parentheticals = append(parentheticals, underName)
+ }
+
+ if typ != inType && inName != "" && inName != name {
+ parentheticals = append(parentheticals, "in "+inName)
+ }
+
+ if len(parentheticals) > 0 {
+ name += " (" + strings.Join(parentheticals, ", ") + ")"
+ }
+
+ return name
+}
+
func typeName(typ types.Type) string {
if v, _ := typ.(interface{ Name() string }); v != nil {
return v.Name()
@@ -54,6 +85,11 @@ func run(pass *analysis.Pass) (interface{}, error) {
inspect.Preorder(nodeFilter, func(n ast.Node) {
call := n.(*ast.CallExpr)
+ if len(call.Args) != 1 {
+ return
+ }
+ arg := call.Args[0]
+
// Retrieve target type name.
var tname *types.TypeName
switch fun := call.Fun.(type) {
@@ -65,62 +101,119 @@ func run(pass *analysis.Pass) (interface{}, error) {
if tname == nil {
return
}
- target := tname.Name()
- // Check that target type T in T(v) has an underlying type of string.
- T, _ := tname.Type().Underlying().(*types.Basic)
- if T == nil || T.Kind() != types.String {
- return
+ // In the conversion T(v) of a value v of type V to a target type T, we
+ // look for types T0 in the type set of T and V0 in the type set of V, such
+ // that V0->T0 is a problematic conversion. If T and V are not type
+ // parameters, this amounts to just checking if V->T is a problematic
+ // conversion.
+
+ // First, find a type T0 in T that has an underlying type of string.
+ T := tname.Type()
+ ttypes, err := structuralTypes(T)
+ if err != nil {
+ return // invalid type
}
- if s := T.Name(); target != s {
- target += " (" + s + ")"
+
+ var T0 types.Type // string type in the type set of T
+
+ for _, tt := range ttypes {
+ u, _ := tt.Underlying().(*types.Basic)
+ if u != nil && u.Kind() == types.String {
+ T0 = tt
+ break
+ }
}
- // Check that type V of v has an underlying integral type that is not byte or rune.
- if len(call.Args) != 1 {
+ if T0 == nil {
+ // No target types have an underlying type of string.
return
}
- v := call.Args[0]
- vtyp := pass.TypesInfo.TypeOf(v)
- V, _ := vtyp.Underlying().(*types.Basic)
- if V == nil || V.Info()&types.IsInteger == 0 {
- return
+
+ // Next, find a type V0 in V that has an underlying integral type that is
+ // not byte or rune.
+ V := pass.TypesInfo.TypeOf(arg)
+ vtypes, err := structuralTypes(V)
+ if err != nil {
+ return // invalid type
}
- switch V.Kind() {
- case types.Byte, types.Rune, types.UntypedRune:
- return
+
+ var V0 types.Type // integral type in the type set of V
+
+ for _, vt := range vtypes {
+ u, _ := vt.Underlying().(*types.Basic)
+ if u != nil && u.Info()&types.IsInteger != 0 {
+ switch u.Kind() {
+ case types.Byte, types.Rune, types.UntypedRune:
+ continue
+ }
+ V0 = vt
+ break
+ }
}
- // Retrieve source type name.
- source := typeName(vtyp)
- if source == "" {
+ if V0 == nil {
+ // No source type is an integer type other than byte or rune.
return
}
- if s := V.Name(); source != s {
- source += " (" + s + ")"
+
+ convertibleToRune := true // if true, we can suggest a fix
+ for _, t := range vtypes {
+ if !types.ConvertibleTo(t, types.Typ[types.Rune]) {
+ convertibleToRune = false
+ break
+ }
+ }
+
+ target := describe(T0, T, tname.Name())
+ source := describe(V0, V, typeName(V))
+
+ if target == "" || source == "" {
+ return // something went wrong
}
+
diag := analysis.Diagnostic{
Pos: n.Pos(),
Message: fmt.Sprintf("conversion from %s to %s yields a string of one rune, not a string of digits (did you mean fmt.Sprint(x)?)", source, target),
- SuggestedFixes: []analysis.SuggestedFix{
+ }
+
+ if convertibleToRune {
+ diag.SuggestedFixes = []analysis.SuggestedFix{
{
Message: "Did you mean to convert a rune to a string?",
TextEdits: []analysis.TextEdit{
{
- Pos: v.Pos(),
- End: v.Pos(),
+ Pos: arg.Pos(),
+ End: arg.Pos(),
NewText: []byte("rune("),
},
{
- Pos: v.End(),
- End: v.End(),
+ Pos: arg.End(),
+ End: arg.End(),
NewText: []byte(")"),
},
},
},
- },
+ }
}
pass.Report(diag)
})
return nil, nil
}
+
+func structuralTypes(t types.Type) ([]types.Type, error) {
+ var structuralTypes []types.Type
+ switch t := t.(type) {
+ case *typeparams.TypeParam:
+ terms, err := typeparams.StructuralTerms(t)
+ if err != nil {
+ return nil, err
+ }
+ for _, term := range terms {
+ structuralTypes = append(structuralTypes, term.Type())
+ }
+ default:
+ structuralTypes = append(structuralTypes, t)
+ }
+ return structuralTypes, nil
+}
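A hypothetical conversion the extended stringintconv check can now describe: the source is a type parameter whose type set contains an integer type other than byte or rune.

package example

// toString is flagged: string(x) yields a one-rune string for the int64 term
// in T's type set, so the analyzer suggests fmt.Sprint(x) (or string(rune(x))).
func toString[T ~int64](x T) string {
	return string(x)
}
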
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/libgo/go/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go
index d2b9a5640d9..7ea8f77e335 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go
@@ -11,6 +11,7 @@ import (
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/typeparams"
)
const Doc = `report calls to (*testing.T).Fatal from goroutines started by a test.
@@ -119,11 +120,44 @@ func typeIsTestingDotTOrB(expr ast.Expr) (string, bool) {
return varTypeName, ok
}
+// goStmtFun returns the ast.Node of a call expression
+// that was invoked as a go statement. Currently, only
+// function literals declared in the same function and
+// static calls within the same package are supported.
+func goStmtFun(goStmt *ast.GoStmt) ast.Node {
+ switch fun := goStmt.Call.Fun.(type) {
+ case *ast.IndexExpr, *typeparams.IndexListExpr:
+ x, _, _, _ := typeparams.UnpackIndexExpr(fun)
+ id, _ := x.(*ast.Ident)
+ if id == nil {
+ break
+ }
+ if id.Obj == nil {
+ break
+ }
+ if funDecl, ok := id.Obj.Decl.(ast.Node); ok {
+ return funDecl
+ }
+ case *ast.Ident:
+ // TODO(cuonglm): improve this once golang/go#48141 is resolved.
+ if fun.Obj == nil {
+ break
+ }
+ if funDecl, ok := fun.Obj.Decl.(ast.Node); ok {
+ return funDecl
+ }
+ case *ast.FuncLit:
+ return goStmt.Call.Fun
+ }
+ return goStmt.Call
+}
+
// checkGoStmt traverses the goroutine and checks for the
// use of the forbidden *testing.(B, T) methods.
func checkGoStmt(pass *analysis.Pass, goStmt *ast.GoStmt) {
+ fn := goStmtFun(goStmt)
// Otherwise examine the goroutine to check for the forbidden methods.
- ast.Inspect(goStmt, func(n ast.Node) bool {
+ ast.Inspect(fn, func(n ast.Node) bool {
selExpr, ok := n.(*ast.SelectorExpr)
if !ok {
return true
@@ -147,7 +181,11 @@ func checkGoStmt(pass *analysis.Pass, goStmt *ast.GoStmt) {
return true
}
if typeName, ok := typeIsTestingDotTOrB(field.Type); ok {
- pass.ReportRangef(selExpr, "call to (*%s).%s from a non-test goroutine", typeName, selExpr.Sel)
+ var fnRange analysis.Range = goStmt
+ if _, ok := fn.(*ast.FuncLit); ok {
+ fnRange = selExpr
+ }
+ pass.ReportRangef(fnRange, "call to (*%s).%s from a non-test goroutine", typeName, selExpr.Sel)
}
return true
})
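
As a hedged sketch of the pattern this analyzer targets: (*testing.T).Fatal and friends must not be called from a goroutine started by the test; t.Error plus explicit synchronization is the usual replacement. The helper name doWork below is illustrative only.

package example_test

import (
	"sync"
	"testing"
)

func TestWorker(t *testing.T) {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := doWork(); err != nil {
			// t.Fatal(err) here would be flagged: Fatal must only be called
			// from the goroutine running the test function.
			t.Error(err) // Error is safe to call from other goroutines.
		}
	}()
	wg.Wait()
}

// doWork stands in for the code under test.
func doWork() error { return nil }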
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/tests/tests.go b/libgo/go/golang.org/x/tools/go/analysis/passes/tests/tests.go
index 8232276186a..2c878824966 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/tests/tests.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/tests/tests.go
@@ -8,12 +8,15 @@ package tests
import (
"go/ast"
+ "go/token"
"go/types"
+ "regexp"
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/internal/typeparams"
)
const Doc = `check for common mistaken usages of tests and examples
@@ -42,10 +45,10 @@ func run(pass *analysis.Pass) (interface{}, error) {
// Ignore non-functions or functions with receivers.
continue
}
-
switch {
case strings.HasPrefix(fn.Name.Name, "Example"):
- checkExample(pass, fn)
+ checkExampleName(pass, fn)
+ checkExampleOutput(pass, fn, f.Comments)
case strings.HasPrefix(fn.Name.Name, "Test"):
checkTest(pass, fn, "Test")
case strings.HasPrefix(fn.Name.Name, "Benchmark"):
@@ -108,7 +111,59 @@ func lookup(pkg *types.Package, name string) []types.Object {
return ret
}
-func checkExample(pass *analysis.Pass, fn *ast.FuncDecl) {
+// This pattern is taken from /go/src/go/doc/example.go
+var outputRe = regexp.MustCompile(`(?i)^[[:space:]]*(unordered )?output:`)
+
+type commentMetadata struct {
+ isOutput bool
+ pos token.Pos
+}
+
+func checkExampleOutput(pass *analysis.Pass, fn *ast.FuncDecl, fileComments []*ast.CommentGroup) {
+ commentsInExample := []commentMetadata{}
+ numOutputs := 0
+
+ // Find the comment blocks that are in the example. These comments are
+ // guaranteed to be in order of appearance.
+ for _, cg := range fileComments {
+ if cg.Pos() < fn.Pos() {
+ continue
+ } else if cg.End() > fn.End() {
+ break
+ }
+
+ isOutput := outputRe.MatchString(cg.Text())
+ if isOutput {
+ numOutputs++
+ }
+
+ commentsInExample = append(commentsInExample, commentMetadata{
+ isOutput: isOutput,
+ pos: cg.Pos(),
+ })
+ }
+
+ // Change message based on whether there are multiple output comment blocks.
+ msg := "output comment block must be the last comment block"
+ if numOutputs > 1 {
+ msg = "there can only be one output comment block per example"
+ }
+
+ for i, cg := range commentsInExample {
+ // Check for output comments that are not the last comment in the example.
+ isLast := (i == len(commentsInExample)-1)
+ if cg.isOutput && !isLast {
+ pass.Report(
+ analysis.Diagnostic{
+ Pos: cg.pos,
+ Message: msg,
+ },
+ )
+ }
+ }
+}
+
+func checkExampleName(pass *analysis.Pass, fn *ast.FuncDecl) {
fnName := fn.Name.Name
if params := fn.Type.Params; len(params.List) != 0 {
pass.Reportf(fn.Pos(), "%s should be niladic", fnName)
@@ -116,6 +171,9 @@ func checkExample(pass *analysis.Pass, fn *ast.FuncDecl) {
if results := fn.Type.Results; results != nil && len(results.List) != 0 {
pass.Reportf(fn.Pos(), "%s should return nothing", fnName)
}
+ if tparams := typeparams.ForFuncType(fn.Type); tparams != nil && len(tparams.List) > 0 {
+ pass.Reportf(fn.Pos(), "%s should not have type params", fnName)
+ }
if fnName == "Example" {
// Nothing more to do.
@@ -182,6 +240,12 @@ func checkTest(pass *analysis.Pass, fn *ast.FuncDecl, prefix string) {
return
}
+ if tparams := typeparams.ForFuncType(fn.Type); tparams != nil && len(tparams.List) > 0 {
+ // Note: cmd/go/internal/load also errors about TestXXX and BenchmarkXXX functions with type parameters.
+ // We have currently decided to also warn before compilation/package loading. This can help users in IDEs.
+ pass.Reportf(fn.Pos(), "%s has type parameters: it will not be run by go test as a %sXXX function", fn.Name.Name, prefix)
+ }
+
if !isTestSuffix(fn.Name.Name[len(prefix):]) {
pass.Reportf(fn.Pos(), "%s has malformed name: first letter after '%s' must not be lowercase", fn.Name.Name, prefix)
}
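
A minimal sketch of the output-comment rule enforced by checkExampleOutput above (the example names are illustrative):

package example_test

import "fmt"

// Example_good has its output comment last, so the tests analyzer is satisfied.
func Example_good() {
	fmt.Println("hello")
	// Output: hello
}

// Example_bad is reported: the output block is followed by another comment
// block inside the example.
func Example_bad() {
	fmt.Println("hello")
	// Output: hello

	// A trailing note after the output block triggers
	// "output comment block must be the last comment block".
}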
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/libgo/go/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
index 92b37caff9f..5129048a076 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
@@ -14,6 +14,7 @@ import (
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typeparams"
)
const Doc = `report passing non-pointer or non-interface values to unmarshal
@@ -85,7 +86,7 @@ func run(pass *analysis.Pass) (interface{}, error) {
t := pass.TypesInfo.Types[call.Args[argidx]].Type
switch t.Underlying().(type) {
- case *types.Pointer, *types.Interface:
+ case *types.Pointer, *types.Interface, *typeparams.TypeParam:
return
}
diff --git a/libgo/go/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/libgo/go/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go
index bececee7e93..06747ba72b9 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go
@@ -17,6 +17,7 @@ import (
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/typeparams"
)
// TODO(adonovan): make this analysis modular: export a mustUseResult
@@ -70,6 +71,11 @@ func run(pass *analysis.Pass) (interface{}, error) {
return // a conversion, not a call
}
+ x, _, _, _ := typeparams.UnpackIndexExpr(fun)
+ if x != nil {
+ fun = x // If this is a generic function or method call, skip the instantiation arguments
+ }
+
selector, ok := fun.(*ast.SelectorExpr)
if !ok {
return // neither a method call nor a qualified ident
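
The unwrapping step above can be reproduced with the standard library alone; a small sketch (hypothetical pkg.Min call) of skipping the instantiation argument to reach the selector:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

func main() {
	// Hypothetical generic call: the callee is wrapped in an index expression.
	expr, err := parser.ParseExpr(`pkg.Min[int](1, 2)`)
	if err != nil {
		panic(err)
	}
	call := expr.(*ast.CallExpr)
	fun := call.Fun
	// Skip the instantiation argument, mirroring the analyzer's unwrapping step.
	if ix, ok := fun.(*ast.IndexExpr); ok {
		fun = ix.X
	}
	sel := fun.(*ast.SelectorExpr)
	fmt.Println(sel.Sel.Name) // Min
}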
diff --git a/libgo/go/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/libgo/go/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go
index 323282136fb..393954a238b 100644
--- a/libgo/go/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go
+++ b/libgo/go/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go
@@ -51,6 +51,7 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/internal/analysisflags"
"golang.org/x/tools/go/analysis/internal/facts"
+ "golang.org/x/tools/internal/typeparams"
)
// A Config describes a compilation unit to be analyzed.
@@ -233,6 +234,8 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re
Scopes: make(map[ast.Node]*types.Scope),
Selections: make(map[*ast.SelectorExpr]*types.Selection),
}
+ typeparams.InitInstanceInfo(info)
+
pkg, err := tc.Check(cfg.ImportPath, fset, files, info)
if err != nil {
if cfg.SucceedOnTypecheckFailure {
diff --git a/libgo/go/golang.org/x/tools/go/ast/astutil/enclosing.go b/libgo/go/golang.org/x/tools/go/ast/astutil/enclosing.go
index 6b7052b892c..a5c6d6d4fa0 100644
--- a/libgo/go/golang.org/x/tools/go/ast/astutil/enclosing.go
+++ b/libgo/go/golang.org/x/tools/go/ast/astutil/enclosing.go
@@ -11,6 +11,8 @@ import (
"go/ast"
"go/token"
"sort"
+
+ "golang.org/x/tools/internal/typeparams"
)
// PathEnclosingInterval returns the node that encloses the source
@@ -294,8 +296,8 @@ func childrenOf(n ast.Node) []ast.Node {
case *ast.FieldList:
children = append(children,
- tok(n.Opening, len("(")),
- tok(n.Closing, len(")")))
+ tok(n.Opening, len("(")), // or len("[")
+ tok(n.Closing, len(")"))) // or len("]")
case *ast.File:
// TODO test: Doc
@@ -322,6 +324,9 @@ func childrenOf(n ast.Node) []ast.Node {
children = append(children, n.Recv)
}
children = append(children, n.Name)
+ if tparams := typeparams.ForFuncType(n.Type); tparams != nil {
+ children = append(children, tparams)
+ }
if n.Type.Params != nil {
children = append(children, n.Type.Params)
}
@@ -371,8 +376,13 @@ func childrenOf(n ast.Node) []ast.Node {
case *ast.IndexExpr:
children = append(children,
- tok(n.Lbrack, len("{")),
- tok(n.Rbrack, len("}")))
+ tok(n.Lbrack, len("[")),
+ tok(n.Rbrack, len("]")))
+
+ case *typeparams.IndexListExpr:
+ children = append(children,
+ tok(n.Lbrack, len("[")),
+ tok(n.Rbrack, len("]")))
case *ast.InterfaceType:
children = append(children,
@@ -581,6 +591,8 @@ func NodeDescription(n ast.Node) string {
return "decrement statement"
case *ast.IndexExpr:
return "index expression"
+ case *typeparams.IndexListExpr:
+ return "index list expression"
case *ast.InterfaceType:
return "interface type"
case *ast.KeyValueExpr:
diff --git a/libgo/go/golang.org/x/tools/go/ast/astutil/rewrite.go b/libgo/go/golang.org/x/tools/go/ast/astutil/rewrite.go
index cf72ea990bd..6d9ca23e2b0 100644
--- a/libgo/go/golang.org/x/tools/go/ast/astutil/rewrite.go
+++ b/libgo/go/golang.org/x/tools/go/ast/astutil/rewrite.go
@@ -9,6 +9,8 @@ import (
"go/ast"
"reflect"
"sort"
+
+ "golang.org/x/tools/internal/typeparams"
)
// An ApplyFunc is invoked by Apply for each node n, even if n is nil,
@@ -251,6 +253,10 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.
a.apply(n, "X", nil, n.X)
a.apply(n, "Index", nil, n.Index)
+ case *typeparams.IndexListExpr:
+ a.apply(n, "X", nil, n.X)
+ a.applyList(n, "Indices")
+
case *ast.SliceExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Low", nil, n.Low)
diff --git a/libgo/go/golang.org/x/tools/go/ast/inspector/typeof.go b/libgo/go/golang.org/x/tools/go/ast/inspector/typeof.go
index b6b00cf2e1e..11f4fc369a0 100644
--- a/libgo/go/golang.org/x/tools/go/ast/inspector/typeof.go
+++ b/libgo/go/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -9,7 +9,11 @@ package inspector
// The initial map-based implementation was too slow;
// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196
-import "go/ast"
+import (
+ "go/ast"
+
+ "golang.org/x/tools/internal/typeparams"
+)
const (
nArrayType = iota
@@ -47,6 +51,7 @@ const (
nImportSpec
nIncDecStmt
nIndexExpr
+ nIndexListExpr
nInterfaceType
nKeyValueExpr
nLabeledStmt
@@ -164,6 +169,8 @@ func typeOf(n ast.Node) uint64 {
return 1 << nIncDecStmt
case *ast.IndexExpr:
return 1 << nIndexExpr
+ case *typeparams.IndexListExpr:
+ return 1 << nIndexListExpr
case *ast.InterfaceType:
return 1 << nInterfaceType
case *ast.KeyValueExpr:
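
Hedged usage sketch of the inspector with the node kinds touched above: a one-type-argument instantiation such as min[int] parses as an ordinary *ast.IndexExpr, so a Preorder filter over that kind visits both plain indexing and single-argument instantiations (IndexListExpr covers the multi-argument case). The identifier min is assumed to be declared elsewhere; the snippet only parses, it does not type-check.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

const src = `package p

var s = []int{1, 2, 3}
var a = s[0]     // ordinary index expression
var f = min[int] // instantiation with one type argument: also *ast.IndexExpr
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{file})
	in.Preorder([]ast.Node{(*ast.IndexExpr)(nil)}, func(n ast.Node) {
		ix := n.(*ast.IndexExpr)
		fmt.Println("index expression at", fset.Position(ix.Lbrack))
	})
}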
diff --git a/libgo/go/golang.org/x/tools/go/types/objectpath/objectpath.go b/libgo/go/golang.org/x/tools/go/types/objectpath/objectpath.go
index cffd7acbee7..7e96fc234e5 100644
--- a/libgo/go/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ b/libgo/go/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -23,10 +23,12 @@ package objectpath
import (
"fmt"
+ "go/types"
+ "sort"
"strconv"
"strings"
- "go/types"
+ "golang.org/x/tools/internal/typeparams"
)
// A Path is an opaque name that identifies a types.Object
@@ -57,12 +59,16 @@ type Path string
// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
// - The only OT operator is Object.Type,
// which we encode as '.' because dot cannot appear in an identifier.
-// - The TT operators are encoded as [EKPRU].
-// - The OT operators are encoded as [AFMO];
+// - The TT operators are encoded as [EKPRUTC];
+// one of these (TypeParam) requires an integer operand,
+// which is encoded as a string of decimal digits.
+// - The TO operators are encoded as [AFMO];
// three of these (At,Field,Method) require an integer operand,
// which is encoded as a string of decimal digits.
// These indices are stable across different representations
// of the same package, even source and export data.
+// The indices used are implementation specific and may not correspond to
+// the argument to the go/types function.
//
// In the example below,
//
@@ -89,17 +95,19 @@ const (
opType = '.' // .Type() (Object)
// type->type operators
- opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
- opKey = 'K' // .Key() (Map)
- opParams = 'P' // .Params() (Signature)
- opResults = 'R' // .Results() (Signature)
- opUnderlying = 'U' // .Underlying() (Named)
+ opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
+ opKey = 'K' // .Key() (Map)
+ opParams = 'P' // .Params() (Signature)
+ opResults = 'R' // .Results() (Signature)
+ opUnderlying = 'U' // .Underlying() (Named)
+ opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
+ opConstraint = 'C' // .Constraint() (TypeParam)
// type->object operators
- opAt = 'A' // .At(i) (Tuple)
- opField = 'F' // .Field(i) (Struct)
- opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
- opObj = 'O' // .Obj() (Named)
+ opAt = 'A' // .At(i) (Tuple)
+ opField = 'F' // .Field(i) (Struct)
+ opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
+ opObj = 'O' // .Obj() (Named, TypeParam)
)
// The For function returns the path to an object relative to its package,
@@ -190,10 +198,15 @@ func For(obj types.Object) (Path, error) {
// 3. Not a package-level object.
// Reject obviously non-viable cases.
switch obj := obj.(type) {
+ case *types.TypeName:
+ if _, ok := obj.Type().(*typeparams.TypeParam); !ok {
+ // With the exception of type parameters, only package-level type names
+ // have a path.
+ return "", fmt.Errorf("no path for %v", obj)
+ }
case *types.Const, // Only package-level constants have a path.
- *types.TypeName, // Only package-level types have a path.
- *types.Label, // Labels are function-local.
- *types.PkgName: // PkgNames are file-local.
+ *types.Label, // Labels are function-local.
+ *types.PkgName: // PkgNames are file-local.
return "", fmt.Errorf("no path for %v", obj)
case *types.Var:
@@ -245,6 +258,12 @@ func For(obj types.Object) (Path, error) {
return Path(r), nil
}
} else {
+ if named, _ := T.(*types.Named); named != nil {
+ if r := findTypeParam(obj, typeparams.ForNamed(named), path); r != nil {
+ // generic named type
+ return Path(r), nil
+ }
+ }
// defined (named) type
if r := find(obj, T.Underlying(), append(path, opUnderlying)); r != nil {
return Path(r), nil
@@ -270,8 +289,12 @@ func For(obj types.Object) (Path, error) {
// Inspect declared methods of defined types.
if T, ok := o.Type().(*types.Named); ok {
path = append(path, opType)
- for i := 0; i < T.NumMethods(); i++ {
- m := T.Method(i)
+ // Note that method index here is always with respect
+ // to canonical ordering of methods, regardless of how
+ // they appear in the underlying type.
+ canonical := canonicalize(T)
+ for i := 0; i < len(canonical); i++ {
+ m := canonical[i]
path2 := appendOpArg(path, opMethod, i)
if m == obj {
return Path(path2), nil // found declared method
@@ -313,6 +336,9 @@ func find(obj types.Object, T types.Type, path []byte) []byte {
}
return find(obj, T.Elem(), append(path, opElem))
case *types.Signature:
+ if r := findTypeParam(obj, typeparams.ForSignature(T), path); r != nil {
+ return r
+ }
if r := find(obj, T.Params(), append(path, opParams)); r != nil {
return r
}
@@ -353,10 +379,30 @@ func find(obj types.Object, T types.Type, path []byte) []byte {
}
}
return nil
+ case *typeparams.TypeParam:
+ name := T.Obj()
+ if name == obj {
+ return append(path, opObj)
+ }
+ if r := find(obj, T.Constraint(), append(path, opConstraint)); r != nil {
+ return r
+ }
+ return nil
}
panic(T)
}
+func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte) []byte {
+ for i := 0; i < list.Len(); i++ {
+ tparam := list.At(i)
+ path2 := appendOpArg(path, opTypeParam, i)
+ if r := find(obj, tparam, path2); r != nil {
+ return r
+ }
+ }
+ return nil
+}
+
// Object returns the object denoted by path p within the package pkg.
func Object(pkg *types.Package, p Path) (types.Object, error) {
if p == "" {
@@ -381,10 +427,13 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
type hasElem interface {
Elem() types.Type
}
- // abstraction of *types.{Interface,Named}
- type hasMethods interface {
- Method(int) *types.Func
- NumMethods() int
+ // abstraction of *types.{Named,Signature}
+ type hasTypeParams interface {
+ TypeParams() *typeparams.TypeParamList
+ }
+ // abstraction of *types.{Named,TypeParam}
+ type hasObj interface {
+ Obj() *types.TypeName
}
// The loop state is the pair (t, obj),
@@ -401,7 +450,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
// Codes [AFM] have an integer operand.
var index int
switch code {
- case opAt, opField, opMethod:
+ case opAt, opField, opMethod, opTypeParam:
rest := strings.TrimLeft(suffix, "0123456789")
numerals := suffix[:len(suffix)-len(rest)]
suffix = rest
@@ -466,14 +515,32 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
case opUnderlying:
named, ok := t.(*types.Named)
if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t)
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t)
}
t = named.Underlying()
+ case opTypeParam:
+ hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t)
+ }
+ tparams := hasTypeParams.TypeParams()
+ if n := tparams.Len(); index >= n {
+ return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+ }
+ t = tparams.At(index)
+
+ case opConstraint:
+ tparam, ok := t.(*typeparams.TypeParam)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
+ }
+ t = tparam.Constraint()
+
case opAt:
tuple, ok := t.(*types.Tuple)
if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %s, want tuple)", code, t, t)
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t)
}
if n := tuple.Len(); index >= n {
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
@@ -495,20 +562,21 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
case opMethod:
hasMethods, ok := t.(hasMethods) // Interface or Named
if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %s, want interface or named)", code, t, t)
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
}
- if n := hasMethods.NumMethods(); index >= n {
+ canonical := canonicalize(hasMethods)
+ if n := len(canonical); index >= n {
return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n)
}
- obj = hasMethods.Method(index)
+ obj = canonical[index]
t = nil
case opObj:
- named, ok := t.(*types.Named)
+ hasObj, ok := t.(hasObj)
if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t)
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t)
}
- obj = named.Obj()
+ obj = hasObj.Obj()
t = nil
default:
@@ -522,3 +590,28 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
return obj, nil // success
}
+
+// hasMethods is an abstraction of *types.{Interface,Named}. This is pulled up
+// because it is used by canonicalize, which is in turn used by both encoding
+// and decoding.
+type hasMethods interface {
+ Method(int) *types.Func
+ NumMethods() int
+}
+
+// canonicalize returns a canonical order for the methods in a hasMethods.
+func canonicalize(hm hasMethods) []*types.Func {
+ count := hm.NumMethods()
+ if count <= 0 {
+ return nil
+ }
+ canon := make([]*types.Func, count)
+ for i := 0; i < count; i++ {
+ canon[i] = hm.Method(i)
+ }
+ less := func(i, j int) bool {
+ return canon[i].Id() < canon[j].Id()
+ }
+ sort.Slice(canon, less)
+ return canon
+}
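
A round-trip usage sketch of the public objectpath API changed above (the exact path string is an implementation detail, so it is only echoed, not asserted):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

const src = `package p

type T struct{ F int }

func (T) M() {}
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}
	// The method T.M, reached through the package scope.
	obj := pkg.Scope().Lookup("T").(*types.TypeName).Type().(*types.Named).Method(0)

	path, err := objectpath.For(obj)
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // opaque encoding, e.g. "T.M0"

	back, err := objectpath.Object(pkg, path)
	if err != nil {
		panic(err)
	}
	fmt.Println(back == obj) // true: the path round-trips to the same object
}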
diff --git a/libgo/go/golang.org/x/tools/go/types/typeutil/callee.go b/libgo/go/golang.org/x/tools/go/types/typeutil/callee.go
index 38f596daf9e..90b3ab0e21c 100644
--- a/libgo/go/golang.org/x/tools/go/types/typeutil/callee.go
+++ b/libgo/go/golang.org/x/tools/go/types/typeutil/callee.go
@@ -9,13 +9,29 @@ import (
"go/types"
"golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/internal/typeparams"
)
// Callee returns the named target of a function call, if any:
// a function, method, builtin, or variable.
+//
+// Functions and methods may potentially have type parameters.
func Callee(info *types.Info, call *ast.CallExpr) types.Object {
+ fun := astutil.Unparen(call.Fun)
+
+ // Look through type instantiation if necessary.
+ isInstance := false
+ switch fun.(type) {
+ case *ast.IndexExpr, *typeparams.IndexListExpr:
+ // When extracting the callee from an *IndexExpr, we need to check that
+ // it is a *types.Func and not a *types.Var.
+ // Example: Don't match a slice m within the expression `m[0]()`.
+ isInstance = true
+ fun, _, _, _ = typeparams.UnpackIndexExpr(fun)
+ }
+
var obj types.Object
- switch fun := astutil.Unparen(call.Fun).(type) {
+ switch fun := fun.(type) {
case *ast.Ident:
obj = info.Uses[fun] // type, var, builtin, or declared func
case *ast.SelectorExpr:
@@ -28,11 +44,18 @@ func Callee(info *types.Info, call *ast.CallExpr) types.Object {
if _, ok := obj.(*types.TypeName); ok {
return nil // T(x) is a conversion, not a call
}
+ // A Func is required to match instantiations.
+ if _, ok := obj.(*types.Func); isInstance && !ok {
+ return nil // Was not a Func.
+ }
return obj
}
-// StaticCallee returns the target (function or method) of a static
-// function call, if any. It returns nil for calls to builtins.
+// StaticCallee returns the target (function or method) of a static function
+// call, if any. It returns nil for calls to builtins.
+//
+// Note: for calls of instantiated functions and methods, StaticCallee returns
+// the corresponding generic function or method on the generic type.
func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
return f
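
Hedged usage sketch of StaticCallee on a plain, non-generic call; the generic case differs only by the instantiation unwrapping shown in the diff:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

const src = `package p

func helper() {}

func caller() { helper() }
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{
		Uses:       make(map[*ast.Ident]types.Object),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	if _, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			if fn := typeutil.StaticCallee(info, call); fn != nil {
				fmt.Println("static callee:", fn.Name()) // "helper"
			}
		}
		return true
	})
}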
diff --git a/libgo/go/golang.org/x/tools/go/types/typeutil/map.go b/libgo/go/golang.org/x/tools/go/types/typeutil/map.go
index c7f75450064..490ee904a62 100644
--- a/libgo/go/golang.org/x/tools/go/types/typeutil/map.go
+++ b/libgo/go/golang.org/x/tools/go/types/typeutil/map.go
@@ -11,6 +11,8 @@ import (
"fmt"
"go/types"
"reflect"
+
+ "golang.org/x/tools/internal/typeparams"
)
// Map is a hash-table-based mapping from types (types.Type) to
@@ -211,11 +213,29 @@ func (m *Map) KeysString() string {
// Call MakeHasher to create a Hasher.
type Hasher struct {
memo map[types.Type]uint32
+
+ // ptrMap records pointer identity.
+ ptrMap map[interface{}]uint32
+
+ // sigTParams holds type parameters from the signature being hashed.
+ // Signatures are considered identical modulo renaming of type parameters, so
+ // within the scope of a signature type the identity of the signature's type
+ // parameters is just their index.
+ //
+ // Since the language does not currently support referring to uninstantiated
+ // generic types or functions, and instantiated signatures do not have type
+ // parameter lists, we should never encounter a second non-empty type
+ // parameter list when hashing a generic signature.
+ sigTParams *typeparams.TypeParamList
}
// MakeHasher returns a new Hasher instance.
func MakeHasher() Hasher {
- return Hasher{make(map[types.Type]uint32)}
+ return Hasher{
+ memo: make(map[types.Type]uint32),
+ ptrMap: make(map[interface{}]uint32),
+ sigTParams: nil,
+ }
}
// Hash computes a hash value for the given type t such that
@@ -273,17 +293,62 @@ func (h Hasher) hashFor(t types.Type) uint32 {
if t.Variadic() {
hash *= 8863
}
+
+ // Use a separate hasher for types inside of the signature, where type
+ // parameter identity is modified to be (index, constraint). We must use a
+ // new memo for this hasher as type identity may be affected by this
+ // masking. For example, in func[T any](*T), the identity of *T depends on
+ // whether we are mapping the argument in isolation, or recursively as part
+ // of hashing the signature.
+ //
+ // We should never encounter a generic signature while hashing another
+ // generic signature, but defensively set sigTParams only if h.sigTParams is
+ // unset.
+ tparams := typeparams.ForSignature(t)
+ if h.sigTParams == nil && tparams.Len() != 0 {
+ h = Hasher{
+ // There may be something more efficient than discarding the existing
+ // memo, but it would require detecting whether types are 'tainted' by
+ // references to type parameters.
+ memo: make(map[types.Type]uint32),
+ // Re-using ptrMap ensures that pointer identity is preserved in this
+ // hasher.
+ ptrMap: h.ptrMap,
+ sigTParams: tparams,
+ }
+ }
+
+ for i := 0; i < tparams.Len(); i++ {
+ tparam := tparams.At(i)
+ hash += 7 * h.Hash(tparam.Constraint())
+ }
+
return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
+ case *typeparams.Union:
+ return h.hashUnion(t)
+
case *types.Interface:
+ // Interfaces are identical if they have the same set of methods, with
+ // identical names and types, and they have the same set of type
+ // restrictions. See go/types.identical for more details.
var hash uint32 = 9103
+
+ // Hash methods.
for i, n := 0, t.NumMethods(); i < n; i++ {
- // See go/types.identicalMethods for rationale.
// Method order is not significant.
// Ignore m.Pkg().
m := t.Method(i)
hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
}
+
+ // Hash type restrictions.
+ terms, err := typeparams.InterfaceTermSet(t)
+ // If err != nil, t has invalid type restrictions, which contribute nothing to the hash.
+ if err == nil {
+ hash += h.hashTermSet(terms)
+ }
+
return hash
case *types.Map:
@@ -293,13 +358,22 @@ func (h Hasher) hashFor(t types.Type) uint32 {
return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
case *types.Named:
- // Not safe with a copying GC; objects may move.
- return uint32(reflect.ValueOf(t.Obj()).Pointer())
+ hash := h.hashPtr(t.Obj())
+ targs := typeparams.NamedTypeArgs(t)
+ for i := 0; i < targs.Len(); i++ {
+ targ := targs.At(i)
+ hash += 2 * h.Hash(targ)
+ }
+ return hash
+
+ case *typeparams.TypeParam:
+ return h.hashTypeParam(t)
case *types.Tuple:
return h.hashTuple(t)
}
- panic(t)
+
+ panic(fmt.Sprintf("%T: %v", t, t))
}
func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
@@ -311,3 +385,57 @@ func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
}
return hash
}
+
+func (h Hasher) hashUnion(t *typeparams.Union) uint32 {
+ // Hash type restrictions.
+ terms, err := typeparams.UnionTermSet(t)
+ // If err != nil, t has invalid type restrictions. Fall back on a non-zero
+ // hash.
+ if err != nil {
+ return 9151
+ }
+ return h.hashTermSet(terms)
+}
+
+func (h Hasher) hashTermSet(terms []*typeparams.Term) uint32 {
+ var hash uint32 = 9157 + 2*uint32(len(terms))
+ for _, term := range terms {
+ // term order is not significant.
+ termHash := h.Hash(term.Type())
+ if term.Tilde() {
+ termHash *= 9161
+ }
+ hash += 3 * termHash
+ }
+ return hash
+}
+
+// hashTypeParam returns a hash of the type parameter t, with a hash value
+// depending on whether t is contained in h.sigTParams.
+//
+// If h.sigTParams is set and contains t, then we are in the process of hashing
+// a signature, and the hash value of t must depend only on t's index and
+// constraint: signatures are considered identical modulo type parameter
+// renaming.
+//
+// Otherwise the hash of t depends only on t's pointer identity.
+func (h Hasher) hashTypeParam(t *typeparams.TypeParam) uint32 {
+ if h.sigTParams != nil {
+ i := t.Index()
+ if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) {
+ return 9173 + 2*h.Hash(t.Constraint()) + 3*uint32(i)
+ }
+ }
+ return h.hashPtr(t.Obj())
+}
+
+// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that
+// hashes remain stable even if a moving garbage collector relocates ptr.
+func (h Hasher) hashPtr(ptr interface{}) uint32 {
+ if hash, ok := h.ptrMap[ptr]; ok {
+ return hash
+ }
+ hash := uint32(reflect.ValueOf(ptr).Pointer())
+ h.ptrMap[ptr] = hash
+ return hash
+}
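
A small usage sketch of the public Map/Hasher API whose hashing is extended above: keys are compared by type identity, so two structurally identical signatures built independently map to the same entry.

package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	intParam := func(name string) *types.Var {
		return types.NewParam(token.NoPos, nil, name, types.Typ[types.Int])
	}
	// Two structurally identical signatures built independently.
	sig1 := types.NewSignature(nil, types.NewTuple(intParam("x")), nil, false)
	sig2 := types.NewSignature(nil, types.NewTuple(intParam("y")), nil, false)

	var m typeutil.Map
	m.SetHasher(typeutil.MakeHasher())
	m.Set(sig1, "cached value")
	fmt.Println(m.At(sig2)) // "cached value": keys are compared by type identity
}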
diff --git a/libgo/go/golang.org/x/tools/internal/lsp/fuzzy/input.go b/libgo/go/golang.org/x/tools/internal/lsp/fuzzy/input.go
index ac377035ec6..c1038163f1a 100644
--- a/libgo/go/golang.org/x/tools/internal/lsp/fuzzy/input.go
+++ b/libgo/go/golang.org/x/tools/internal/lsp/fuzzy/input.go
@@ -27,23 +27,23 @@ const (
// RuneRoles detects the roles of each byte rune in an input string and stores it in the output
// slice. The rune role depends on the input type. Stops when it parsed all the runes in the string
// or when it filled the output. If output is nil, then it gets created.
-func RuneRoles(str string, reuse []RuneRole) []RuneRole {
+func RuneRoles(candidate []byte, reuse []RuneRole) []RuneRole {
var output []RuneRole
- if cap(reuse) < len(str) {
- output = make([]RuneRole, 0, len(str))
+ if cap(reuse) < len(candidate) {
+ output = make([]RuneRole, 0, len(candidate))
} else {
output = reuse[:0]
}
prev, prev2 := rtNone, rtNone
- for i := 0; i < len(str); i++ {
- r := rune(str[i])
+ for i := 0; i < len(candidate); i++ {
+ r := rune(candidate[i])
role := RNone
curr := rtLower
- if str[i] <= unicode.MaxASCII {
- curr = runeType(rt[str[i]] - '0')
+ if candidate[i] <= unicode.MaxASCII {
+ curr = runeType(rt[candidate[i]] - '0')
}
if curr == rtLower {
@@ -58,7 +58,7 @@ func RuneRoles(str string, reuse []RuneRole) []RuneRole {
if prev == rtUpper {
// This and previous characters are both upper case.
- if i+1 == len(str) {
+ if i+1 == len(candidate) {
// This is last character, previous was also uppercase -> this is UCTail
// i.e., (current char is C): aBC / BC / ABC
role = RUCTail
@@ -118,11 +118,26 @@ func LastSegment(input string, roles []RuneRole) string {
return input[start+1 : end+1]
}
-// ToLower transforms the input string to lower case, which is stored in the output byte slice.
+// fromChunks copies string chunks into the given buffer.
+func fromChunks(chunks []string, buffer []byte) []byte {
+ ii := 0
+ for _, chunk := range chunks {
+ for i := 0; i < len(chunk); i++ {
+ if ii >= cap(buffer) {
+ break
+ }
+ buffer[ii] = chunk[i]
+ ii++
+ }
+ }
+ return buffer[:ii]
+}
+
+// toLower transforms the input bytes to lower case, which is stored in the output byte slice.
// The lower casing considers only ASCII values - non ASCII values are left unmodified.
// Stops when parsed all input or when it filled the output slice. If output is nil, then it gets
// created.
-func ToLower(input string, reuse []byte) []byte {
+func toLower(input []byte, reuse []byte) []byte {
output := reuse
if cap(reuse) < len(input) {
output = make([]byte, len(input))
@@ -130,7 +145,7 @@ func ToLower(input string, reuse []byte) []byte {
for i := 0; i < len(input); i++ {
r := rune(input[i])
- if r <= unicode.MaxASCII {
+ if input[i] <= unicode.MaxASCII {
if 'A' <= r && r <= 'Z' {
r += 'a' - 'A'
}
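
A standalone sketch of the chunk-concatenation idea behind fromChunks (toy code, not the package's implementation): copy successive string chunks into a fixed-size reusable buffer, truncating once it is full.

package main

import "fmt"

// concatChunks copies chunks into buf and returns the filled prefix,
// truncating once the buffer is full.
func concatChunks(chunks []string, buf []byte) []byte {
	n := 0
	for _, chunk := range chunks {
		for i := 0; i < len(chunk) && n < len(buf); i++ {
			buf[n] = chunk[i]
			n++
		}
	}
	return buf[:n]
}

func main() {
	var buf [16]byte
	out := concatChunks([]string{"example.com/pkg.", "Symbol"}, buf[:])
	fmt.Println(string(out)) // truncated to 16 bytes: "example.com/pkg."
}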
diff --git a/libgo/go/golang.org/x/tools/internal/lsp/fuzzy/matcher.go b/libgo/go/golang.org/x/tools/internal/lsp/fuzzy/matcher.go
index 16a643097de..265cdcf1604 100644
--- a/libgo/go/golang.org/x/tools/internal/lsp/fuzzy/matcher.go
+++ b/libgo/go/golang.org/x/tools/internal/lsp/fuzzy/matcher.go
@@ -51,8 +51,12 @@ type Matcher struct {
lastCandidateLen int // in bytes
lastCandidateMatched bool
- // Here we save the last candidate in lower-case. This is basically a byte slice we reuse for
- // performance reasons, so the slice is not reallocated for every candidate.
+ // Reusable buffers to avoid allocating for every candidate.
+ // - inputBuf stores the concatenated input chunks
+ // - lowerBuf stores the last candidate in lower-case
+ // - rolesBuf stores the calculated roles for each rune in the last
+ // candidate.
+ inputBuf [MaxInputSize]byte
lowerBuf [MaxInputSize]byte
rolesBuf [MaxInputSize]RuneRole
}
@@ -72,7 +76,7 @@ func NewMatcher(pattern string) *Matcher {
m := &Matcher{
pattern: pattern,
- patternLower: ToLower(pattern, nil),
+ patternLower: toLower([]byte(pattern), nil),
}
for i, c := range m.patternLower {
@@ -88,7 +92,7 @@ func NewMatcher(pattern string) *Matcher {
m.patternShort = m.patternLower
}
- m.patternRoles = RuneRoles(pattern, nil)
+ m.patternRoles = RuneRoles([]byte(pattern), nil)
if len(pattern) > 0 {
maxCharScore := 4
@@ -102,10 +106,15 @@ func NewMatcher(pattern string) *Matcher {
// This is not designed for parallel use. Multiple candidates must be scored sequentially.
// Returns a score between 0 and 1 (0 - no match, 1 - perfect match).
func (m *Matcher) Score(candidate string) float32 {
+ return m.ScoreChunks([]string{candidate})
+}
+
+func (m *Matcher) ScoreChunks(chunks []string) float32 {
+ candidate := fromChunks(chunks, m.inputBuf[:])
if len(candidate) > MaxInputSize {
candidate = candidate[:MaxInputSize]
}
- lower := ToLower(candidate, m.lowerBuf[:])
+ lower := toLower(candidate, m.lowerBuf[:])
m.lastCandidateLen = len(candidate)
if len(m.pattern) == 0 {
@@ -174,7 +183,7 @@ func (m *Matcher) MatchedRanges() []int {
return ret
}
-func (m *Matcher) match(candidate string, candidateLower []byte) bool {
+func (m *Matcher) match(candidate []byte, candidateLower []byte) bool {
i, j := 0, 0
for ; i < len(candidateLower) && j < len(m.patternLower); i++ {
if candidateLower[i] == m.patternLower[j] {
@@ -192,7 +201,7 @@ func (m *Matcher) match(candidate string, candidateLower []byte) bool {
return true
}
-func (m *Matcher) computeScore(candidate string, candidateLower []byte) int {
+func (m *Matcher) computeScore(candidate []byte, candidateLower []byte) int {
pattLen, candLen := len(m.pattern), len(candidate)
for j := 0; j <= len(m.pattern); j++ {
diff --git a/libgo/go/golang.org/x/tools/internal/lsp/fuzzy/symbol.go b/libgo/go/golang.org/x/tools/internal/lsp/fuzzy/symbol.go
new file mode 100644
index 00000000000..df9fbd51410
--- /dev/null
+++ b/libgo/go/golang.org/x/tools/internal/lsp/fuzzy/symbol.go
@@ -0,0 +1,236 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzzy
+
+import (
+ "unicode"
+)
+
+// SymbolMatcher implements a fuzzy matching algorithm optimized for Go symbols
+// of the form:
+// example.com/path/to/package.object.field
+//
+// Knowing that we are matching symbols like this allows us to make the
+// following optimizations:
+// - We can incorporate right-to-left relevance directly into the score
+// calculation.
+// - We can match from right to left, discarding leading bytes if the input is
+// too long.
+// - We just take the right-most match without losing too much precision. This
+// allows us to use an O(n) algorithm.
+// - We can operate directly on chunked strings; in many cases we will
+// be storing the package path and/or package name separately from the
+// symbol or identifiers, so doing this avoids allocating strings.
+// - We can return the index of the right-most match, allowing us to trim
+// irrelevant qualification.
+//
+// This implementation is experimental, serving as a reference fast algorithm
+// to compare to the fuzzy algorithm implemented by Matcher.
+type SymbolMatcher struct {
+ // Buffers of length 256 are both a reasonable size for most qualified
+ // symbols and make it easy to avoid bounds checks by using uint8 indexes.
+ pattern [256]rune
+ patternLen uint8
+ inputBuffer [256]rune // avoid allocating when considering chunks
+ roles [256]uint32 // which roles does a rune play (word start, etc.)
+ segments [256]uint8 // how many segments from the right is each rune
+}
+
+const (
+ segmentStart uint32 = 1 << iota
+ wordStart
+ separator
+)
+
+// NewSymbolMatcher creates a SymbolMatcher that may be used to match the given
+// search pattern.
+//
+// Currently this matcher only accepts case-insensitive fuzzy patterns.
+//
+// An empty pattern matches no input.
+func NewSymbolMatcher(pattern string) *SymbolMatcher {
+ m := &SymbolMatcher{}
+ for _, p := range pattern {
+ m.pattern[m.patternLen] = unicode.ToLower(p)
+ m.patternLen++
+ if m.patternLen == 255 || int(m.patternLen) == len(pattern) {
+ // break at 255 so that we can represent patternLen with a uint8.
+ break
+ }
+ }
+ return m
+}
+
+// Match looks for the right-most match of the search pattern within the symbol
+// represented by concatenating the given chunks, returning its offset and
+// score.
+//
+// If a match is found, the first return value will hold the absolute byte
+// offset within all chunks for the start of the symbol. In other words, the
+// index of the match within strings.Join(chunks, ""). If no match is found,
+// the first return value will be -1.
+//
+// The second return value will be the score of the match, which is always
+// between 0 and 1, inclusive. A score of 0 indicates no match.
+func (m *SymbolMatcher) Match(chunks []string) (int, float64) {
+ // Explicit behavior for an empty pattern.
+ //
+ // As a minor optimization, this also avoids nilness checks later on, since
+ // the compiler can prove that m != nil.
+ if m.patternLen == 0 {
+ return -1, 0
+ }
+
+ // First phase: populate the input buffer with lower-cased runes.
+ //
+ // We could also check for a forward match here, but since we'd have to write
+ // the entire input anyway this has negligible impact on performance.
+
+ var (
+ inputLen = uint8(0)
+ modifiers = wordStart | segmentStart
+ )
+
+input:
+ for _, chunk := range chunks {
+ for _, r := range chunk {
+ if r == '.' || r == '/' {
+ modifiers |= separator
+ }
+ // optimization: avoid calls to unicode.ToLower, which can't be inlined.
+ l := r
+ if r <= unicode.MaxASCII {
+ if 'A' <= r && r <= 'Z' {
+ l = r + 'a' - 'A'
+ }
+ } else {
+ l = unicode.ToLower(r)
+ }
+ if l != r {
+ modifiers |= wordStart
+ }
+ m.inputBuffer[inputLen] = l
+ m.roles[inputLen] = modifiers
+ inputLen++
+ if m.roles[inputLen-1]&separator != 0 {
+ modifiers = wordStart | segmentStart
+ } else {
+ modifiers = 0
+ }
+ // TODO: we should prefer the right-most input if it overflows, rather
+ // than the left-most as we're doing here.
+ if inputLen == 255 {
+ break input
+ }
+ }
+ }
+
+ // Second phase: find the right-most match, and count segments from the
+ // right.
+
+ var (
+ pi = uint8(m.patternLen - 1) // pattern index
+ p = m.pattern[pi] // pattern rune
+ start = -1 // start offset of match
+ rseg = uint8(0)
+ )
+ const maxSeg = 3 // maximum number of segments from the right to count, for scoring purposes.
+
+ for ii := inputLen - 1; ; ii-- {
+ r := m.inputBuffer[ii]
+ if rseg < maxSeg && m.roles[ii]&separator != 0 {
+ rseg++
+ }
+ m.segments[ii] = rseg
+ if p == r {
+ if pi == 0 {
+ start = int(ii)
+ break
+ }
+ pi--
+ p = m.pattern[pi]
+ }
+ // Don't check ii >= 0 in the loop condition: ii is a uint8.
+ if ii == 0 {
+ break
+ }
+ }
+
+ if start < 0 {
+ // no match: skip scoring
+ return -1, 0
+ }
+
+ // Third phase: find the shortest match, and compute the score.
+
+ // Score is the average score for each character.
+ //
+ // A character score is the multiple of:
+ // 1. 1.0 if the character starts a segment, .8 if the character starts a
+ // mid-segment word, otherwise 0.6. This carries over to immediately
+ // following characters.
+ // 2. For the final character match, the multiplier from (1) is reduced to
+ // .8 if the next character in the input is a mid-segment word, or 0.6 if
+ // the next character in the input is not a word or segment start. This
+ // ensures that we favor whole-word or whole-segment matches over prefix
+ // matches.
+ // 3. 1.0 if the character is part of the last segment, otherwise
+ // 1.0-.2*<segments from the right>, with a max segment count of 3.
+ //
+ // This is a very naive algorithm, but it is fast. There's lots of prior art
+ // here, and we should leverage it. For example, we could explicitly consider
+ // character distance, and exact matches of words or segments.
+ //
+ // Also note that this might not actually find the highest scoring match, as
+ // doing so could require a non-linear algorithm, depending on how the score
+ // is calculated.
+
+ pi = 0
+ p = m.pattern[pi]
+
+ const (
+ segStreak = 1.0
+ wordStreak = 0.8
+ noStreak = 0.6
+ perSegment = 0.2 // we count at most 3 segments above
+ )
+
+ streakBonus := noStreak
+ totScore := 0.0
+ for ii := uint8(start); ii < inputLen; ii++ {
+ r := m.inputBuffer[ii]
+ if r == p {
+ pi++
+ p = m.pattern[pi]
+ // Note: this could be optimized with some bit operations.
+ switch {
+ case m.roles[ii]&segmentStart != 0 && segStreak > streakBonus:
+ streakBonus = segStreak
+ case m.roles[ii]&wordStart != 0 && wordStreak > streakBonus:
+ streakBonus = wordStreak
+ }
+ finalChar := pi >= m.patternLen
+ // finalCost := 1.0
+ if finalChar && streakBonus > noStreak {
+ switch {
+ case ii == inputLen-1 || m.roles[ii+1]&segmentStart != 0:
+ // Full segment: no reduction
+ case m.roles[ii+1]&wordStart != 0:
+ streakBonus = wordStreak
+ default:
+ streakBonus = noStreak
+ }
+ }
+ totScore += streakBonus * (1.0 - float64(m.segments[ii])*perSegment)
+ if finalChar {
+ break
+ }
+ } else {
+ streakBonus = noStreak
+ }
+ }
+
+ return start, totScore / float64(m.patternLen)
+}
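
A hedged usage sketch of SymbolMatcher, assuming code that lives inside x/tools itself (the fuzzy package is internal and cannot be imported from other modules); the chunk values are illustrative:

package main

import (
	"fmt"

	// Internal to x/tools: importable only from within that module.
	"golang.org/x/tools/internal/lsp/fuzzy"
)

func main() {
	m := fuzzy.NewSymbolMatcher("hashptr")
	// Chunks let callers keep the package path and the identifier separate,
	// avoiding a per-candidate string concatenation.
	chunks := []string{"golang.org/x/tools/go/types/typeutil.", "Hasher.hashPtr"}
	offset, score := m.Match(chunks)
	fmt.Println(offset, score) // byte offset of the right-most match, score in [0, 1]
}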
diff --git a/libgo/go/golang.org/x/tools/internal/typeparams/common.go b/libgo/go/golang.org/x/tools/internal/typeparams/common.go
new file mode 100644
index 00000000000..1222764b6a3
--- /dev/null
+++ b/libgo/go/golang.org/x/tools/internal/typeparams/common.go
@@ -0,0 +1,79 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeparams contains common utilities for writing tools that interact
+// with generic Go code, as introduced with Go 1.18.
+//
+// Many of the types and functions in this package are proxies for the new APIs
+// introduced in the standard library with Go 1.18. For example, the
+// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec
+// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go
+// versions older than 1.18 these helpers are implemented as stubs, allowing
+// users of this package to write code that handles generic constructs inline,
+// even if the Go version being used to compile does not support generics.
+//
+// Additionally, this package contains common utilities for working with the
+// new generic constructs, to supplement the standard library APIs. Notably,
+// the StructuralTerms API computes a minimal representation of the structural
+// restrictions on a type parameter. In the future, this API may be available
+// from go/types.
+//
+// See the example/README.md for a more detailed guide on how to update tools
+// to support generics.
+package typeparams
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// UnpackIndexExpr extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting indices slice will contain exactly one
+// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
+// number of index expressions.
+//
+// For nodes that don't represent index expressions, the first return value of
+// UnpackIndexExpr will be nil.
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
+ switch e := n.(type) {
+ case *ast.IndexExpr:
+ return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
+ case *IndexListExpr:
+ return e.X, e.Lbrack, e.Indices, e.Rbrack
+ }
+ return nil, token.NoPos, nil, token.NoPos
+}
+
+// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
+// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
+// will panic.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
+ switch len(indices) {
+ case 0:
+ panic("empty indices")
+ case 1:
+ return &ast.IndexExpr{
+ X: x,
+ Lbrack: lbrack,
+ Index: indices[0],
+ Rbrack: rbrack,
+ }
+ default:
+ return &IndexListExpr{
+ X: x,
+ Lbrack: lbrack,
+ Indices: indices,
+ Rbrack: rbrack,
+ }
+ }
+}
+
+// IsTypeParam reports whether t is a type parameter.
+func IsTypeParam(t types.Type) bool {
+ _, ok := t.(*TypeParam)
+ return ok
+}
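
On a Go 1.18+ toolchain the proxied names resolve to real go/types APIs; a standard-library sketch of what IsTypeParam and the signature's type parameter list correspond to there (this will not compile on older toolchains):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

func Min[T int | float64](a, b T) T {
	if a < b {
		return a
	}
	return b
}
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}
	sig := pkg.Scope().Lookup("Min").(*types.Func).Type().(*types.Signature)

	_, isTypeParam := sig.Params().At(0).Type().(*types.TypeParam)
	fmt.Println(isTypeParam)            // true: what IsTypeParam reports on 1.18+
	fmt.Println(sig.TypeParams().Len()) // 1: the list ForSignature exposes on 1.18+
}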
diff --git a/libgo/go/golang.org/x/tools/internal/typeparams/enabled_go117.go b/libgo/go/golang.org/x/tools/internal/typeparams/enabled_go117.go
new file mode 100644
index 00000000000..18212390e19
--- /dev/null
+++ b/libgo/go/golang.org/x/tools/internal/typeparams/enabled_go117.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package typeparams
+
+// Enabled reports whether type parameters are enabled in the current build
+// environment.
+const Enabled = false
diff --git a/libgo/go/golang.org/x/tools/internal/typeparams/enabled_go118.go b/libgo/go/golang.org/x/tools/internal/typeparams/enabled_go118.go
new file mode 100644
index 00000000000..d67148823c4
--- /dev/null
+++ b/libgo/go/golang.org/x/tools/internal/typeparams/enabled_go118.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package typeparams
+
+// Note: this constant is in a separate file as this is the only acceptable
+// diff between the <1.18 API of this package and the 1.18 API.
+
+// Enabled reports whether type parameters are enabled in the current build
+// environment.
+const Enabled = true
diff --git a/libgo/go/golang.org/x/tools/internal/typeparams/normalize.go b/libgo/go/golang.org/x/tools/internal/typeparams/normalize.go
new file mode 100644
index 00000000000..090f142a5f3
--- /dev/null
+++ b/libgo/go/golang.org/x/tools/internal/typeparams/normalize.go
@@ -0,0 +1,216 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "errors"
+ "fmt"
+ "go/types"
+ "os"
+ "strings"
+)
+
+//go:generate go run copytermlist.go
+
+const debug = false
+
+var ErrEmptyTypeSet = errors.New("empty type set")
+
+// StructuralTerms returns a slice of terms representing the normalized
+// structural type restrictions of a type parameter, if any.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration
+// type T[P interface{~int; m()}] int
+// the structural restriction of the type parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// StructuralTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as an exercise for the reader to verify that, modulo
+// sorting, there is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, StructuralTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the
+// constraint interface is invalid, exceeds complexity bounds, or has an empty
+// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
+//
+// StructuralTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func StructuralTerms(tparam *TypeParam) ([]*Term, error) {
+ constraint := tparam.Constraint()
+ if constraint == nil {
+ return nil, fmt.Errorf("%s has nil constraint", tparam)
+ }
+ iface, _ := constraint.Underlying().(*types.Interface)
+ if iface == nil {
+ return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
+ }
+ return InterfaceTermSet(iface)
+}
+
+// InterfaceTermSet computes the normalized terms for a constraint interface,
+// returning an error if the term set cannot be computed or is empty. In the
+// latter case, the error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func InterfaceTermSet(iface *types.Interface) ([]*Term, error) {
+ return computeTermSet(iface)
+}
+
+// UnionTermSet computes the normalized terms for a union, returning an error
+// if the term set cannot be computed or is empty. In the latter case, the
+// error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func UnionTermSet(union *Union) ([]*Term, error) {
+ return computeTermSet(union)
+}
+
+func computeTermSet(typ types.Type) ([]*Term, error) {
+ tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
+ if err != nil {
+ return nil, err
+ }
+ if tset.terms.isEmpty() {
+ return nil, ErrEmptyTypeSet
+ }
+ if tset.terms.isAll() {
+ return nil, nil
+ }
+ var terms []*Term
+ for _, term := range tset.terms {
+ terms = append(terms, NewTerm(term.tilde, term.typ))
+ }
+ return terms, nil
+}
+
+// A termSet holds the normalized set of terms for a given type.
+//
+// The name termSet is intentionally distinct from 'type set': a type set is
+// all types that implement a type (and includes method restrictions), whereas
+// a term set just represents the structural restrictions on a type.
+type termSet struct {
+ complete bool
+ terms termlist
+}
+
+func indentf(depth int, format string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
+}
+
+func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
+ if t == nil {
+ panic("nil type")
+ }
+
+ if debug {
+ indentf(depth, "%s", t.String())
+ defer func() {
+ if err != nil {
+ indentf(depth, "=> %s", err)
+ } else {
+ indentf(depth, "=> %s", res.terms.String())
+ }
+ }()
+ }
+
+ const maxTermCount = 100
+ if tset, ok := seen[t]; ok {
+ if !tset.complete {
+ return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
+ }
+ return tset, nil
+ }
+
+ // Mark the current type as seen to avoid infinite recursion.
+ tset := new(termSet)
+ defer func() {
+ tset.complete = true
+ }()
+ seen[t] = tset
+
+ switch u := t.Underlying().(type) {
+ case *types.Interface:
+ // The term set of an interface is the intersection of the term sets of its
+ // embedded types.
+ tset.terms = allTermlist
+ for i := 0; i < u.NumEmbeddeds(); i++ {
+ embedded := u.EmbeddedType(i)
+ if _, ok := embedded.Underlying().(*TypeParam); ok {
+ return nil, fmt.Errorf("invalid embedded type %T", embedded)
+ }
+ tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ tset.terms = tset.terms.intersect(tset2.terms)
+ }
+ case *Union:
+ // The term set of a union is the union of term sets of its terms.
+ tset.terms = nil
+ for i := 0; i < u.Len(); i++ {
+ t := u.Term(i)
+ var terms termlist
+ switch t.Type().Underlying().(type) {
+ case *types.Interface:
+ tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ terms = tset2.terms
+ case *TypeParam, *Union:
+ // A stand-alone type parameter or union is not permitted as union
+ // term.
+ return nil, fmt.Errorf("invalid union term %T", t)
+ default:
+ if t.Type() == types.Typ[types.Invalid] {
+ continue
+ }
+ terms = termlist{{t.Tilde(), t.Type()}}
+ }
+ tset.terms = tset.terms.union(terms)
+ if len(tset.terms) > maxTermCount {
+ return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+ }
+ }
+ case *TypeParam:
+ panic("unreachable")
+ default:
+ // For all other types, the term set is just a single non-tilde term
+ // holding the type itself.
+ if u != types.Typ[types.Invalid] {
+ tset.terms = termlist{{false, t}}
+ }
+ }
+ return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+ return t.Underlying()
+}
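
A toy worked example of the A|B; C reduction described in the StructuralTerms comment above, using hand-rolled term arithmetic over basic types only (not the package's implementation):

package main

import "fmt"

// term is a tilde term over a basic type, identified here by its name. For
// basic types the underlying type is the type itself, which keeps the rules
// below trivial.
type term struct {
	tilde bool
	typ   string
}

func (t term) String() string {
	if t.tilde {
		return "~" + t.typ
	}
	return t.typ
}

// subset reports whether the type set of x is contained in that of y.
func subset(x, y term) bool {
	return x.typ == y.typ && (y.tilde || !x.tilde)
}

// intersect returns the intersection of two terms, or ok=false if it is empty
// (valid for the distinct basic types used here).
func intersect(x, y term) (term, bool) {
	if x.typ != y.typ {
		return term{}, false
	}
	if subset(x, y) {
		return x, true
	}
	return y, true // y ⊆ x
}

// reduce drops a term when another term in the list strictly contains it,
// and drops later duplicates.
func reduce(ts []term) []term {
	var out []term
	for i, t := range ts {
		keep := true
		for j, u := range ts {
			if i == j {
				continue
			}
			if subset(t, u) && (!subset(u, t) || j < i) {
				keep = false
				break
			}
		}
		if keep {
			out = append(out, t)
		}
	}
	return out
}

func main() {
	// type A interface{ ~string|~[]byte }
	// type B interface{ int|string }
	// type C interface{ ~string|~int }
	aOrB := reduce([]term{{true, "string"}, {true, "[]byte"}, {false, "int"}, {false, "string"}})
	c := []term{{true, "string"}, {true, "int"}}

	// Intersect the union A|B with C, term by term, then reduce again.
	var result []term
	for _, x := range aOrB {
		for _, y := range c {
			if r, ok := intersect(x, y); ok {
				result = append(result, r)
			}
		}
	}
	fmt.Println(reduce(result)) // [~string int]
}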
diff --git a/libgo/go/golang.org/x/tools/internal/typeparams/termlist.go b/libgo/go/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 00000000000..10857d504c4
--- /dev/null
+++ b/libgo/go/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,172 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+ "bytes"
+ "go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+ if len(xl) == 0 {
+ return "βˆ…"
+ }
+ var buf bytes.Buffer
+ for i, x := range xl {
+ if i > 0 {
+ buf.WriteString(" βˆͺ ")
+ }
+ buf.WriteString(x.String())
+ }
+ return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+ // If there's a non-nil term, the entire list is not empty.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+ // If there's a 𝓤 term, the entire list is 𝓤.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil && x.typ == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ used := make([]bool, len(xl))
+ var rl termlist
+ for i, xi := range xl {
+ if xi == nil || used[i] {
+ continue
+ }
+ for j := i + 1; j < len(xl); j++ {
+ xj := xl[j]
+ if xj == nil || used[j] {
+ continue
+ }
+ if u1, u2 := xi.union(xj); u2 == nil {
+ // If we encounter a 𝓤 term, the entire list is 𝓤.
+ // Exit early.
+ // (Note that this is not just an optimization;
+ // if we continue, we may end up with a 𝓤 term
+ // and other terms and the result would not be
+ // in normal form.)
+ if u1.typ == nil {
+ return allTermlist
+ }
+ xi = u1
+ used[j] = true // xj is now unioned into xi - ignore it in future iterations
+ }
+ }
+ rl = append(rl, xi)
+ }
+ return rl
+}
+
+// If the type set represented by xl is specified by a single (non-𝓤) term,
+// structuralType returns that type. Otherwise it returns nil.
+func (xl termlist) structuralType() types.Type {
+ if nl := xl.norm(); len(nl) == 1 {
+ return nl[0].typ // if nl.isAll() then typ is nil, which is ok
+ }
+ return nil
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+ return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+ if xl.isEmpty() || yl.isEmpty() {
+ return nil
+ }
+
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var rl termlist
+ for _, x := range xl {
+ for _, y := range yl {
+ if r := x.intersect(y); r != nil {
+ rl = append(rl, r)
+ }
+ }
+ }
+ return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+ // TODO(gri) this should be more efficient
+ return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+ for _, x := range xl {
+ if x.includes(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+ for _, x := range xl {
+ if y.subsetOf(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+ if yl.isEmpty() {
+ return xl.isEmpty()
+ }
+
+ // each term x of xl must be a subset of yl
+ for _, x := range xl {
+ if !yl.supersetOf(x) {
+ return false // x is not a subset of yl
+ }
+ }
+ return true
+}
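The normal-form invariant maintained by norm is easiest to see on a concrete input. Below is a minimal sketch (not part of the patch) showing how a mixed termlist collapses; because term and termlist are unexported it would have to live in this same package, and exampleNorm is a hypothetical helper:

package typeparams

import (
	"fmt"
	"go/types"
)

func exampleNorm() {
	intT := types.Typ[types.Int]
	xl := termlist{
		&term{false, intT}, // {int}
		&term{true, intT},  // ~int; absorbs the plain int term on union
		nil,                // βˆ…; dropped by norm
	}
	fmt.Println(xl.norm())         // ~int
	fmt.Println(xl.isAll())        // false
	fmt.Println(xl.includes(intT)) // true
}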
diff --git a/libgo/go/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/libgo/go/golang.org/x/tools/internal/typeparams/typeparams_go117.go
new file mode 100644
index 00000000000..5fd3fc35156
--- /dev/null
+++ b/libgo/go/golang.org/x/tools/internal/typeparams/typeparams_go117.go
@@ -0,0 +1,192 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package typeparams
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+func unsupported() {
+ panic("type parameters are unsupported at this go version")
+}
+
+// IndexListExpr is a placeholder type, as type parameters are not supported at
+// this Go version. Its methods panic on use.
+type IndexListExpr struct {
+ ast.Expr
+ X ast.Expr // expression
+ Lbrack token.Pos // position of "["
+ Indices []ast.Expr // index expressions
+ Rbrack token.Pos // position of "]"
+}
+
+// ForTypeSpec returns an empty field list, as type parameters are not supported
+// at this Go version.
+func ForTypeSpec(*ast.TypeSpec) *ast.FieldList {
+ return nil
+}
+
+// ForFuncType returns an empty field list, as type parameters are not
+// supported at this Go version.
+func ForFuncType(*ast.FuncType) *ast.FieldList {
+ return nil
+}
+
+// TypeParam is a placeholder type, as type parameters are not supported at
+// this Go version. Its methods panic on use.
+type TypeParam struct{ types.Type }
+
+func (*TypeParam) Index() int { unsupported(); return 0 }
+func (*TypeParam) Constraint() types.Type { unsupported(); return nil }
+func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil }
+
+// TypeParamList is a placeholder for an empty type parameter list.
+type TypeParamList struct{}
+
+func (*TypeParamList) Len() int { return 0 }
+func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil }
+
+// TypeList is a placeholder for an empty type list.
+type TypeList struct{}
+
+func (*TypeList) Len() int { return 0 }
+func (*TypeList) At(int) types.Type { unsupported(); return nil }
+
+// NewTypeParam is unsupported at this Go version, and panics.
+func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
+ unsupported()
+ return nil
+}
+
+// SetTypeParamConstraint is unsupported at this Go version, and panics.
+func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) {
+ unsupported()
+}
+
+// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or
+// typeParams is non-empty.
+func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
+ if len(recvTypeParams) != 0 || len(typeParams) != 0 {
+ panic("signatures cannot have type parameters at this Go version")
+ }
+ return types.NewSignature(recv, params, results, variadic)
+}
+
+// ForSignature returns an empty type parameter list.
+func ForSignature(*types.Signature) *TypeParamList {
+ return nil
+}
+
+// RecvTypeParams returns a nil type parameter list.
+func RecvTypeParams(sig *types.Signature) *TypeParamList {
+ return nil
+}
+
+// IsComparable returns false, as no interfaces are type-restricted at this Go
+// version.
+func IsComparable(*types.Interface) bool {
+ return false
+}
+
+// IsMethodSet returns true, as no interfaces are type-restricted at this Go
+// version.
+func IsMethodSet(*types.Interface) bool {
+ return true
+}
+
+// IsImplicit returns false, as no interfaces are implicit at this Go version.
+func IsImplicit(*types.Interface) bool {
+ return false
+}
+
+// MarkImplicit does nothing, because this Go version does not have implicit
+// interfaces.
+func MarkImplicit(*types.Interface) {}
+
+// ForNamed returns an empty type parameter list, as type parameters are not
+// supported at this Go version.
+func ForNamed(*types.Named) *TypeParamList {
+ return nil
+}
+
+// SetForNamed panics if tparams is non-empty.
+func SetForNamed(_ *types.Named, tparams []*TypeParam) {
+ if len(tparams) > 0 {
+ unsupported()
+ }
+}
+
+// NamedTypeArgs returns nil.
+func NamedTypeArgs(*types.Named) *TypeList {
+ return nil
+}
+
+// NamedTypeOrigin is the identity method at this Go version.
+func NamedTypeOrigin(named *types.Named) types.Type {
+ return named
+}
+
+// Term holds information about a structural type restriction.
+type Term struct {
+ tilde bool
+ typ types.Type
+}
+
+func (m *Term) Tilde() bool { return m.tilde }
+func (m *Term) Type() types.Type { return m.typ }
+func (m *Term) String() string {
+ pre := ""
+ if m.tilde {
+ pre = "~"
+ }
+ return pre + m.typ.String()
+}
+
+// NewTerm returns a new placeholder Term with the given tilde and type.
+func NewTerm(tilde bool, typ types.Type) *Term {
+ return &Term{tilde, typ}
+}
+
+// Union is a placeholder type, as type parameters are not supported at this Go
+// version. Its methods panic on use.
+type Union struct{ types.Type }
+
+func (*Union) Len() int { return 0 }
+func (*Union) Term(i int) *Term { unsupported(); return nil }
+
+// NewUnion is unsupported at this Go version, and panics.
+func NewUnion(terms []*Term) *Union {
+ unsupported()
+ return nil
+}
+
+// InitInstanceInfo is a noop at this Go version.
+func InitInstanceInfo(*types.Info) {}
+
+// Instance is a placeholder type, as type parameters are not supported at this
+// Go version.
+type Instance struct {
+ TypeArgs *TypeList
+ Type types.Type
+}
+
+// GetInstances returns a nil map, as type parameters are not supported at this
+// Go version.
+func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil }
+
+// Context is a placeholder type, as type parameters are not supported at
+// this Go version.
+type Context struct{}
+
+// Instantiate is unsupported on this Go version, and panics.
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
+ unsupported()
+ return nil, nil
+}
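One property of these placeholders worth noting: the accessors that merely report absence (ForSignature, ForNamed, NamedTypeArgs) return nil values whose placeholder methods are nil-safe, so callers need no version checks. A hedged sketch, not part of the patch; because the package is internal, a real caller would have to sit inside x/tools itself:

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/internal/typeparams"
)

func main() {
	// A plain, non-generic signature; types.NewSignature exists on all
	// supported Go versions.
	sig := types.NewSignature(nil, nil, nil, false)

	// On a pre-1.18 toolchain ForSignature returns a nil *TypeParamList,
	// and the placeholder's Len method accepts the nil receiver, so this
	// prints 0 without any build-tag branching in the caller.
	fmt.Println(typeparams.ForSignature(sig).Len())
}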
diff --git a/libgo/go/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/libgo/go/golang.org/x/tools/internal/typeparams/typeparams_go118.go
new file mode 100644
index 00000000000..7470aed8c99
--- /dev/null
+++ b/libgo/go/golang.org/x/tools/internal/typeparams/typeparams_go118.go
@@ -0,0 +1,146 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package typeparams
+
+import (
+ "go/ast"
+ "go/types"
+)
+
+// IndexListExpr is an alias for ast.IndexListExpr.
+type IndexListExpr = ast.IndexListExpr
+
+// ForTypeSpec returns n.TypeParams.
+func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList {
+ if n == nil {
+ return nil
+ }
+ return n.TypeParams
+}
+
+// ForFuncType returns n.TypeParams.
+func ForFuncType(n *ast.FuncType) *ast.FieldList {
+ if n == nil {
+ return nil
+ }
+ return n.TypeParams
+}
+
+// TypeParam is an alias for types.TypeParam
+type TypeParam = types.TypeParam
+
+// TypeParamList is an alias for types.TypeParamList
+type TypeParamList = types.TypeParamList
+
+// TypeList is an alias for types.TypeList
+type TypeList = types.TypeList
+
+// NewTypeParam calls types.NewTypeParam.
+func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
+ return types.NewTypeParam(name, constraint)
+}
+
+// SetTypeParamConstraint calls tparam.SetConstraint(constraint).
+func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) {
+ tparam.SetConstraint(constraint)
+}
+
+// NewSignatureType calls types.NewSignatureType.
+func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
+ return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic)
+}
+
+// ForSignature returns sig.TypeParams()
+func ForSignature(sig *types.Signature) *TypeParamList {
+ return sig.TypeParams()
+}
+
+// RecvTypeParams returns sig.RecvTypeParams().
+func RecvTypeParams(sig *types.Signature) *TypeParamList {
+ return sig.RecvTypeParams()
+}
+
+// IsComparable calls iface.IsComparable().
+func IsComparable(iface *types.Interface) bool {
+ return iface.IsComparable()
+}
+
+// IsMethodSet calls iface.IsMethodSet().
+func IsMethodSet(iface *types.Interface) bool {
+ return iface.IsMethodSet()
+}
+
+// IsImplicit calls iface.IsImplicit().
+func IsImplicit(iface *types.Interface) bool {
+ return iface.IsImplicit()
+}
+
+// MarkImplicit calls iface.MarkImplicit().
+func MarkImplicit(iface *types.Interface) {
+ iface.MarkImplicit()
+}
+
+// ForNamed extracts the (possibly empty) type parameter object list from
+// named.
+func ForNamed(named *types.Named) *TypeParamList {
+ return named.TypeParams()
+}
+
+// SetForNamed sets the type params tparams on n. Each tparam must be of
+// dynamic type *types.TypeParam.
+func SetForNamed(n *types.Named, tparams []*TypeParam) {
+ n.SetTypeParams(tparams)
+}
+
+// NamedTypeArgs returns named.TypeArgs().
+func NamedTypeArgs(named *types.Named) *TypeList {
+ return named.TypeArgs()
+}
+
+// NamedTypeOrigin returns named.Origin().
+func NamedTypeOrigin(named *types.Named) types.Type {
+ return named.Origin()
+}
+
+// Term is an alias for types.Term.
+type Term = types.Term
+
+// NewTerm calls types.NewTerm.
+func NewTerm(tilde bool, typ types.Type) *Term {
+ return types.NewTerm(tilde, typ)
+}
+
+// Union is an alias for types.Union
+type Union = types.Union
+
+// NewUnion calls types.NewUnion.
+func NewUnion(terms []*Term) *Union {
+ return types.NewUnion(terms)
+}
+
+// InitInstanceInfo initializes info to record information about type and
+// function instances.
+func InitInstanceInfo(info *types.Info) {
+ info.Instances = make(map[*ast.Ident]types.Instance)
+}
+
+// Instance is an alias for types.Instance.
+type Instance = types.Instance
+
+// GetInstances returns info.Instances.
+func GetInstances(info *types.Info) map[*ast.Ident]Instance {
+ return info.Instances
+}
+
+// Context is an alias for types.Context.
+type Context = types.Context
+
+// Instantiate calls types.Instantiate.
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
+ return types.Instantiate(ctxt, typ, targs, validate)
+}
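Taken together with the go117 file above, this pair forms a build-tag shim: analysis code calls the typeparams wrappers instead of the go1.18-only API and compiles on both sides of the boundary. A brief sketch of such a caller, not part of the patch; countTypeParams is a hypothetical helper, and real callers live inside x/tools since the package is internal:

package main

import (
	"fmt"
	"go/ast"

	"golang.org/x/tools/internal/typeparams"
)

// countTypeParams reports how many type parameter fields a function declares,
// without touching ast.FuncType.TypeParams directly (that field only exists
// on go1.18+). ast.FieldList.NumFields is nil-safe, so the pre-1.18 nil
// result simply counts as zero.
func countTypeParams(fn *ast.FuncDecl) int {
	return typeparams.ForFuncType(fn.Type).NumFields()
}

func main() {
	fmt.Println(countTypeParams(&ast.FuncDecl{Type: &ast.FuncType{}})) // 0
}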
diff --git a/libgo/go/golang.org/x/tools/internal/typeparams/typeterm.go b/libgo/go/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 00000000000..7ddee28d987
--- /dev/null
+++ b/libgo/go/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,170 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+// βˆ…: (*term)(nil) == βˆ… // set of no types (empty set)
+// 𝓀: &term{} == 𝓀 // set of all types (𝓀niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+//
+type term struct {
+ tilde bool // valid if typ != nil
+ typ types.Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "βˆ…"
+ case x.typ == nil:
+ return "𝓀"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // βˆ… βŠ‚ x, y βŠ‚ 𝓀
+
+ return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x βˆͺ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // βˆ… βˆͺ βˆ… == βˆ…
+ case x == nil:
+ return y, nil // βˆ… βˆͺ y == y
+ case y == nil:
+ return x, nil // x βˆͺ βˆ… == x
+ case x.typ == nil:
+ return x, nil // 𝓀 βˆͺ y == 𝓀
+ case y.typ == nil:
+ return y, nil // x βˆͺ 𝓀 == 𝓀
+ }
+ // βˆ… βŠ‚ x, y βŠ‚ 𝓀
+
+ if x.disjoint(y) {
+ return x, y // x βˆͺ y == (x, y) if x ∩ y == βˆ…
+ }
+ // x.typ == y.typ
+
+ // ~t βˆͺ ~t == ~t
+ // ~t βˆͺ T == ~t
+ // T βˆͺ ~t == ~t
+ // T βˆͺ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return nil // βˆ… ∩ y == βˆ… and x ∩ βˆ… == βˆ…
+ case x.typ == nil:
+ return y // 𝓀 ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ 𝓀 == x
+ }
+ // βˆ… βŠ‚ x, y βŠ‚ 𝓀
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == βˆ… if x, y are disjoint
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ βˆ… == false
+ case x.typ == nil:
+ return true // t ∈ 𝓀 == true
+ }
+ // βˆ… βŠ‚ x βŠ‚ 𝓀
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x βŠ† y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // βˆ… βŠ† y == true
+ case y == nil:
+ return false // x βŠ† βˆ… == false since x != βˆ…
+ case y.typ == nil:
+ return true // x βŠ† 𝓀 == true
+ case x.typ == nil:
+ return false // 𝓀 βŠ† y == false since y != 𝓀
+ }
+ // βˆ… βŠ‚ x, y βŠ‚ 𝓀
+
+ if x.disjoint(y) {
+ return false // x βŠ† y == false if x ∩ y == βˆ…
+ }
+ // x.typ == y.typ
+
+ // ~t βŠ† ~t == true
+ // ~t βŠ† T == false
+ // T βŠ† ~t == true
+ // T βŠ† T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == βˆ….
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+ if debug && (x.typ == nil || y.typ == nil) {
+ panic("invalid argument(s)")
+ }
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !types.Identical(ux, uy)
+}
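The union/intersection/subset tables above are compact; the small sketch below (not part of the patch) spells them out for one concrete type. It assumes it lives in this package, since term is unexported, and exampleTermOps is a hypothetical helper:

package typeparams

import (
	"fmt"
	"go/types"
)

func exampleTermOps() {
	s := types.Typ[types.String]
	tT := &term{false, s} // {string}
	tR := &term{true, s}  // ~string: every type whose underlying type is string

	u, extra := tT.union(tR)      // {string} βˆͺ ~string == ~string, a single term
	fmt.Println(u, extra == nil)  // ~string true
	fmt.Println(tR.intersect(tT)) // string
	fmt.Println(tT.subsetOf(tR))  // true
}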
diff --git a/libgo/go/golang.org/x/tools/txtar/archive.go b/libgo/go/golang.org/x/tools/txtar/archive.go
new file mode 100644
index 00000000000..214256617b5
--- /dev/null
+++ b/libgo/go/golang.org/x/tools/txtar/archive.go
@@ -0,0 +1,140 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package txtar implements a trivial text-based file archive format.
+//
+// The goals for the format are:
+//
+// - be trivial enough to create and edit by hand.
+// - be able to store trees of text files describing go command test cases.
+// - diff nicely in git history and code reviews.
+//
+// Non-goals include being a completely general archive format,
+// storing binary data, storing file modes, storing special files like
+// symbolic links, and so on.
+//
+// Txtar format
+//
+// A txtar archive is zero or more comment lines and then a sequence of file entries.
+// Each file entry begins with a file marker line of the form "-- FILENAME --"
+// and is followed by zero or more file content lines making up the file data.
+// The comment or file content ends at the next file marker line.
+// The file marker line must begin with the three-byte sequence "-- "
+// and end with the three-byte sequence " --", but the enclosed
+// file name can be surrounded by additional white space,
+// all of which is stripped.
+//
+// If the txtar file is missing a trailing newline on the final line,
+// parsers should consider a final newline to be present anyway.
+//
+// There are no possible syntax errors in a txtar archive.
+package txtar
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "strings"
+)
+
+// An Archive is a collection of files.
+type Archive struct {
+ Comment []byte
+ Files []File
+}
+
+// A File is a single file in an archive.
+type File struct {
+ Name string // name of file ("foo/bar.txt")
+ Data []byte // text content of file
+}
+
+// Format returns the serialized form of an Archive.
+// It is assumed that the Archive data structure is well-formed:
+// a.Comment and all a.Files[i].Data contain no file marker lines,
+// and every a.Files[i].Name is non-empty.
+func Format(a *Archive) []byte {
+ var buf bytes.Buffer
+ buf.Write(fixNL(a.Comment))
+ for _, f := range a.Files {
+ fmt.Fprintf(&buf, "-- %s --\n", f.Name)
+ buf.Write(fixNL(f.Data))
+ }
+ return buf.Bytes()
+}
+
+// ParseFile parses the named file as an archive.
+func ParseFile(file string) (*Archive, error) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+ return Parse(data), nil
+}
+
+// Parse parses the serialized form of an Archive.
+// The returned Archive holds slices of data.
+func Parse(data []byte) *Archive {
+ a := new(Archive)
+ var name string
+ a.Comment, name, data = findFileMarker(data)
+ for name != "" {
+ f := File{name, nil}
+ f.Data, name, data = findFileMarker(data)
+ a.Files = append(a.Files, f)
+ }
+ return a
+}
+
+var (
+ newlineMarker = []byte("\n-- ")
+ marker = []byte("-- ")
+ markerEnd = []byte(" --")
+)
+
+// findFileMarker finds the next file marker in data,
+// extracts the file name, and returns the data before the marker,
+// the file name, and the data after the marker.
+// If there is no next marker, findFileMarker returns before = fixNL(data), name = "", after = nil.
+func findFileMarker(data []byte) (before []byte, name string, after []byte) {
+ var i int
+ for {
+ if name, after = isMarker(data[i:]); name != "" {
+ return data[:i], name, after
+ }
+ j := bytes.Index(data[i:], newlineMarker)
+ if j < 0 {
+ return fixNL(data), "", nil
+ }
+ i += j + 1 // positioned at start of new possible marker
+ }
+}
+
+// isMarker checks whether data begins with a file marker line.
+// If so, it returns the name from the line and the data after the line.
+// Otherwise it returns name == "" with an unspecified after.
+func isMarker(data []byte) (name string, after []byte) {
+ if !bytes.HasPrefix(data, marker) {
+ return "", nil
+ }
+ if i := bytes.IndexByte(data, '\n'); i >= 0 {
+ data, after = data[:i], data[i+1:]
+ }
+ if !(bytes.HasSuffix(data, markerEnd) && len(data) >= len(marker)+len(markerEnd)) {
+ return "", nil
+ }
+ return strings.TrimSpace(string(data[len(marker) : len(data)-len(markerEnd)])), after
+}
+
+// If data is empty or ends in \n, fixNL returns data.
+// Otherwise fixNL returns a new slice consisting of data with a final \n added.
+func fixNL(data []byte) []byte {
+ if len(data) == 0 || data[len(data)-1] == '\n' {
+ return data
+ }
+ d := make([]byte, len(data)+1)
+ copy(d, data)
+ d[len(data)] = '\n'
+ return d
+}
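The format description in the package comment is easiest to verify with a Parse/Format round trip. Unlike the internal typeparams shim, txtar is a public package (golang.org/x/tools/txtar), so the following brief sketch (not part of the patch) is an ordinary use of its exported API:

package main

import (
	"fmt"

	"golang.org/x/tools/txtar"
)

func main() {
	src := []byte("comment line\n-- hello.txt --\nhello, txtar\n")

	a := txtar.Parse(src)
	fmt.Printf("%q\n", a.Comment)       // "comment line\n"
	fmt.Printf("%q\n", a.Files[0].Name) // "hello.txt"
	fmt.Printf("%q\n", a.Files[0].Data) // "hello, txtar\n"

	// For well-formed input, Format reproduces the original bytes.
	fmt.Print(string(txtar.Format(a)))
}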