-rw-r--r--hack/make/.go-autogen4
-rw-r--r--vendor.mod59
-rw-r--r--vendor.sum217
-rw-r--r--vendor/cloud.google.com/go/.release-please-manifest-submodules.json107
-rw-r--r--vendor/cloud.google.com/go/.release-please-manifest.json3
-rw-r--r--vendor/cloud.google.com/go/CHANGES.md204
-rw-r--r--vendor/cloud.google.com/go/CONTRIBUTING.md2
-rw-r--r--vendor/cloud.google.com/go/README.md71
-rw-r--r--vendor/cloud.google.com/go/RELEASING.md12
-rw-r--r--vendor/cloud.google.com/go/compute/LICENSE202
-rw-r--r--vendor/cloud.google.com/go/compute/metadata/metadata.go50
-rw-r--r--vendor/cloud.google.com/go/compute/metadata/retry.go114
-rw-r--r--vendor/cloud.google.com/go/compute/metadata/retry_linux.go26
-rw-r--r--vendor/cloud.google.com/go/doc.go44
-rw-r--r--vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json322
-rw-r--r--vendor/cloud.google.com/go/release-please-config.json10
-rw-r--r--vendor/cloud.google.com/go/testing.md4
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/config.go41
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go78
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go5
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go60
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go9
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go7
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go180
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go12
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go21
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go8
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go19
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go1
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go1705
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go16
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go3
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go19
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go27
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go (renamed from vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go)4
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go (renamed from vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go)2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go (renamed from vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go)2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/doc.go44
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go35
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/session.go250
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go93
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/version.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go7
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/host.go44
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go36
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go59
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go134
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go9
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go278
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/doc.go12
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/errors.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sso/api.go1210
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sso/doc.go44
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sso/errors.go44
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sso/service.go104
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go86
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sts/api.go10
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sts/doc.go86
-rw-r--r--vendor/github.com/bsphere/le_go/.travis.yml2
-rw-r--r--vendor/github.com/bsphere/le_go/le.go34
-rw-r--r--vendor/github.com/fernet/fernet-go/fernet.go6
-rw-r--r--vendor/github.com/fernet/fernet-go/invalid.json14
-rw-r--r--vendor/github.com/google/certificate-transparency-go/.gitignore1
-rw-r--r--vendor/github.com/google/certificate-transparency-go/.golangci.yaml38
-rw-r--r--vendor/github.com/google/certificate-transparency-go/.travis.yml74
-rw-r--r--vendor/github.com/google/certificate-transparency-go/AUTHORS2
-rw-r--r--vendor/github.com/google/certificate-transparency-go/CHANGELOG.md458
-rw-r--r--vendor/github.com/google/certificate-transparency-go/CODEOWNERS1
-rw-r--r--vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS3
-rw-r--r--vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md16
-rw-r--r--vendor/github.com/google/certificate-transparency-go/README.md118
-rw-r--r--vendor/github.com/google/certificate-transparency-go/asn1/README.md7
-rw-r--r--vendor/github.com/google/certificate-transparency-go/asn1/asn1.go146
-rw-r--r--vendor/github.com/google/certificate-transparency-go/asn1/common.go10
-rw-r--r--vendor/github.com/google/certificate-transparency-go/asn1/marshal.go2
-rw-r--r--vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go324
-rw-r--r--vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto4
-rw-r--r--vendor/github.com/google/certificate-transparency-go/client/getentries.go13
-rw-r--r--vendor/github.com/google/certificate-transparency-go/client/logclient.go75
-rw-r--r--vendor/github.com/google/certificate-transparency-go/client/multilog.go30
-rw-r--r--vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml201
-rw-r--r--vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml217
-rw-r--r--vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml157
-rw-r--r--vendor/github.com/google/certificate-transparency-go/codecov.yml19
-rw-r--r--vendor/github.com/google/certificate-transparency-go/gometalinter.json28
-rw-r--r--vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go2
-rw-r--r--vendor/github.com/google/certificate-transparency-go/jsonclient/client.go73
-rw-r--r--vendor/github.com/google/certificate-transparency-go/proto_gen.go25
-rw-r--r--vendor/github.com/google/certificate-transparency-go/serialization.go132
-rw-r--r--vendor/github.com/google/certificate-transparency-go/signatures.go12
-rw-r--r--vendor/github.com/google/certificate-transparency-go/tls/signature.go4
-rw-r--r--vendor/github.com/google/certificate-transparency-go/tls/tls.go48
-rw-r--r--vendor/github.com/google/certificate-transparency-go/tls/types.go4
-rw-r--r--vendor/github.com/google/certificate-transparency-go/types.go87
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/README.md7
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go54
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/curves.go37
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/error.go6
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/names.go7
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/nilref_nil_darwin.go26
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/nilref_zero_darwin.go26
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go2
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go25
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go53
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go10
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go1
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go1
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/revoked.go11
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/root.go3
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go1
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go335
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go154
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go1
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/root_js.go19
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/root_linux.go1
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/root_nacl.go8
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go1
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go7
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/root_unix.go12
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/root_windows.go36
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/rpki.go242
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/sec1.go27
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/verify.go487
-rw-r--r--vendor/github.com/google/certificate-transparency-go/x509/x509.go791
-rw-r--r--vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE202
-rw-r--r--vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go151
-rw-r--r--vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go72
-rw-r--r--vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json3
-rw-r--r--vendor/github.com/googleapis/gax-go/v2/CHANGES.md18
-rw-r--r--vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go298
-rw-r--r--vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md30
-rw-r--r--vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go280
-rw-r--r--vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto46
-rw-r--r--vendor/github.com/googleapis/gax-go/v2/call_option.go101
-rw-r--r--vendor/github.com/googleapis/gax-go/v2/gax.go4
-rw-r--r--vendor/github.com/googleapis/gax-go/v2/internal/version.go33
-rw-r--r--vendor/github.com/googleapis/gax-go/v2/invoke.go15
-rw-r--r--vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go126
-rw-r--r--vendor/github.com/googleapis/gax-go/v2/release-please-config.json10
-rw-r--r--vendor/github.com/inconshreveable/mousetrap/LICENSE208
-rw-r--r--vendor/github.com/jmespath/go-jmespath/.travis.yml15
-rw-r--r--vendor/github.com/jmespath/go-jmespath/Makefile21
-rw-r--r--vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/manager.go8
-rw-r--r--vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/manager_deprecated.go11
-rw-r--r--vendor/github.com/moby/swarmkit/v2/agent/csi/volumes.go10
-rw-r--r--vendor/github.com/moby/swarmkit/v2/agent/exec/controller_stub.go3
-rw-r--r--vendor/github.com/moby/swarmkit/v2/agent/exec/executor.go4
-rw-r--r--vendor/github.com/moby/swarmkit/v2/agent/reporter.go5
-rw-r--r--vendor/github.com/moby/swarmkit/v2/agent/session.go1
-rw-r--r--vendor/github.com/moby/swarmkit/v2/agent/worker.go1
-rw-r--r--vendor/github.com/moby/swarmkit/v2/ca/certificates.go2
-rw-r--r--vendor/github.com/moby/swarmkit/v2/ca/config.go6
-rw-r--r--vendor/github.com/moby/swarmkit/v2/ca/server.go1
-rw-r--r--vendor/github.com/moby/swarmkit/v2/manager/allocator/cnmallocator/portallocator.go4
-rw-r--r--vendor/github.com/moby/swarmkit/v2/manager/allocator/network.go2
-rw-r--r--vendor/github.com/moby/swarmkit/v2/manager/scheduler/volumes.go7
-rw-r--r--vendor/github.com/moby/swarmkit/v2/manager/state/raft/raft.go8
-rw-r--r--vendor/github.com/moby/swarmkit/v2/manager/state/raft/storage/snapwrap.go9
-rw-r--r--vendor/github.com/moby/swarmkit/v2/manager/state/raft/storage/walwrap.go6
-rw-r--r--vendor/github.com/moby/swarmkit/v2/node/node.go9
-rw-r--r--vendor/github.com/moby/term/tc.go1
-rw-r--r--vendor/github.com/moby/term/term.go7
-rw-r--r--vendor/github.com/moby/term/term_windows.go4
-rw-r--r--vendor/github.com/moby/term/termios.go1
-rw-r--r--vendor/github.com/moby/term/termios_bsd.go1
-rw-r--r--vendor/github.com/moby/term/termios_nonbsd.go3
-rw-r--r--vendor/github.com/moby/term/windows/ansi_reader.go2
-rw-r--r--vendor/github.com/moby/term/windows/ansi_writer.go1
-rw-r--r--vendor/github.com/moby/term/windows/console.go1
-rw-r--r--vendor/github.com/moby/term/winsize.go1
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/collector.go6
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/counter.go2
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/desc.go5
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/get_pid.go26
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go (renamed from vendor/github.com/google/certificate-transparency-go/client/configpb/gen.go)16
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/go_collector.go20
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go17
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go (renamed from vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go)314
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/histogram.go2
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go651
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go32
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go18
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go28
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/labels.go6
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/metric.go112
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/num_threads.go25
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go22
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/observer.go2
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/process_collector.go10
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go26
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go4
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go18
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go20
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go39
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go119
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go39
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/registry.go118
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/value.go47
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/vec.go88
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/wrap.go4
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz.go1
-rw-r--r--vendor/github.com/prometheus/common/expfmt/openmetrics_create.go4
-rw-r--r--vendor/github.com/prometheus/common/model/time.go2
-rw-r--r--vendor/github.com/prometheus/procfs/.gitignore3
-rw-r--r--vendor/github.com/prometheus/procfs/.golangci.yml10
-rw-r--r--vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md4
-rw-r--r--vendor/github.com/prometheus/procfs/CONTRIBUTING.md4
-rw-r--r--vendor/github.com/prometheus/procfs/Makefile10
-rw-r--r--vendor/github.com/prometheus/procfs/Makefile.common89
-rw-r--r--vendor/github.com/prometheus/procfs/SECURITY.md2
-rw-r--r--vendor/github.com/prometheus/procfs/arp.go45
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo.go5
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_armx.go1
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go1
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_others.go4
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go1
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go1
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_s390x.go1
-rw-r--r--vendor/github.com/prometheus/procfs/cpuinfo_x86.go1
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures.ttar7673
-rw-r--r--vendor/github.com/prometheus/procfs/internal/fs/fs.go2
-rw-r--r--vendor/github.com/prometheus/procfs/internal/util/parse.go6
-rw-r--r--vendor/github.com/prometheus/procfs/internal/util/readfile.go11
-rw-r--r--vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go8
-rw-r--r--vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go3
-rw-r--r--vendor/github.com/prometheus/procfs/ipvs.go3
-rw-r--r--vendor/github.com/prometheus/procfs/kernel_random.go1
-rw-r--r--vendor/github.com/prometheus/procfs/loadavg.go2
-rw-r--r--vendor/github.com/prometheus/procfs/mdstat.go10
-rw-r--r--vendor/github.com/prometheus/procfs/net_conntrackstat.go12
-rw-r--r--vendor/github.com/prometheus/procfs/net_dev.go8
-rw-r--r--vendor/github.com/prometheus/procfs/net_ip_socket.go2
-rw-r--r--vendor/github.com/prometheus/procfs/net_protocols.go4
-rw-r--r--vendor/github.com/prometheus/procfs/net_softnet.go8
-rw-r--r--vendor/github.com/prometheus/procfs/net_xfrm.go (renamed from vendor/github.com/prometheus/procfs/xfrm.go)9
-rw-r--r--vendor/github.com/prometheus/procfs/netstat.go8
-rw-r--r--vendor/github.com/prometheus/procfs/proc.go10
-rw-r--r--vendor/github.com/prometheus/procfs/proc_cgroup.go6
-rw-r--r--vendor/github.com/prometheus/procfs/proc_cgroups.go98
-rw-r--r--vendor/github.com/prometheus/procfs/proc_environ.go2
-rw-r--r--vendor/github.com/prometheus/procfs/proc_fdinfo.go3
-rw-r--r--vendor/github.com/prometheus/procfs/proc_limits.go2
-rw-r--r--vendor/github.com/prometheus/procfs/proc_maps.go12
-rw-r--r--vendor/github.com/prometheus/procfs/proc_netstat.go440
-rw-r--r--vendor/github.com/prometheus/procfs/proc_psi.go14
-rw-r--r--vendor/github.com/prometheus/procfs/proc_smaps.go23
-rw-r--r--vendor/github.com/prometheus/procfs/proc_snmp.go353
-rw-r--r--vendor/github.com/prometheus/procfs/proc_snmp6.go381
-rw-r--r--vendor/github.com/prometheus/procfs/proc_stat.go11
-rw-r--r--vendor/github.com/prometheus/procfs/proc_status.go32
-rw-r--r--vendor/github.com/prometheus/procfs/proc_sys.go51
-rw-r--r--vendor/github.com/prometheus/procfs/schedstat.go6
-rw-r--r--vendor/github.com/prometheus/procfs/slab.go2
-rw-r--r--vendor/github.com/prometheus/procfs/softirqs.go160
-rw-r--r--vendor/github.com/prometheus/procfs/stat.go10
-rw-r--r--vendor/github.com/prometheus/procfs/vm.go6
-rw-r--r--vendor/github.com/prometheus/procfs/zoneinfo.go5
-rw-r--r--vendor/github.com/spf13/cobra/.golangci.yml14
-rw-r--r--vendor/github.com/spf13/cobra/.travis.yml28
-rw-r--r--vendor/github.com/spf13/cobra/CHANGELOG.md51
-rw-r--r--vendor/github.com/spf13/cobra/MAINTAINERS13
-rw-r--r--vendor/github.com/spf13/cobra/Makefile13
-rw-r--r--vendor/github.com/spf13/cobra/README.md690
-rw-r--r--vendor/github.com/spf13/cobra/active_help.go63
-rw-r--r--vendor/github.com/spf13/cobra/active_help.md157
-rw-r--r--vendor/github.com/spf13/cobra/args.go48
-rw-r--r--vendor/github.com/spf13/cobra/bash_completions.go87
-rw-r--r--vendor/github.com/spf13/cobra/bash_completions.md2
-rw-r--r--vendor/github.com/spf13/cobra/bash_completionsV2.go383
-rw-r--r--vendor/github.com/spf13/cobra/cobra.go25
-rw-r--r--vendor/github.com/spf13/cobra/command.go192
-rw-r--r--vendor/github.com/spf13/cobra/command_notwin.go15
-rw-r--r--vendor/github.com/spf13/cobra/command_win.go15
-rw-r--r--vendor/github.com/spf13/cobra/completions.go (renamed from vendor/github.com/spf13/cobra/custom_completions.go)482
-rw-r--r--vendor/github.com/spf13/cobra/fish_completions.go199
-rw-r--r--vendor/github.com/spf13/cobra/flag_groups.go224
-rw-r--r--vendor/github.com/spf13/cobra/powershell_completions.go83
-rw-r--r--vendor/github.com/spf13/cobra/projects_using_cobra.md34
-rw-r--r--vendor/github.com/spf13/cobra/shell_completions.go14
-rw-r--r--vendor/github.com/spf13/cobra/shell_completions.md119
-rw-r--r--vendor/github.com/spf13/cobra/user_guide.md695
-rw-r--r--vendor/github.com/spf13/cobra/zsh_completions.go103
-rw-r--r--vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go60
-rw-r--r--vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go10
-rw-r--r--vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go10
-rw-r--r--vendor/go.etcd.io/etcd/server/v3/wal/decoder.go28
-rw-r--r--vendor/go.etcd.io/etcd/server/v3/wal/repair.go2
-rw-r--r--vendor/go.etcd.io/etcd/server/v3/wal/wal.go29
-rw-r--r--vendor/go.uber.org/atomic/.gitignore3
-rw-r--r--vendor/go.uber.org/atomic/.travis.yml27
-rw-r--r--vendor/go.uber.org/atomic/CHANGELOG.md44
-rw-r--r--vendor/go.uber.org/atomic/Makefile1
-rw-r--r--vendor/go.uber.org/atomic/README.md4
-rw-r--r--vendor/go.uber.org/atomic/bool.go20
-rw-r--r--vendor/go.uber.org/atomic/bool_ext.go2
-rw-r--r--vendor/go.uber.org/atomic/duration.go20
-rw-r--r--vendor/go.uber.org/atomic/duration_ext.go8
-rw-r--r--vendor/go.uber.org/atomic/error.go12
-rw-r--r--vendor/go.uber.org/atomic/float64.go19
-rw-r--r--vendor/go.uber.org/atomic/float64_ext.go34
-rw-r--r--vendor/go.uber.org/atomic/gen.go1
-rw-r--r--vendor/go.uber.org/atomic/int32.go24
-rw-r--r--vendor/go.uber.org/atomic/int64.go24
-rw-r--r--vendor/go.uber.org/atomic/string.go12
-rw-r--r--vendor/go.uber.org/atomic/string_ext.go2
-rw-r--r--vendor/go.uber.org/atomic/time.go (renamed from vendor/go.uber.org/multierr/go113.go)55
-rw-r--r--vendor/go.uber.org/atomic/time_ext.go (renamed from vendor/go.uber.org/zap/global_go112.go)20
-rw-r--r--vendor/go.uber.org/atomic/uint32.go24
-rw-r--r--vendor/go.uber.org/atomic/uint64.go24
-rw-r--r--vendor/go.uber.org/atomic/uintptr.go102
-rw-r--r--vendor/go.uber.org/atomic/unsafe_pointer.go58
-rw-r--r--vendor/go.uber.org/multierr/.travis.yml23
-rw-r--r--vendor/go.uber.org/multierr/CHANGELOG.md12
-rw-r--r--vendor/go.uber.org/multierr/LICENSE.txt2
-rw-r--r--vendor/go.uber.org/multierr/Makefile6
-rw-r--r--vendor/go.uber.org/multierr/README.md8
-rw-r--r--vendor/go.uber.org/multierr/error.go225
-rw-r--r--vendor/go.uber.org/zap/.readme.tmpl12
-rw-r--r--vendor/go.uber.org/zap/CHANGELOG.md104
-rw-r--r--vendor/go.uber.org/zap/README.md44
-rw-r--r--vendor/go.uber.org/zap/buffer/buffer.go18
-rw-r--r--vendor/go.uber.org/zap/global.go1
-rw-r--r--vendor/go.uber.org/zap/level.go17
-rw-r--r--vendor/go.uber.org/zap/logger.go78
-rw-r--r--vendor/go.uber.org/zap/options.go8
-rw-r--r--vendor/go.uber.org/zap/stacktrace.go175
-rw-r--r--vendor/go.uber.org/zap/sugar.go4
-rw-r--r--vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go188
-rw-r--r--vendor/go.uber.org/zap/zapcore/clock.go48
-rw-r--r--vendor/go.uber.org/zap/zapcore/console_encoder.go6
-rw-r--r--vendor/go.uber.org/zap/zapcore/encoder.go21
-rw-r--r--vendor/go.uber.org/zap/zapcore/entry.go10
-rw-r--r--vendor/go.uber.org/zap/zapcore/error.go2
-rw-r--r--vendor/go.uber.org/zap/zapcore/json_encoder.go104
-rw-r--r--vendor/go.uber.org/zap/zapcore/level.go12
-rw-r--r--vendor/go.uber.org/zap/zapcore/reflected_encoder.go (renamed from vendor/go.uber.org/zap/global_prego112.go)25
-rw-r--r--vendor/go.uber.org/zap/zapcore/sampler.go27
-rw-r--r--vendor/google.golang.org/api/googleapi/googleapi.go468
-rw-r--r--vendor/google.golang.org/api/googleapi/types.go202
-rw-r--r--vendor/google.golang.org/api/internal/creds.go14
-rw-r--r--vendor/google.golang.org/api/internal/settings.go54
-rw-r--r--vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE27
-rw-r--r--vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA14
-rw-r--r--vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go248
-rw-r--r--vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go17
-rw-r--r--vendor/google.golang.org/api/internal/version.go8
-rw-r--r--vendor/google.golang.org/api/option/credentials_go19.go23
-rw-r--r--vendor/google.golang.org/api/option/credentials_notgo19.go22
-rw-r--r--vendor/google.golang.org/api/option/internaloption/internaloption.go28
-rw-r--r--vendor/google.golang.org/api/option/option.go22
-rw-r--r--vendor/google.golang.org/api/transport/cert/default_cert.go123
-rw-r--r--vendor/google.golang.org/api/transport/cert/enterprise_cert.go56
-rw-r--r--vendor/google.golang.org/api/transport/cert/secureconnect_cert.go123
-rw-r--r--vendor/google.golang.org/api/transport/grpc/dial.go74
-rw-r--r--vendor/google.golang.org/api/transport/grpc/dial_appengine.go1
-rw-r--r--vendor/google.golang.org/api/transport/grpc/dial_socketopt.go1
-rw-r--r--vendor/google.golang.org/api/transport/internal/dca/dca.go18
-rw-r--r--vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go336
-rw-r--r--vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go1278
-rw-r--r--vendor/google.golang.org/grpc/balancer/base/balancer.go4
-rw-r--r--vendor/google.golang.org/grpc/clientconn.go4
-rw-r--r--vendor/google.golang.org/grpc/credentials/google/xds.go54
-rw-r--r--vendor/google.golang.org/grpc/dialoptions.go13
-rw-r--r--vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go2
-rw-r--r--vendor/google.golang.org/grpc/internal/binarylog/binarylog.go4
-rw-r--r--vendor/google.golang.org/grpc/internal/envconfig/xds.go2
-rw-r--r--vendor/google.golang.org/grpc/internal/internal.go70
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/handler_server.go22
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/http2_client.go45
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/http2_server.go28
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/http_util.go5
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/transport.go6
-rw-r--r--vendor/google.golang.org/grpc/regenerate.sh7
-rw-r--r--vendor/google.golang.org/grpc/resolver/map.go55
-rw-r--r--vendor/google.golang.org/grpc/server.go54
-rw-r--r--vendor/google.golang.org/grpc/stream.go64
-rw-r--r--vendor/google.golang.org/grpc/version.go2
-rw-r--r--vendor/google.golang.org/protobuf/AUTHORS3
-rw-r--r--vendor/google.golang.org/protobuf/CONTRIBUTORS3
-rw-r--r--vendor/google.golang.org/protobuf/encoding/protojson/decode.go174
-rw-r--r--vendor/google.golang.org/protobuf/encoding/protojson/encode.go51
-rw-r--r--vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go76
-rw-r--r--vendor/google.golang.org/protobuf/encoding/prototext/decode.go116
-rw-r--r--vendor/google.golang.org/protobuf/encoding/prototext/encode.go39
-rw-r--r--vendor/google.golang.org/protobuf/encoding/protowire/wire.go4
-rw-r--r--vendor/google.golang.org/protobuf/internal/descfmt/stringer.go66
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/defval/default.go78
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go7
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go96
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/text/decode.go30
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go6
-rw-r--r--vendor/google.golang.org/protobuf/internal/encoding/text/doc.go4
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/build.go19
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/desc.go380
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go36
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go80
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go167
-rw-r--r--vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go136
-rw-r--r--vendor/google.golang.org/protobuf/internal/filetype/build.go87
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/api_export.go42
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/checkinit.go12
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_extension.go36
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_field.go90
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_map.go20
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_message.go30
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/codec_tables.go290
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/convert.go228
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/convert_list.go42
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/convert_map.go32
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/decode.go21
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/enum.go10
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/extension.go26
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go57
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/legacy_export.go18
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go100
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/legacy_message.go122
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/merge.go32
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/message.go41
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/message_reflect.go74
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go118
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/validate.go50
-rw-r--r--vendor/google.golang.org/protobuf/internal/impl/weak.go16
-rw-r--r--vendor/google.golang.org/protobuf/internal/order/order.go16
-rw-r--r--vendor/google.golang.org/protobuf/internal/order/range.go22
-rw-r--r--vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go6
-rw-r--r--vendor/google.golang.org/protobuf/internal/version/version.go54
-rw-r--r--vendor/google.golang.org/protobuf/proto/decode.go3
-rw-r--r--vendor/google.golang.org/protobuf/proto/doc.go21
-rw-r--r--vendor/google.golang.org/protobuf/proto/encode.go5
-rw-r--r--vendor/google.golang.org/protobuf/proto/equal.go50
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go6
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go32
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoreflect/source.go1
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoreflect/type.go1
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go2
-rw-r--r--vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go2
-rw-r--r--vendor/google.golang.org/protobuf/runtime/protoimpl/version.go8
-rw-r--r--vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go2
-rw-r--r--vendor/k8s.io/klog/v2/.gitignore17
-rw-r--r--vendor/k8s.io/klog/v2/CONTRIBUTING.md22
-rw-r--r--vendor/k8s.io/klog/v2/LICENSE191
-rw-r--r--vendor/k8s.io/klog/v2/OWNERS14
-rw-r--r--vendor/k8s.io/klog/v2/README.md118
-rw-r--r--vendor/k8s.io/klog/v2/RELEASE.md9
-rw-r--r--vendor/k8s.io/klog/v2/SECURITY.md22
-rw-r--r--vendor/k8s.io/klog/v2/SECURITY_CONTACTS20
-rw-r--r--vendor/k8s.io/klog/v2/code-of-conduct.md3
-rw-r--r--vendor/k8s.io/klog/v2/contextual.go186
-rw-r--r--vendor/k8s.io/klog/v2/exit.go69
-rw-r--r--vendor/k8s.io/klog/v2/imports.go38
-rw-r--r--vendor/k8s.io/klog/v2/internal/buffer/buffer.go159
-rw-r--r--vendor/k8s.io/klog/v2/internal/clock/README.md7
-rw-r--r--vendor/k8s.io/klog/v2/internal/clock/clock.go178
-rw-r--r--vendor/k8s.io/klog/v2/internal/dbg/dbg.go42
-rw-r--r--vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go253
-rw-r--r--vendor/k8s.io/klog/v2/internal/severity/severity.go58
-rw-r--r--vendor/k8s.io/klog/v2/k8s_references.go158
-rw-r--r--vendor/k8s.io/klog/v2/klog.go1689
-rw-r--r--vendor/k8s.io/klog/v2/klog_file.go130
-rw-r--r--vendor/k8s.io/klog/v2/klog_file_others.go19
-rw-r--r--vendor/k8s.io/klog/v2/klog_file_windows.go34
-rw-r--r--vendor/k8s.io/klog/v2/klogr.go87
-rw-r--r--vendor/modules.txt104
464 files changed, 26758 insertions, 13684 deletions
diff --git a/hack/make/.go-autogen b/hack/make/.go-autogen
index bd6215f6a3..07205f5f4b 100644
--- a/hack/make/.go-autogen
+++ b/hack/make/.go-autogen
@@ -1,9 +1,5 @@
#!/usr/bin/env bash
-source hack/dockerfile/install/runc.installer
-source hack/dockerfile/install/tini.installer
-source hack/dockerfile/install/containerd.installer
-
LDFLAGS="${LDFLAGS} \
-X \"github.com/docker/docker/dockerversion.Version=${VERSION}\" \
-X \"github.com/docker/docker/dockerversion.GitCommit=${GITCOMMIT}\" \
diff --git a/vendor.mod b/vendor.mod
index 562b8bf3dd..5db9468978 100644
--- a/vendor.mod
+++ b/vendor.mod
@@ -7,7 +7,7 @@ module github.com/docker/docker
go 1.18
require (
- cloud.google.com/go v0.93.3
+ cloud.google.com/go/compute v1.7.0
cloud.google.com/go/logging v1.4.2
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1
github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0
@@ -15,8 +15,8 @@ require (
github.com/Microsoft/hcsshim v0.9.5
github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91
github.com/armon/go-radix v1.0.1-0.20221118154546-54df44f2176c
- github.com/aws/aws-sdk-go v1.31.6
- github.com/bsphere/le_go v0.0.0-20170215134836-7a984a84b549
+ github.com/aws/aws-sdk-go v1.37.0
+ github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8
github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5
github.com/containerd/cgroups v1.0.4
github.com/containerd/containerd v1.6.10
@@ -54,13 +54,13 @@ require (
github.com/moby/locker v1.0.1
github.com/moby/patternmatcher v0.5.0
github.com/moby/pubsub v1.0.0
- github.com/moby/swarmkit/v2 v2.0.0-20221102165002-6341884e5fc9
+ github.com/moby/swarmkit/v2 v2.0.0-20221123162438-b17f02f0a054
github.com/moby/sys/mount v0.3.3
github.com/moby/sys/mountinfo v0.6.2
github.com/moby/sys/sequential v0.5.0
github.com/moby/sys/signal v0.7.0
github.com/moby/sys/symlink v0.2.0
- github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
+ github.com/moby/term v0.0.0-20221120202655-abb19827d345
github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
@@ -69,10 +69,10 @@ require (
github.com/opencontainers/selinux v1.10.2
github.com/pelletier/go-toml v1.9.4
github.com/pkg/errors v0.9.1
- github.com/prometheus/client_golang v1.12.1
+ github.com/prometheus/client_golang v1.13.0
github.com/rootless-containers/rootlesskit v1.1.0
github.com/sirupsen/logrus v1.9.0
- github.com/spf13/cobra v1.1.3
+ github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3
@@ -85,12 +85,13 @@ require (
golang.org/x/sync v0.1.0
golang.org/x/sys v0.2.0
golang.org/x/time v0.1.0
- google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21
- google.golang.org/grpc v1.47.0
+ google.golang.org/genproto v0.0.0-20220706185917-7780775163c4
+ google.golang.org/grpc v1.48.0
gotest.tools/v3 v3.4.0
)
require (
+ cloud.google.com/go v0.102.1 // indirect
code.cloudfoundry.org/clock v1.0.0 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect
@@ -107,7 +108,7 @@ require (
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/felixge/httpsnoop v1.0.2 // indirect
- github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e // indirect
+ github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/go-logr/logr v1.2.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -116,9 +117,10 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/btree v1.1.2 // indirect
- github.com/google/certificate-transparency-go v1.1.2 // indirect; replaced; see "replace" section at the bottom of this file for the actual version.
+ github.com/google/certificate-transparency-go v1.1.4 // indirect; replaced; see "replace" section at the bottom of this file for the actual version.
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
- github.com/googleapis/gax-go/v2 v2.0.5 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
+ github.com/googleapis/gax-go/v2 v2.4.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
@@ -127,23 +129,23 @@ require (
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
- github.com/inconshreveable/mousetrap v1.0.0 // indirect
- github.com/jmespath/go-jmespath v0.3.0 // indirect
+ github.com/inconshreveable/mousetrap v1.0.1 // indirect
+ github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee // indirect
github.com/philhofer/fwd v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
- github.com/prometheus/common v0.32.1 // indirect
- github.com/prometheus/procfs v0.7.3 // indirect
+ github.com/prometheus/common v0.37.0 // indirect
+ github.com/prometheus/procfs v0.8.0 // indirect
github.com/rexray/gocsi v1.2.2 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/tinylib/msgp v1.1.0 // indirect
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
- go.etcd.io/etcd/client/pkg/v3 v3.5.2 // indirect
- go.etcd.io/etcd/pkg/v3 v3.5.2 // indirect
- go.etcd.io/etcd/raft/v3 v3.5.2 // indirect
- go.etcd.io/etcd/server/v3 v3.5.2 // indirect
+ go.etcd.io/etcd/client/pkg/v3 v3.5.6 // indirect
+ go.etcd.io/etcd/pkg/v3 v3.5.6 // indirect
+ go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
+ go.etcd.io/etcd/server/v3 v3.5.6 // indirect
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 // indirect
@@ -155,19 +157,14 @@ require (
go.opentelemetry.io/otel/sdk v1.4.1 // indirect
go.opentelemetry.io/otel/trace v1.4.1 // indirect
go.opentelemetry.io/proto/otlp v0.12.0 // indirect
- go.uber.org/atomic v1.7.0 // indirect
- go.uber.org/multierr v1.6.0 // indirect
- go.uber.org/zap v1.17.0 // indirect
+ go.uber.org/atomic v1.9.0 // indirect
+ go.uber.org/multierr v1.8.0 // indirect
+ go.uber.org/zap v1.21.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/oauth2 v0.1.0 // indirect
golang.org/x/text v0.4.0 // indirect
- google.golang.org/api v0.54.0 // indirect
+ google.golang.org/api v0.93.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/protobuf v1.28.0 // indirect
+ google.golang.org/protobuf v1.28.1 // indirect
+ k8s.io/klog/v2 v2.80.1 // indirect
)
-
-// Resolve dependency hell with github.com/cloudflare/cfssl (transitive via
-// swarmkit) by pinning the certificate-transparency-go version. Remove once
-// module go.etcd.io/etcd/server/v3 has upgraded its dependency on
-// go.opentelemetry.io/otel to v1.
-replace github.com/google/certificate-transparency-go => github.com/google/certificate-transparency-go v1.0.20
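Aside (not part of the commit): the replace directive deleted above pins every use of a module, including transitive ones, to a fixed version. A minimal sketch with a hypothetical module path:

    module example.com/app

    go 1.18

    require github.com/google/certificate-transparency-go v1.1.4

    // Force all importers, including indirect ones such as cfssl via
    // swarmkit, to resolve to the older pinned release.
    replace github.com/google/certificate-transparency-go => github.com/google/certificate-transparency-go v1.0.20

With the directive removed, as in this diff, resolution falls back to the version selected in the require block (v1.1.4 here).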
diff --git a/vendor.sum b/vendor.sum
index 2f80995a99..a16d48c971 100644
--- a/vendor.sum
+++ b/vendor.sum
@@ -24,17 +24,31 @@ cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAV
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
-cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.102.1 h1:vpK6iQWv/2uUeFJth4/cBHsQAGjn1iIE6AAlxipRaA0=
+cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/logging v1.4.2 h1:Mu2Q75VBDQlW1HlBMjTX4X84UFR73G1TiLlRYc/b7tA=
cloud.google.com/go/logging v1.4.2/go.mod h1:jco9QZSx8HiVVqLJReq7z7bVdj0P1Jb9PDFs63T+axo=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -46,6 +60,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o=
code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -135,9 +150,11 @@ github.com/armon/go-radix v1.0.1-0.20221118154546-54df44f2176c h1:651/eoCRnQ7YtS
github.com/armon/go-radix v1.0.1-0.20221118154546-54df44f2176c/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/aws/aws-sdk-go v1.31.6 h1:nKjQbpXhdImctBh1e0iLg9iQW/X297LPPuY/9f92R2k=
-github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.37.0 h1:GzFnhOIsrGyQ69s7VgqtrG2BG8v7X7vwB3Xpbd/DBBk=
+github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -152,8 +169,8 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/bsphere/le_go v0.0.0-20170215134836-7a984a84b549 h1:QJJnIXZ34OUK5JfWlq1l3n0SfO9g1amiLFIcTECgpq0=
-github.com/bsphere/le_go v0.0.0-20170215134836-7a984a84b549/go.mod h1:313oBJKClgRD/+t59eUnrfG7/xHXZJd7v+SjCacDm4Q=
+github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8 h1:fcONpniVVbh9+duVZYYbJuc+yGGdLRxTqpk7pTTz/qI=
+github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8/go.mod h1:GrjfimWtH8h8EqJSfbO+sTQYV/fAjL/VN7dMeU8XP2Y=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
@@ -341,6 +358,7 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
@@ -416,8 +434,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o=
github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e h1:P10tZmVD2XclAaT9l7OduMH1OLFzTa1wUuUqHZnEdI0=
-github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e/go.mod h1:2H9hjfbpSMHwY503FclkV/lZTBh2YlOmLLSda12uL8c=
+github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee h1:v6Eju/FhxsACGNipFEPBZZAzGr1F/jlRQr1qiBw2nEE=
+github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee/go.mod h1:2H9hjfbpSMHwY503FclkV/lZTBh2YlOmLLSda12uL8c=
github.com/fluent/fluent-logger-golang v1.9.0 h1:zUdY44CHX2oIUc7VTNZc+4m+ORuO/mldQDA7czhWXEg=
github.com/fluent/fluent-logger-golang v1.9.0/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
@@ -442,9 +460,11 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
@@ -469,7 +489,6 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
@@ -492,6 +511,7 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2 h1:xisWqjiKEff2B0KfFYGpCqc3M3zdTz+OHQHRc09FeYk=
github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -535,8 +555,8 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/certificate-transparency-go v1.0.20 h1:azETE79toaBOyp+StoEBy8atzQujL0PyBPEmsEeDCXI=
-github.com/google/certificate-transparency-go v1.0.20/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
+github.com/google/certificate-transparency-go v1.1.4 h1:hCyXHDbtqlr/lMXU0D4WgbalXL0Zk4dSWWMbPV8VrqY=
+github.com/google/certificate-transparency-go v1.1.4/go.mod h1:D6lvbfwckhNrbM9WVl1EVeMOyzC19mpIjMOI4nxBHtQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -550,6 +570,7 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
@@ -583,12 +604,21 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -666,8 +696,9 @@ github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
+github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062 h1:G1+wBT0dwjIrBdLy0MIG0i+E4CQxEnedHXdauJEIH6g=
github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
@@ -675,8 +706,10 @@ github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6t
github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
-github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
@@ -766,8 +799,8 @@ github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YO
github.com/moby/pubsub v1.0.0 h1:jkp/imWsmJz2f6LyFsk7EkVeN2HxR/HTTOY8kHrsxfA=
github.com/moby/pubsub v1.0.0/go.mod h1:bXSO+3h5MNXXCaEG+6/NlAIk7MMZbySZlnB+cUQhKKc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/swarmkit/v2 v2.0.0-20221102165002-6341884e5fc9 h1:d/XCmjx1zKZdzlBX90kSGDex7V2GE2jdGDr9nXYZg/Q=
-github.com/moby/swarmkit/v2 v2.0.0-20221102165002-6341884e5fc9/go.mod h1:/so6Lct4y1x14UprW/loFsOe6xoXVTlvh25V36ULXNQ=
+github.com/moby/swarmkit/v2 v2.0.0-20221123162438-b17f02f0a054 h1:ny1MdKQaQI/i+i7YrwO2zPpfW2ET1QBR59HqZd+ozOI=
+github.com/moby/swarmkit/v2 v2.0.0-20221123162438-b17f02f0a054/go.mod h1:rUZl7gR5C7156P2qEE6wnx4riFgBjqmsQaUqo/WeyBw=
github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs=
github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
@@ -786,8 +819,8 @@ github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZ
github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU=
+github.com/moby/term v0.0.0-20221120202655-abb19827d345/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -899,8 +932,10 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
+github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -916,8 +951,9 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -929,8 +965,9 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rexray/gocsi v1.2.2 h1:h9F/eSizORihN+XT+mxhq7ClZ3cYo1L9RvasN6dKz8U=
github.com/rexray/gocsi v1.2.2/go.mod h1:X9oJHHpIVGmfKdK8e+JuCXafggk7HxL9mWQOgrsoHpo=
@@ -942,6 +979,7 @@ github.com/rootless-containers/rootlesskit v1.1.0/go.mod h1:H+o9ndNe7tS91WqU0/+v
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
@@ -977,8 +1015,9 @@ github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
+github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -1001,8 +1040,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.5 h1:s5PTfem8p8EbKQOctVV53k6jCJt3UX4IEJzwh+C324Q=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -1066,23 +1105,23 @@ go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
+go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/pkg/v3 v3.5.2 h1:4hzqQ6hIb3blLyQ8usCU4h3NghkqcsohEQ3o3VetYxE=
-go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/pkg/v3 v3.5.6 h1:TXQWYceBKqLp4sa87rcPs11SXxUA/mHwH975v+BDvLU=
+go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI=
+go.etcd.io/etcd/client/v2 v2.305.6/go.mod h1:BHha8XJGe8vCIBfWBpbBLVZ4QjOIlfoouvOwydu63E0=
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
-go.etcd.io/etcd/client/v3 v3.5.2/go.mod h1:kOOaWFFgHygyT0WlSmL8TJiXmMysO/nNUlEsSsN6W4o=
+go.etcd.io/etcd/client/v3 v3.5.6/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk=
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
-go.etcd.io/etcd/pkg/v3 v3.5.2 h1:YZUojdoPhOyl5QILYnR8LTUbbNefu/sV4ma+ZMr2tto=
-go.etcd.io/etcd/pkg/v3 v3.5.2/go.mod h1:zsXz+9D/kijzRiG/UnFGDTyHKcVp0orwiO8iMLAi+k0=
+go.etcd.io/etcd/pkg/v3 v3.5.6 h1:k1GZrGrfMHy5/cg2bxNGsmLTFisatyhDYCFLRuaavWg=
+go.etcd.io/etcd/pkg/v3 v3.5.6/go.mod h1:qATwUzDb6MLyGWq2nUj+jwXqZJcxkCuabh0P7Cuff3k=
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
-go.etcd.io/etcd/raft/v3 v3.5.2 h1:uCC37qOXqBvKqTGHGyhASsaCsnTuJugl1GvneJNwHWo=
-go.etcd.io/etcd/raft/v3 v3.5.2/go.mod h1:G6pCP1sFgbjod7/KnEHY0vHUViqxjkdt6AiKsD0GRr8=
+go.etcd.io/etcd/raft/v3 v3.5.6 h1:tOmx6Ym6rn2GpZOrvTGJZciJHek6RnC3U/zNInzIN50=
+go.etcd.io/etcd/raft/v3 v3.5.6/go.mod h1:wL8kkRGx1Hp8FmZUuHfL3K2/OaGIDaXGr1N7i2G07J0=
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
-go.etcd.io/etcd/server/v3 v3.5.2 h1:B6ytJvS4Fmt8nkjzS2/8POf4tuPhFMluE0lWd4dx/7U=
-go.etcd.io/etcd/server/v3 v3.5.2/go.mod h1:mlG8znIEz4N/28GABrohZCBM11FqgGVQcpbcyJgh0j0=
+go.etcd.io/etcd/server/v3 v3.5.6 h1:RXuwaB8AMiV62TqcqIt4O4bG8NWjsxOkDJVT3MZI5Ds=
+go.etcd.io/etcd/server/v3 v3.5.6/go.mod h1:6/Gfe8XTGXQJgLYQ65oGKMfPivb2EASLUSMSWN9Sroo=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -1094,6 +1133,7 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 h1:n9b7AAdbQtQ0k9dm0Dm2/KUcUqtG8i2O15KzNaDze8c=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0/go.mod h1:LsankqVDx4W+RhZNA5uWarULII/MBhF5qwCYxTuyXjs=
@@ -1103,6 +1143,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 h1:SLme4Porm+UwX0DdHMxlwRt7FzPSE0sys81bet2o0pU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0/go.mod h1:tLYsuf2v8fZreBVwp9gVMhefZlLFZaUiNVSq8QxXRII=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk=
go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g=
@@ -1110,9 +1151,11 @@ go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdT
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1/go.mod h1:o5RW5o2pKpJLD5dNTCmjF1DorYwMeFJmb/rKr5sLaa8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk=
@@ -1122,33 +1165,40 @@ go.opentelemetry.io/otel/metric v0.27.0 h1:HhJPsGhJoKRSegPQILFbODU56NS/L1UE4fS1s
go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
+go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
go.opentelemetry.io/otel/sdk v1.4.1 h1:J7EaW71E0v87qflB4cDolaqq3AcujGrtyIPGQoZOB0Y=
go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE=
go.opentelemetry.io/otel/trace v1.4.1 h1:O+16qcdTrT7zxv2J6GejTPFinSwA++cYerC5iSiF8EQ=
go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c=
go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
+go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
+go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -1166,6 +1216,7 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1259,8 +1310,16 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1279,6 +1338,12 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y=
golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1292,6 +1357,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1392,14 +1458,27 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
@@ -1408,6 +1487,7 @@ golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXR
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1502,6 +1582,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -1529,8 +1612,22 @@ google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59t
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
-google.golang.org/api v0.54.0 h1:ECJUVngj71QI6XEm7b1sAf8BljU5inEhMbKPR8Lxhhk=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.93.0 h1:T2xt9gi0gHdxdnRkVQhT8mIvPaXKNsDNWz+L696M66M=
+google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1585,6 +1682,7 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
@@ -1598,10 +1696,37 @@ google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I=
-google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220706185917-7780775163c4 h1:7YDGQC/0sigNGzsEWyb9s72jTxlFdwVEYNJHbfQ+Dtg=
+google.golang.org/genproto v0.0.0-20220706185917-7780775163c4/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1631,13 +1756,17 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
+google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1652,8 +1781,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1690,6 +1820,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
@@ -1742,6 +1873,8 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
+k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
new file mode 100644
index 0000000000..00b99da4a0
--- /dev/null
+++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
@@ -0,0 +1,107 @@
+{
+ "accessapproval": "1.3.0",
+ "accesscontextmanager": "1.2.0",
+ "aiplatform": "1.13.0",
+ "analytics": "0.7.0",
+ "apigateway": "1.2.0",
+ "apigeeconnect": "1.2.0",
+ "appengine": "1.3.0",
+ "area120": "0.3.0",
+ "artifactregistry": "1.3.0",
+ "asset": "1.2.0",
+ "assuredworkloads": "0.6.0",
+ "automl": "1.3.0",
+ "baremetalsolution": "0.1.0",
+ "batch": "0.1.0",
+ "billing": "1.2.0",
+ "binaryauthorization": "0.6.0",
+ "certificatemanager": "0.2.0",
+ "channel": "1.6.0",
+ "cloudbuild": "1.2.0",
+ "clouddms": "1.2.0",
+ "cloudtasks": "1.3.0",
+ "compute": "1.7.0",
+ "contactcenterinsights": "1.2.0",
+ "container": "1.2.0",
+ "containeranalysis": "0.3.0",
+ "datacatalog": "1.3.0",
+ "dataflow": "0.4.0",
+ "datafusion": "1.3.0",
+ "datalabeling": "0.3.0",
+ "dataplex": "0.4.0",
+ "dataproc": "1.5.0",
+ "dataqna": "0.3.0",
+ "datastream": "0.5.0",
+ "deploy": "1.2.0",
+ "dialogflow": "1.10.0",
+ "dlp": "1.4.0",
+ "documentai": "1.4.0",
+ "domains": "0.4.0",
+ "essentialcontacts": "1.2.0",
+ "eventarc": "1.6.0",
+ "filestore": "1.2.0",
+ "functions": "1.4.0",
+ "gaming": "1.2.0",
+ "gkebackup": "0.1.0",
+ "gkeconnect": "0.3.0",
+ "gkehub": "0.7.0",
+ "gkemulticloud": "0.2.0",
+ "grafeas": "0.2.0",
+ "gsuiteaddons": "1.2.0",
+ "iam": "0.3.0",
+ "iap": "1.3.0",
+ "ids": "0.3.0",
+ "iot": "1.2.0",
+ "kms": "1.4.0",
+ "language": "1.2.0",
+ "lifesciences": "0.3.0",
+ "managedidentities": "1.2.0",
+ "mediatranslation": "0.3.0",
+ "memcache": "1.2.0",
+ "metastore": "1.2.0",
+ "monitoring": "1.5.0",
+ "networkconnectivity": "1.2.0",
+ "networkmanagement": "1.2.0",
+ "networksecurity": "0.3.0",
+ "notebooks": "0.4.0",
+ "optimization": "0.1.0",
+ "orchestration": "1.2.0",
+ "orgpolicy": "1.3.0",
+ "osconfig": "1.5.0",
+ "oslogin": "1.2.0",
+ "phishingprotection": "0.3.0",
+ "policytroubleshooter": "1.2.0",
+ "privatecatalog": "0.3.0",
+ "recaptchaenterprise/v2": "2.0.1",
+ "recommendationengine": "0.2.0",
+ "recommender": "1.3.0",
+ "redis": "1.5.0",
+ "resourcemanager": "1.2.0",
+ "resourcesettings": "1.2.0",
+ "retail": "1.4.0",
+ "run": "0.1.1",
+ "scheduler": "1.2.0",
+ "secretmanager": "1.4.0",
+ "security": "1.4.0",
+ "securitycenter": "1.8.0",
+ "servicecontrol": "1.3.0",
+ "servicedirectory": "1.2.0",
+ "servicemanagement": "1.3.0",
+ "serviceusage": "1.2.0",
+ "shell": "1.2.0",
+ "speech": "1.4.0",
+ "storagetransfer": "1.3.0",
+ "talent": "0.8.0",
+ "texttospeech": "1.3.0",
+ "tpu": "1.2.0",
+ "trace": "1.2.0",
+ "translate": "1.2.0",
+ "video": "1.6.0",
+ "videointelligence": "1.2.0",
+ "vision/v2": "2.0.0",
+ "vmmigration": "0.3.0",
+ "vpcaccess": "1.2.0",
+ "webrisk": "1.2.0",
+ "websecurityscanner": "1.2.0",
+ "workflows": "1.4.0"
+}
diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json
new file mode 100644
index 0000000000..52eec6a307
--- /dev/null
+++ b/vendor/cloud.google.com/go/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+ ".": "0.102.1"
+}
diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md
index a01aabaef9..0ced42279c 100644
--- a/vendor/cloud.google.com/go/CHANGES.md
+++ b/vendor/cloud.google.com/go/CHANGES.md
@@ -1,5 +1,209 @@
# Changes
+## [0.102.1](https://github.com/googleapis/google-cloud-go/compare/v0.102.0...v0.102.1) (2022-06-17)
+
+
+### Bug Fixes
+
+* **longrunning:** regapic remove path params duped as query params ([#6183](https://github.com/googleapis/google-cloud-go/issues/6183)) ([c963be3](https://github.com/googleapis/google-cloud-go/commit/c963be301f074779e6bb8c897d8064fa076e9e35))
+
+## [0.102.0](https://github.com/googleapis/google-cloud-go/compare/v0.101.1...v0.102.0) (2022-05-24)
+
+
+### Features
+
+* **civil:** add Before and After methods to civil.Time ([#5703](https://github.com/googleapis/google-cloud-go/issues/5703)) ([7acaaaf](https://github.com/googleapis/google-cloud-go/commit/7acaaafef47668c3e8382b8bc03475598c3db187))
+
+### [0.101.1](https://github.com/googleapis/google-cloud-go/compare/v0.101.0...v0.101.1) (2022-05-03)
+
+
+### Bug Fixes
+
+* **internal/gapicgen:** properly update modules that have no gapic changes ([#5945](https://github.com/googleapis/google-cloud-go/issues/5945)) ([de2befc](https://github.com/googleapis/google-cloud-go/commit/de2befcaa2a886499db9da6d4d04d28398c8d44b))
+
+## [0.101.0](https://github.com/googleapis/google-cloud-go/compare/v0.100.2...v0.101.0) (2022-04-20)
+
+
+### Features
+
+* **all:** bump grpc dep ([#5481](https://github.com/googleapis/google-cloud-go/issues/5481)) ([b12964d](https://github.com/googleapis/google-cloud-go/commit/b12964df5c63c647aaf204e73cfcdfd379d19682))
+* **internal/gapicgen:** change versionClient for gapics ([#5687](https://github.com/googleapis/google-cloud-go/issues/5687)) ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9))
+
+
+### Bug Fixes
+
+* **internal/gapicgen:** add generation of internal/version.go for new client modules ([#5726](https://github.com/googleapis/google-cloud-go/issues/5726)) ([341e0df](https://github.com/googleapis/google-cloud-go/commit/341e0df1e44480706180cc5b07c49b3cee904095))
+* **internal/gapicgen:** don't gen version files for longrunning and debugger ([#5698](https://github.com/googleapis/google-cloud-go/issues/5698)) ([3a81108](https://github.com/googleapis/google-cloud-go/commit/3a81108c74cd8864c56b8ab5939afd864db3c64b))
+* **internal/gapicgen:** don't try to make snippets for non-gapics ([#5919](https://github.com/googleapis/google-cloud-go/issues/5919)) ([c94dddc](https://github.com/googleapis/google-cloud-go/commit/c94dddc60ef83a0584ba8f7dd24589d9db971672))
+* **internal/gapicgen:** move breaking change indicator if present ([#5452](https://github.com/googleapis/google-cloud-go/issues/5452)) ([e712df5](https://github.com/googleapis/google-cloud-go/commit/e712df5ebb45598a1653081d7e11e578bad22ff8))
+* **internal/godocfx:** prevent errors for filtered mods ([#5485](https://github.com/googleapis/google-cloud-go/issues/5485)) ([6cb9b89](https://github.com/googleapis/google-cloud-go/commit/6cb9b89b2d654c695eab00d8fb375cce0cd6e059))
+
+## [0.100.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.99.0...v0.100.0) (2022-01-04)
+
+
+### Features
+
+* **analytics/admin:** add the `AcknowledgeUserDataCollection` operation which acknowledges the terms of user data collection for the specified property feat: add the new resource type `DataStream`, which is planned to eventually replace `WebDataStream`, `IosAppDataStream`, `AndroidAppDataStream` resources fix!: remove `GetEnhancedMeasurementSettings`, `UpdateEnhancedMeasurementSettingsRequest`, `UpdateEnhancedMeasurementSettingsRequest` operations from the API feat: add `CreateDataStream`, `DeleteDataStream`, `UpdateDataStream`, `ListDataStreams` operations to support the new `DataStream` resource feat: add `DISPLAY_VIDEO_360_ADVERTISER_LINK`, `DISPLAY_VIDEO_360_ADVERTISER_LINK_PROPOSAL` fields to `ChangeHistoryResourceType` enum feat: add the `account` field to the `Property` type docs: update the documentation with a new list of valid values for `UserLink.direct_roles` field ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
+* **assuredworkloads:** EU Regions and Support With Sovereign Controls ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
+* **dialogflow/cx:** added the display name of the current page in webhook requests ([e0833b2](https://www.github.com/googleapis/google-cloud-go/commit/e0833b2853834ba79fd20ca2ae9c613d585dd2a5))
+* **dialogflow/cx:** added the display name of the current page in webhook requests ([e0833b2](https://www.github.com/googleapis/google-cloud-go/commit/e0833b2853834ba79fd20ca2ae9c613d585dd2a5))
+* **dialogflow:** added export documentation method feat: added filter in list documentations request feat: added option to import custom metadata from Google Cloud Storage in reload document request feat: added option to apply partial update to the smart messaging allowlist in reload document request feat: added filter in list knowledge bases request ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
+* **dialogflow:** removed OPTIONAL for speech model variant docs: added more docs for speech model variant and improved docs format for participant ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
+* **recaptchaenterprise:** add new reCAPTCHA Enterprise fraud annotations ([3dd34a2](https://www.github.com/googleapis/google-cloud-go/commit/3dd34a262edbff63b9aece8faddc2ff0d98ce42a))
+
+
+### Bug Fixes
+
+* **artifactregistry:** fix resource pattern ID segment name ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
+* **compute:** add parameter in compute bazel rules ([#692](https://www.github.com/googleapis/google-cloud-go/issues/692)) ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
+* **profiler:** refine regular expression for parsing backoff duration in E2E tests ([#5229](https://www.github.com/googleapis/google-cloud-go/issues/5229)) ([4438aeb](https://www.github.com/googleapis/google-cloud-go/commit/4438aebca2ec01d4dbf22287aa651937a381e043))
+* **profiler:** remove certificate expiration workaround ([#5222](https://www.github.com/googleapis/google-cloud-go/issues/5222)) ([2da36c9](https://www.github.com/googleapis/google-cloud-go/commit/2da36c95f44d5f88fd93cd949ab78823cea74fe7))
+
+## [0.99.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.98.0...v0.99.0) (2021-12-06)
+
+
+### Features
+
+* **dialogflow/cx:** added `TelephonyTransferCall` in response message ([fe27098](https://www.github.com/googleapis/google-cloud-go/commit/fe27098e5d429911428821ded57384353e699774))
+
+## [0.98.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.97.0...v0.98.0) (2021-12-03)
+
+
+### Features
+
+* **aiplatform:** add enable_private_service_connect field to Endpoint feat: add id field to DeployedModel feat: add service_attachment field to PrivateEndpoints feat: add endpoint_id to CreateEndpointRequest and method signature to CreateEndpoint feat: add method signature to CreateFeatureStore, CreateEntityType, CreateFeature feat: add network and enable_private_service_connect to IndexEndpoint feat: add service_attachment to IndexPrivateEndpoints feat: add stratified_split field to training_pipeline InputDataConfig ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
+* **aiplatform:** add featurestore service to aiplatform v1 feat: add metadata service to aiplatform v1 ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d))
+* **aiplatform:** Adds support for `google.protobuf.Value` pipeline parameters in the `parameter_values` field ([88a1cdb](https://www.github.com/googleapis/google-cloud-go/commit/88a1cdbef3cc337354a61bc9276725bfb9a686d8))
+* **aiplatform:** Tensorboard v1 protos release feat:Exposing a field for v1 CustomJob-Tensorboard integration. ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
+* **binaryauthorization:** add new admission rule types to Policy feat: update SignatureAlgorithm enum to match algorithm names in KMS feat: add SystemPolicyV1Beta1 service ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **channel:** add resource type to ChannelPartnerLink ([c206948](https://www.github.com/googleapis/google-cloud-go/commit/c2069487f6af5bcb37d519afeb60e312e35e67d5))
+* **cloudtasks:** add C++ rules for Cloud Tasks ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
+* **compute:** Move compute.v1 from googleapis-discovery to googleapis ([#675](https://www.github.com/googleapis/google-cloud-go/issues/675)) ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **compute:** Switch to string enums for compute ([#685](https://www.github.com/googleapis/google-cloud-go/issues/685)) ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
+* **contactcenterinsights:** Add ability to update phrase matchers feat: Add issue model stats to time series feat: Add display name to issue model stats ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **contactcenterinsights:** Add WriteDisposition to BigQuery Export API ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
+* **contactcenterinsights:** deprecate issue_matches docs: if conversation medium is unspecified, it will default to PHONE_CALL ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266))
+* **contactcenterinsights:** new feature flag disable_issue_modeling docs: fixed formatting issues in the reference documentation ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
+* **contactcenterinsights:** remove feature flag disable_issue_modeling ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
+* **datacatalog:** Added BigQueryDateShardedSpec.latest_shard_resource field feat: Added SearchCatalogResult.display_name field feat: Added SearchCatalogResult.description field ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **dataproc:** add Dataproc Serverless for Spark Batches API ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d))
+* **dataproc:** Add support for dataproc BatchController service ([8519b94](https://www.github.com/googleapis/google-cloud-go/commit/8519b948fee5dc82d39300c4d96e92c85fe78fe6))
+* **dialogflow/cx:** added API for changelogs docs: clarified semantic of the streaming APIs ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58))
+* **dialogflow/cx:** added API for changelogs docs: clarified semantic of the streaming APIs ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58))
+* **dialogflow/cx:** added support for comparing between versions docs: clarified security settings API reference ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1))
+* **dialogflow/cx:** added support for Deployments with ListDeployments and GetDeployment apis feat: added support for DeployFlow api under Environments feat: added support for TestCasesConfig under Environment docs: added long running operation explanation for several apis fix!: marked resource name of security setting as not-required ([8c5c6cf](https://www.github.com/googleapis/google-cloud-go/commit/8c5c6cf9df046b67998a8608d05595bd9e34feb0))
+* **dialogflow/cx:** allow setting custom CA for generic webhooks and release CompareVersions API docs: clarify DLP template reader usage ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
+* **dialogflow:** added support to configure security settings, language code and time zone on conversation profile ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **dialogflow:** support document metadata filter in article suggestion and smart reply model in human agent assistant ([e33350c](https://www.github.com/googleapis/google-cloud-go/commit/e33350cfcabcddcda1a90069383d39c68deb977a))
+* **dlp:** added deidentify replacement dictionaries feat: added field for BigQuery inspect template inclusion lists feat: added field to support infotype versioning ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
+* **domains:** added library for Cloud Domains v1 API. Also added methods for the transfer-in flow docs: improved API comments ([8519b94](https://www.github.com/googleapis/google-cloud-go/commit/8519b948fee5dc82d39300c4d96e92c85fe78fe6))
+* **functions:** Secret Manager integration fields 'secret_environment_variables' and 'secret_volumes' added feat: CMEK integration fields 'kms_key_name' and 'docker_repository' added ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **kms:** add OAEP+SHA1 to the list of supported algorithms ([8c5c6cf](https://www.github.com/googleapis/google-cloud-go/commit/8c5c6cf9df046b67998a8608d05595bd9e34feb0))
+* **kms:** add RPC retry information for MacSign, MacVerify, and GenerateRandomBytes Committer: [@bdhess](https://www.github.com/bdhess) ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266))
+* **kms:** add support for Raw PKCS#1 signing keys ([58bea89](https://www.github.com/googleapis/google-cloud-go/commit/58bea89a3d177d5c431ff19310794e3296253353))
+* **monitoring/apiv3:** add CreateServiceTimeSeries RPC ([9e41088](https://www.github.com/googleapis/google-cloud-go/commit/9e41088bb395fbae0e757738277d5c95fa2749c8))
+* **monitoring/dashboard:** Added support for auto-close configurations ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
+* **monitoring/metricsscope:** promote apiv1 to GA ([#5135](https://www.github.com/googleapis/google-cloud-go/issues/5135)) ([33c0f63](https://www.github.com/googleapis/google-cloud-go/commit/33c0f63e0e0ce69d9ef6e57b04d1b8cc10ed2b78))
+* **osconfig:** add OS policy assignment RPCs ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1))
+* **osconfig:** Update OSConfig API ([e33350c](https://www.github.com/googleapis/google-cloud-go/commit/e33350cfcabcddcda1a90069383d39c68deb977a))
+* **osconfig:** Update osconfig v1 and v1alpha RecurringSchedule.Frequency with DAILY frequency ([59e548a](https://www.github.com/googleapis/google-cloud-go/commit/59e548acc249c7bddd9c884c2af35d582a408c4d))
+* **recaptchaenterprise:** add reCAPTCHA Enterprise account defender API methods ([88a1cdb](https://www.github.com/googleapis/google-cloud-go/commit/88a1cdbef3cc337354a61bc9276725bfb9a686d8))
+* **redis:** [Cloud Memorystore for Redis] Support Multiple Read Replicas when creating Instance ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **security/privateca:** add IAMPolicy & Locations mix-in support ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266))
+* **securitycenter:** Added a new API method UpdateExternalSystem, which enables updating a finding with external system metadata. External systems are a child resource under finding, housed on the finding itself, and can also be filtered on in Notifications and in the ListFindings and GroupFindings APIs ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
+* **securitycenter:** Added mute related APIs, proto messages and fields ([3e7185c](https://www.github.com/googleapis/google-cloud-go/commit/3e7185c241d97ee342f132ae04bc93bb79a8e897))
+* **securitycenter:** Added resource type and display_name field to the FindingResult, and supported them in the filter for ListFindings and GroupFindings. Also added display_name to the resource which is surfaced in NotificationMessage ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **securitycenter:** Added vulnerability field to the finding feat: Added type field to the resource which is surfaced in NotificationMessage ([090cc3a](https://www.github.com/googleapis/google-cloud-go/commit/090cc3ae0f8747a14cc904fc6d429e2f5379bb03))
+* **servicecontrol:** add C++ rules for many Cloud services ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
+* **speech:** add result_end_time to SpeechRecognitionResult ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
+* **speech:** added alternative_language_codes to RecognitionConfig feat: WEBM_OPUS codec feat: SpeechAdaptation configuration feat: word confidence feat: spoken punctuation and spoken emojis feat: hint boost in SpeechContext ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
+* **texttospeech:** update v1 proto ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
+* **workflows/executions:** add a stack_trace field to the Error messages specifying where the error occurred feat: add call_log_level field to Execution messages doc: clarify requirement to escape strings within JSON arguments ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+
+
+### Bug Fixes
+
+* **accesscontextmanager:** nodejs package name access-context-manager ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d))
+* **aiplatform:** Remove invalid resource annotations ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58))
+* **compute/metadata:** return an error when all retries have failed ([#5063](https://www.github.com/googleapis/google-cloud-go/issues/5063)) ([c792a0d](https://www.github.com/googleapis/google-cloud-go/commit/c792a0d13db019c9964efeee5c6bc85b07ca50fa)), refs [#5062](https://www.github.com/googleapis/google-cloud-go/issues/5062)
+* **compute:** make parent_id fields required in compute move and insert methods ([#686](https://www.github.com/googleapis/google-cloud-go/issues/686)) ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
+* **compute:** Move compute_small protos under its own directory ([#681](https://www.github.com/googleapis/google-cloud-go/issues/681)) ([3e7185c](https://www.github.com/googleapis/google-cloud-go/commit/3e7185c241d97ee342f132ae04bc93bb79a8e897))
+* **internal/gapicgen:** fix a compute filtering ([#5111](https://www.github.com/googleapis/google-cloud-go/issues/5111)) ([77aa19d](https://www.github.com/googleapis/google-cloud-go/commit/77aa19de7fc33a9e831e6b91bd324d6832b44d99))
+* **internal/godocfx:** only put TOC status on mod if all pkgs have same status ([#4974](https://www.github.com/googleapis/google-cloud-go/issues/4974)) ([309b59e](https://www.github.com/googleapis/google-cloud-go/commit/309b59e583d1bf0dd9ffe84223034eb8a2975d47))
+* **internal/godocfx:** replace * with HTML code ([#5049](https://www.github.com/googleapis/google-cloud-go/issues/5049)) ([a8f7c06](https://www.github.com/googleapis/google-cloud-go/commit/a8f7c066e8d97120ae4e12963e3c9acc8b8906c2))
+* **monitoring/apiv3:** Reintroduce deprecated field/enum for backward compatibility docs: Use absolute link targets in comments ([45fd259](https://www.github.com/googleapis/google-cloud-go/commit/45fd2594d99ef70c776df26866f0a3b537e7e69e))
+* **profiler:** workaround certificate expiration issue in integration tests ([#4955](https://www.github.com/googleapis/google-cloud-go/issues/4955)) ([de9e465](https://www.github.com/googleapis/google-cloud-go/commit/de9e465bea8cd0580c45e87d2cbc2b610615b363))
+* **security/privateca:** include mixin protos as input for mixin rpcs ([479c2f9](https://www.github.com/googleapis/google-cloud-go/commit/479c2f90d556a106b25ebcdb1539d231488182da))
+* **security/privateca:** repair service config to enable mixins ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1))
+* **video/transcoder:** update nodejs package name to video-transcoder ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d))
+
+## [0.97.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.96.0...v0.97.0) (2021-09-29)
+
+
+### Features
+
+* **internal:** add Retry func to testutil from the samples repository ([#4902](https://github.com/googleapis/google-cloud-go/pull/4902))
+
+## [0.96.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.95.0...v0.96.0) (2021-09-28)
+
+
+### Features
+
+* **civil:** add IsEmpty function to time, date and datetime ([#4728](https://www.github.com/googleapis/google-cloud-go/issues/4728)) ([88bfa64](https://www.github.com/googleapis/google-cloud-go/commit/88bfa64d6df2f3bb7d41e0b8f56717dd3de790e2)), refs [#4727](https://www.github.com/googleapis/google-cloud-go/issues/4727)
+* **internal/godocfx:** detect preview versions ([#4899](https://www.github.com/googleapis/google-cloud-go/issues/4899)) ([9b60844](https://www.github.com/googleapis/google-cloud-go/commit/9b608445ce9ebabbc87a50e85ce6ef89125031d2))
+* **internal:** provide wrapping for retried errors ([#4797](https://www.github.com/googleapis/google-cloud-go/issues/4797)) ([ce5f4db](https://www.github.com/googleapis/google-cloud-go/commit/ce5f4dbab884e847a2d9f1f8f3fcfd7df19a505a))
+
+
+### Bug Fixes
+
+* **internal/gapicgen:** restore formatting of proto files ([#4789](https://www.github.com/googleapis/google-cloud-go/issues/4789)) ([5606b54](https://www.github.com/googleapis/google-cloud-go/commit/5606b54b97bb675487c6c138a4081c827218f933))
+* **internal/trace:** use xerrors.As for trace ([#4813](https://www.github.com/googleapis/google-cloud-go/issues/4813)) ([05fe61c](https://www.github.com/googleapis/google-cloud-go/commit/05fe61c5aa4860bdebbbe3e91a9afaba16aa6184))
+
+## [0.95.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.94.1...v0.95.0) (2021-09-21)
+
+### Bug Fixes
+
+* **internal/gapicgen:** add a temporary import ([#4756](https://www.github.com/googleapis/google-cloud-go/issues/4756)) ([4d9c046](https://www.github.com/googleapis/google-cloud-go/commit/4d9c046b66a2dc205e2c14b676995771301440da))
+* **compute/metadata:** remove heavy gax dependency ([#4784](https://www.github.com/googleapis/google-cloud-go/issues/4784)) ([ea00264](https://www.github.com/googleapis/google-cloud-go/commit/ea00264428137471805f2ec67f04f3a5a42928fa))
+
+### [0.94.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.94.0...v0.94.1) (2021-09-02)
+
+
+### Bug Fixes
+
+* **compute/metadata:** fix retry logic to not panic on error ([#4714](https://www.github.com/googleapis/google-cloud-go/issues/4714)) ([75c63b9](https://www.github.com/googleapis/google-cloud-go/commit/75c63b94d2cf86606fffc3611f7e6150b667eedc)), refs [#4713](https://www.github.com/googleapis/google-cloud-go/issues/4713)
+
+## [0.94.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.92.0...v0.94.0) (2021-08-31)
+
+
+### Features
+
+* **aiplatform:** add XAI, model monitoring, and index services to aiplatform v1 ([e385b40](https://www.github.com/googleapis/google-cloud-go/commit/e385b40a1e2ecf81f5fd0910de5c37275951f86b))
+* **analytics/admin:** add `GetDataRetentionSettings`, `UpdateDataRetentionSettings` methods to the API ([8467899](https://www.github.com/googleapis/google-cloud-go/commit/8467899ab6ebf0328c543bfb5fbcddeb2f53a082))
+* **asset:** Release of relationships in v1; add content type Relationship to support relationship export. Committer: lvv@ ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3))
+* **assuredworkloads:** Add Canada Regions And Support compliance regime ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
+* **cloudbuild/apiv1:** Add ability to configure BuildTriggers to create Builds that require approval before executing and ApproveBuild API to approve or reject pending Builds ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3))
+* **cloudbuild/apiv1:** add script field to BuildStep message ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
+* **cloudbuild/apiv1:** Update cloudbuild proto with the service_account for BYOSA Triggers. ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
+* **compute/metadata:** retry error when talking to metadata service ([#4648](https://www.github.com/googleapis/google-cloud-go/issues/4648)) ([81c6039](https://www.github.com/googleapis/google-cloud-go/commit/81c6039503121f8da3de4f4cd957b8488a3ef620)), refs [#4642](https://www.github.com/googleapis/google-cloud-go/issues/4642)
+* **dataproc:** remove apiv1beta2 client ([#4682](https://www.github.com/googleapis/google-cloud-go/issues/4682)) ([2248554](https://www.github.com/googleapis/google-cloud-go/commit/22485541affb1251604df292670a20e794111d3e))
+* **gaming:** support version reporting API ([cd65cec](https://www.github.com/googleapis/google-cloud-go/commit/cd65cecf15c4a01648da7f8f4f4d497772961510))
+* **gkehub:** Add request_id under `DeleteMembershipRequest` and `UpdateMembershipRequest` ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
+* **internal/carver:** support carving batches ([#4623](https://www.github.com/googleapis/google-cloud-go/issues/4623)) ([2972d19](https://www.github.com/googleapis/google-cloud-go/commit/2972d194da19bedf16d76fda471c06a965cfdcd6))
+* **kms:** add support for Key Reimport ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123))
+* **metastore:** Added the Backup resource and Backup resource GetIamPolicy/SetIamPolicy to V1 feat: Added the RestoreService method to V1 ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3))
+* **monitoring/dashboard:** Added support for logs-based alerts: https://cloud.google.com/logging/docs/alerting/log-based-alerts feat: Added support for user-defined labels on cloud monitoring's Service and ServiceLevelObjective objects fix!: mark required fields in QueryTimeSeriesRequest as required ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
+* **osconfig:** Update osconfig v1 and v1alpha with WindowsApplication ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123))
+* **speech:** Add transcript normalization ([b31646d](https://www.github.com/googleapis/google-cloud-go/commit/b31646d1e12037731df4b5c0ba9f60b6434d7b9b))
+* **talent:** Add new commute methods in Search APIs feat: Add new histogram type 'publish_time_in_day' feat: Support filtering by requisitionId in ListJobs API ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3))
+* **translate:** added v3 proto for online/batch document translation and updated v3beta1 proto for format conversion ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123))
+
+
+### Bug Fixes
+
+* **datastream:** Change a few resource pattern variables from camelCase to snake_case ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123))
+
## [0.92.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.91.0...v0.92.0) (2021-08-16)
diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md
index c3a3852c38..6d6e48b65b 100644
--- a/vendor/cloud.google.com/go/CONTRIBUTING.md
+++ b/vendor/cloud.google.com/go/CONTRIBUTING.md
@@ -2,7 +2,7 @@
1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose).
The issue will be used to discuss the bug or feature and should be created
- before sending a CL.
+ before sending a PR.
1. [Install Go](https://golang.org/dl/).
1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`)
diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md
index 9524921fe5..669cc75327 100644
--- a/vendor/cloud.google.com/go/README.md
+++ b/vendor/cloud.google.com/go/README.md
@@ -27,67 +27,18 @@ make backwards-incompatible changes.
## Supported APIs
-| Google API | Status | Package |
-| ----------------------------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
-| [Asset][cloud-asset] | stable | [`cloud.google.com/go/asset/apiv1`](https://pkg.go.dev/cloud.google.com/go/asset/v1beta) |
-| [Automl][cloud-automl] | stable | [`cloud.google.com/go/automl/apiv1`](https://pkg.go.dev/cloud.google.com/go/automl/apiv1) |
-| [BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) |
-| [Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`](https://pkg.go.dev/cloud.google.com/go/bigtable) |
-| [Cloudbuild][cloud-build] | stable | [`cloud.google.com/go/cloudbuild/apiv1`](https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1) |
-| [Cloudtasks][cloud-tasks] | stable | [`cloud.google.com/go/cloudtasks/apiv2`](https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2) |
-| [Compute Engine][cloud-compute] | alpha | [`cloud.google.com/go/compute/apiv1`](https://pkg.go.dev/cloud.google.com/go/compute/apiv1) |
-| [Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`](https://pkg.go.dev/cloud.google.com/go/container/apiv1) |
-| [ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1`](https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1) |
-| [Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`](https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1) |
-| [Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`](https://pkg.go.dev/cloud.google.com/go/datastore) |
-| [Debugger][cloud-debugger] | stable | [`cloud.google.com/go/debugger/apiv2`](https://pkg.go.dev/cloud.google.com/go/debugger/apiv2) |
-| [Dialogflow][cloud-dialogflow] | stable | [`cloud.google.com/go/dialogflow/apiv2`](https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2) |
-| [Data Loss Prevention][cloud-dlp] | stable | [`cloud.google.com/go/dlp/apiv2`](https://pkg.go.dev/cloud.google.com/go/dlp/apiv2) |
-| [ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`](https://pkg.go.dev/cloud.google.com/go/errorreporting) |
-| [Firestore][cloud-firestore] | stable | [`cloud.google.com/go/firestore`](https://pkg.go.dev/cloud.google.com/go/firestore) |
-| [IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`](https://pkg.go.dev/cloud.google.com/go/iam) |
-| [IoT][cloud-iot] | stable | [`cloud.google.com/go/iot/apiv1`](https://pkg.go.dev/cloud.google.com/go/iot/apiv1) |
-| [IRM][cloud-irm] | alpha | [`cloud.google.com/go/irm/apiv1alpha2`](https://pkg.go.dev/cloud.google.com/go/irm/apiv1alpha2) |
-| [KMS][cloud-kms] | stable | [`cloud.google.com/go/kms/apiv1`](https://pkg.go.dev/cloud.google.com/go/kms/apiv1) |
-| [Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`](https://pkg.go.dev/cloud.google.com/go/language/apiv1) |
-| [Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`](https://pkg.go.dev/cloud.google.com/go/logging) |
-| [Memorystore][cloud-memorystore] | alpha | [`cloud.google.com/go/redis/apiv1`](https://pkg.go.dev/cloud.google.com/go/redis/apiv1) |
-| [Monitoring][cloud-monitoring] | stable | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3) |
-| [OS Login][cloud-oslogin] | stable | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1) |
-| [Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub) |
-| [Pub/Sub Lite][cloud-pubsublite] | stable | [`cloud.google.com/go/pubsublite`](https://pkg.go.dev/cloud.google.com/go/pubsublite) |
-| [Phishing Protection][cloud-phishingprotection] | alpha | [`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1) |
-| [reCAPTCHA Enterprise][cloud-recaptcha] | alpha | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1) |
-| [Recommender][cloud-recommender] | beta | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1) |
-| [Scheduler][cloud-scheduler] | stable | [`cloud.google.com/go/scheduler/apiv1`](https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1) |
-| [Securitycenter][cloud-securitycenter] | stable | [`cloud.google.com/go/securitycenter/apiv1`](https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1) |
-| [Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`](https://pkg.go.dev/cloud.google.com/go/spanner) |
-| [Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`](https://pkg.go.dev/cloud.google.com/go/speech/apiv1) |
-| [Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`](https://pkg.go.dev/cloud.google.com/go/storage) |
-| [Talent][cloud-talent] | alpha | [`cloud.google.com/go/talent/apiv4beta1`](https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1) |
-| [Text To Speech][cloud-texttospeech] | stable | [`cloud.google.com/go/texttospeech/apiv1`](https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1) |
-| [Trace][cloud-trace] | stable | [`cloud.google.com/go/trace/apiv2`](https://pkg.go.dev/cloud.google.com/go/trace/apiv2) |
-| [Translate][cloud-translate] | stable | [`cloud.google.com/go/translate`](https://pkg.go.dev/cloud.google.com/go/translate) |
-| [Video Intelligence][cloud-video] | beta | [`cloud.google.com/go/videointelligence/apiv1beta2`](https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2) |
-| [Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`](https://pkg.go.dev/cloud.google.com/go/vision/apiv1) |
-| [Webrisk][cloud-webrisk] | alpha | [`cloud.google.com/go/webrisk/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1) |
-
-> **Alpha status**: the API is still being actively developed. As a
-> result, it might change in backward-incompatible ways and is not recommended
-> for production use.
->
-> **Beta status**: the API is largely complete, but still has outstanding
-> features and bugs to be addressed. There may be minor backwards-incompatible
-> changes where necessary.
->
-> **Stable status**: the API is mature and ready for production use. We will
-> continue addressing bugs and feature requests.
-
-Documentation and examples are available at [pkg.go.dev/cloud.google.com/go](https://pkg.go.dev/cloud.google.com/go)
+For an updated list of all of our released APIs please see our
+[reference docs](https://cloud.google.com/go/docs/reference).
## [Go Versions Supported](#supported-versions)
-We currently support Go versions 1.11 and newer.
+Our libraries are compatible with at least the three most recent major Go
+releases. They are currently compatible with:
+
+- Go 1.18
+- Go 1.17
+- Go 1.16
+- Go 1.15
## Authorization
@@ -125,12 +76,12 @@ client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
## Contributing
Contributions are welcome. Please, see the
-[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
document for details.
Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms.
-See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.
[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
diff --git a/vendor/cloud.google.com/go/RELEASING.md b/vendor/cloud.google.com/go/RELEASING.md
index d04176097d..6d0fcf4f9f 100644
--- a/vendor/cloud.google.com/go/RELEASING.md
+++ b/vendor/cloud.google.com/go/RELEASING.md
@@ -79,14 +79,14 @@ here is how to manually cut a release of `cloud.google.com/go`.
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
any failures in the most recent build, address them before proceeding with
the release.
-1. Navigate to `google-cloud-go/` and switch to master.
+1. Navigate to `google-cloud-go/` and switch to main.
1. `git pull`
1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases.
The current latest tag `$CV` is the largest tag. It should look something
like `vX.Y.Z` (note: ignore all `LIB/vX.Y.Z` tags - these are tags for a
specific library, not the module root). We'll call the current version `$CV`
and the new version `$NV`.
-1. On master, run `git log $CV...` to list all the changes since the last
+1. On main, run `git log $CV...` to list all the changes since the last
release. NOTE: You must manually visually parse out changes to submodules [1]
(the `git log` is going to show you things in submodules, which are not going
to be part of your release).
@@ -98,7 +98,7 @@ here is how to manually cut a release of `cloud.google.com/go`.
and create a PR titled `chore: release $NV`.
1. Wait for the PR to be reviewed and merged. Once it's merged, and without
merging any other PRs in the meantime:
- a. Switch to master.
+ a. Switch to main.
b. `git pull`
c. Tag the repo with the next version: `git tag $NV`.
d. Push the tag to origin:
@@ -118,13 +118,13 @@ here is how to manually cut a release of a submodule.
any failures in the most recent build, address them before proceeding with
the release. (This applies even if the failures are in a different submodule
from the one being released.)
-1. Navigate to `google-cloud-go/` and switch to master.
+1. Navigate to `google-cloud-go/` and switch to main.
1. `git pull`
1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all
existing releases. The current latest tag `$CV` is the largest tag. It
should look something like `datastore/vX.Y.Z`. We'll call the current version
`$CV` and the new version `$NV`.
-1. On master, run `git log $CV.. -- datastore/` to list all the changes to the
+1. On main, run `git log $CV.. -- datastore/` to list all the changes to the
submodule directory since the last release.
1. Edit `datastore/CHANGES.md` to include a summary of the changes.
1. In `internal/version` run `go generate`.
@@ -132,7 +132,7 @@ here is how to manually cut a release of a submodule.
and create a PR titled `chore(datastore): release $NV`.
1. Wait for the PR to be reviewed and merged. Once it's merged, and without
merging any other PRs in the meantime:
- a. Switch to master.
+ a. Switch to main.
b. `git pull`
c. Tag the repo with the next version: `git tag $NV`.
d. Push the tag to origin:
diff --git a/vendor/cloud.google.com/go/compute/LICENSE b/vendor/cloud.google.com/go/compute/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index 545bd9d379..1405d09674 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -16,7 +16,7 @@
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
-// as documented at https://developers.google.com/compute/docs/metadata.
+// as documented at https://cloud.google.com/compute/docs/metadata/overview.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
@@ -61,14 +61,18 @@ var (
instID = &cachedValue{k: "instance/id", trim: true}
)
-var defaultClient = &Client{hc: &http.Client{
- Transport: &http.Transport{
- Dial: (&net.Dialer{
- Timeout: 2 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
- },
-}}
+var defaultClient = &Client{hc: newDefaultHTTPClient()}
+
+func newDefaultHTTPClient() *http.Client {
+ return &http.Client{
+ Transport: &http.Transport{
+ Dial: (&net.Dialer{
+ Timeout: 2 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ },
+ }
+}
// NotDefinedError is returned when requested metadata is not defined.
//
@@ -130,7 +134,7 @@ func testOnGCE() bool {
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
- res, err := defaultClient.hc.Do(req.WithContext(ctx))
+ res, err := newDefaultHTTPClient().Do(req.WithContext(ctx))
if err != nil {
resc <- false
return
@@ -140,7 +144,8 @@ func testOnGCE() bool {
}()
go func() {
- addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal")
+ resolver := &net.Resolver{}
+ addrs, err := resolver.LookupHost(ctx, "metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
@@ -282,6 +287,7 @@ func NewClient(c *http.Client) *Client {
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
+ ctx := context.TODO()
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
@@ -304,9 +310,25 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
}
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
- res, err := c.hc.Do(req)
- if err != nil {
- return "", "", err
+ var res *http.Response
+ var reqErr error
+ retryer := newRetryer()
+ for {
+ res, reqErr = c.hc.Do(req)
+ var code int
+ if res != nil {
+ code = res.StatusCode
+ }
+ if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
+ if err := sleep(ctx, delay); err != nil {
+ return "", "", err
+ }
+ continue
+ }
+ break
+ }
+ if reqErr != nil {
+ return "", "", reqErr
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
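The loop added above keeps re-issuing the request until the retryer reports a final outcome, sleeping between attempts and honoring context cancellation. A minimal standalone sketch of the same pattern (the attempt cap and backoff values are illustrative, not the vendored ones):

    package main

    import (
        "context"
        "fmt"
        "net/http"
        "time"
    )

    // getWithRetry retries transient failures (transport errors and 5xx
    // responses) with doubling delays -- the shape of the loop in getETag.
    func getWithRetry(ctx context.Context, c *http.Client, url string) (*http.Response, error) {
        delay := 100 * time.Millisecond
        var lastErr error
        for attempt := 0; attempt < 5; attempt++ {
            res, err := c.Get(url)
            if err == nil && res.StatusCode < 500 {
                return res, nil // success or a non-retryable status: final
            }
            if err != nil {
                lastErr = err
            } else {
                res.Body.Close() // discard the retryable 5xx response
                lastErr = fmt.Errorf("server status %d", res.StatusCode)
            }
            select {
            case <-ctx.Done():
                return nil, ctx.Err() // mirrors sleep()'s cancellation handling
            case <-time.After(delay):
            }
            delay *= 2 // crude exponential backoff; the real code adds jitter
        }
        return nil, lastErr
    }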
diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go
new file mode 100644
index 0000000000..0f18f3cda1
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/retry.go
@@ -0,0 +1,114 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+ "context"
+ "io"
+ "math/rand"
+ "net/http"
+ "time"
+)
+
+const (
+ maxRetryAttempts = 5
+)
+
+var (
+ syscallRetryable = func(err error) bool { return false }
+)
+
+// defaultBackoff is basically equivalent to gax.Backoff without the need for
+// the dependency.
+type defaultBackoff struct {
+ max time.Duration
+ mul float64
+ cur time.Duration
+}
+
+func (b *defaultBackoff) Pause() time.Duration {
+ d := time.Duration(1 + rand.Int63n(int64(b.cur)))
+ b.cur = time.Duration(float64(b.cur) * b.mul)
+ if b.cur > b.max {
+ b.cur = b.max
+ }
+ return d
+}
+
+// sleep is the equivalent of gax.Sleep without the need for the dependency.
+func sleep(ctx context.Context, d time.Duration) error {
+ t := time.NewTimer(d)
+ select {
+ case <-ctx.Done():
+ t.Stop()
+ return ctx.Err()
+ case <-t.C:
+ return nil
+ }
+}
+
+func newRetryer() *metadataRetryer {
+ return &metadataRetryer{bo: &defaultBackoff{
+ cur: 100 * time.Millisecond,
+ max: 30 * time.Second,
+ mul: 2,
+ }}
+}
+
+type backoff interface {
+ Pause() time.Duration
+}
+
+type metadataRetryer struct {
+ bo backoff
+ attempts int
+}
+
+func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) {
+ if status == http.StatusOK {
+ return 0, false
+ }
+ retryOk := shouldRetry(status, err)
+ if !retryOk {
+ return 0, false
+ }
+ if r.attempts == maxRetryAttempts {
+ return 0, false
+ }
+ r.attempts++
+ return r.bo.Pause(), true
+}
+
+func shouldRetry(status int, err error) bool {
+ if 500 <= status && status <= 599 {
+ return true
+ }
+ if err == io.ErrUnexpectedEOF {
+ return true
+ }
+ // Transient network errors should be retried.
+ if syscallRetryable(err) {
+ return true
+ }
+ if err, ok := err.(interface{ Temporary() bool }); ok {
+ if err.Temporary() {
+ return true
+ }
+ }
+ if err, ok := err.(interface{ Unwrap() error }); ok {
+ return shouldRetry(status, err.Unwrap())
+ }
+ return false
+}
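Pause draws a random delay uniformly from (0, cur] and then doubles cur, capped at 30 seconds, so successive waits grow but stay jittered; Retry allows at most five retried attempts. A hedged sketch of how a caller inside this package might drive the retryer (fetchOnce is a hypothetical stand-in for one HTTP attempt; the real caller is getETag in metadata.go):

    // fetchWithRetry loops until the retryer reports a final outcome.
    func fetchWithRetry(ctx context.Context, fetchOnce func() (int, error)) error {
        r := newRetryer()
        for {
            status, err := fetchOnce()
            delay, again := r.Retry(status, err)
            if !again {
                return err // nil on success; the last error once attempts run out
            }
            if serr := sleep(ctx, delay); serr != nil {
                return serr // context cancelled while backing off
            }
        }
    }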
diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
new file mode 100644
index 0000000000..bb412f8917
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
@@ -0,0 +1,26 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package metadata
+
+import "syscall"
+
+func init() {
+ // Initialize syscallRetryable to return true on transient socket-level
+ // errors. These errors are specific to Linux.
+ syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
+}
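The package-level syscallRetryable hook in retry.go defaults to "never retry", and this Linux-only file swaps in the real check at init time, keeping syscall specifics out of the portable code. The same pattern, sketched generically (the file name and darwin condition are hypothetical, for illustration only):

    // hooks_darwin.go (hypothetical): overrides the portable default
    //
    //     var isTransient = func(err error) bool { return false }
    //
    // declared in a shared file, exactly as retry.go/retry_linux.go do.

    //go:build darwin
    // +build darwin

    package metadata

    import "syscall"

    func init() {
        isTransient = func(err error) bool { return err == syscall.ECONNRESET }
    }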
diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go
index 746696f371..06463833e3 100644
--- a/vendor/cloud.google.com/go/doc.go
+++ b/vendor/cloud.google.com/go/doc.go
@@ -165,16 +165,42 @@ For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "htt
Inspecting errors
-Most of the errors returned by the generated clients can be converted into a
-`grpc.Status`. Converting your errors to this type can be a useful to get
-more information about what went wrong while debugging.
- if err != {
+Most of the errors returned by the generated clients are wrapped in an
+`apierror.APIError` (https://pkg.go.dev/github.com/googleapis/gax-go/v2/apierror)
+and can be further unwrapped into a `grpc.Status` or `googleapi.Error` depending
+on the transport used to make the call (gRPC or REST). Converting your errors to
+these types can be a useful way to get more information about what went wrong
+while debugging.
+
+`apierror.APIError` gives access to specific details in the
+error. The transport-specific errors can still be unwrapped using the
+`apierror.APIError`.
+ if err != nil {
+ var ae *apierror.APIError
+ if errors.As(err, &ae) {
+ log.Println(ae.Reason())
+ log.Println(ae.Details().Help.GetLinks())
+ }
+ }
+
+If the gRPC transport was used, the `grpc.Status` can still be parsed using the
+`status.FromError` function.
+ if err != nil {
if s, ok := status.FromError(err); ok {
- log.Println(s.Message())
- for _, d := range s.Proto().Details {
- log.Println(d)
- }
- }
+ log.Println(s.Message())
+ for _, d := range s.Proto().Details {
+ log.Println(d)
+ }
+ }
+ }
+
+If the REST transport was used, the `googleapi.Error` can be parsed in a similar
+way.
+ if err != nil {
+ var gerr *googleapi.Error
+ if errors.As(err, &gerr) {
+ log.Println(gerr.Message)
+ }
}
Client Stability
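The three snippets above compose naturally into a single helper. A self-contained sketch under the assumption that all three packages are in use (the import paths are the real ones; the logging choices are illustrative):

    package main

    import (
        "errors"
        "log"

        "github.com/googleapis/gax-go/v2/apierror"
        "google.golang.org/api/googleapi"
        "google.golang.org/grpc/status"
    )

    // inspect logs whatever detail can be recovered from a client error,
    // in the unwrapping order described above.
    func inspect(err error) {
        if err == nil {
            return
        }
        var ae *apierror.APIError
        if errors.As(err, &ae) {
            log.Println("reason:", ae.Reason()) // transport-agnostic detail
        }
        if s, ok := status.FromError(err); ok {
            log.Println("grpc:", s.Message()) // present when gRPC was used
        }
        var gerr *googleapi.Error
        if errors.As(err, &gerr) {
            log.Println("rest:", gerr.Message) // present when REST was used
        }
    }

    func main() {
        inspect(errors.New("stand-in error for demonstration"))
    }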
diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
new file mode 100644
index 0000000000..885710c975
--- /dev/null
+++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
@@ -0,0 +1,322 @@
+{
+ "release-type": "go-yoshi",
+ "include-component-in-tag": true,
+ "tag-separator": "/",
+ "packages": {
+ "accessapproval": {
+ "component": "accessapproval"
+ },
+ "accesscontextmanager": {
+ "component": "accesscontextmanager"
+ },
+ "aiplatform": {
+ "component": "aiplatform"
+ },
+ "analytics": {
+ "component": "analytics"
+ },
+ "apigateway": {
+ "component": "apigateway"
+ },
+ "apigeeconnect": {
+ "component": "apigeeconnect"
+ },
+ "appengine": {
+ "component": "appengine"
+ },
+ "area120": {
+ "component": "area120"
+ },
+ "artifactregistry": {
+ "component": "artifactregistry"
+ },
+ "asset": {
+ "component": "asset"
+ },
+ "assuredworkloads": {
+ "component": "assuredworkloads"
+ },
+ "automl": {
+ "component": "automl"
+ },
+ "baremetalsolution": {
+ "component": "baremetalsolution"
+ },
+ "batch": {
+ "component": "batch"
+ },
+ "billing": {
+ "component": "billing"
+ },
+ "binaryauthorization": {
+ "component": "binaryauthorization"
+ },
+ "certificatemanager": {
+ "component": "certificatemanager"
+ },
+ "channel": {
+ "component": "channel"
+ },
+ "cloudbuild": {
+ "component": "cloudbuild"
+ },
+ "clouddms": {
+ "component": "clouddms"
+ },
+ "cloudtasks": {
+ "component": "cloudtasks"
+ },
+ "compute": {
+ "component": "compute"
+ },
+ "contactcenterinsights": {
+ "component": "contactcenterinsights"
+ },
+ "container": {
+ "component": "container"
+ },
+ "containeranalysis": {
+ "component": "containeranalysis"
+ },
+ "datacatalog": {
+ "component": "datacatalog"
+ },
+ "dataflow": {
+ "component": "dataflow"
+ },
+ "datafusion": {
+ "component": "datafusion"
+ },
+ "datalabeling": {
+ "component": "datalabeling"
+ },
+ "dataplex": {
+ "component": "dataplex"
+ },
+ "dataproc": {
+ "component": "dataproc"
+ },
+ "dataqna": {
+ "component": "dataqna"
+ },
+ "datastream": {
+ "component": "datastream"
+ },
+ "deploy": {
+ "component": "deploy"
+ },
+ "dialogflow": {
+ "component": "dialogflow"
+ },
+ "dlp": {
+ "component": "dlp"
+ },
+ "documentai": {
+ "component": "documentai"
+ },
+ "domains": {
+ "component": "domains"
+ },
+ "essentialcontacts": {
+ "component": "essentialcontacts"
+ },
+ "eventarc": {
+ "component": "eventarc"
+ },
+ "filestore": {
+ "component": "filestore"
+ },
+ "functions": {
+ "component": "functions"
+ },
+ "gaming": {
+ "component": "gaming"
+ },
+ "gkebackup": {
+ "component": "gkebackup"
+ },
+ "gkeconnect": {
+ "component": "gkeconnect"
+ },
+ "gkehub": {
+ "component": "gkehub"
+ },
+ "gkemulticloud": {
+ "component": "gkemulticloud"
+ },
+ "grafeas": {
+ "component": "grafeas"
+ },
+ "gsuiteaddons": {
+ "component": "gsuiteaddons"
+ },
+ "iam": {
+ "component": "iam"
+ },
+ "iap": {
+ "component": "iap"
+ },
+ "ids": {
+ "component": "ids"
+ },
+ "iot": {
+ "component": "iot"
+ },
+ "kms": {
+ "component": "kms"
+ },
+ "language": {
+ "component": "language"
+ },
+ "lifesciences": {
+ "component": "lifesciences"
+ },
+ "managedidentities": {
+ "component": "managedidentities"
+ },
+ "mediatranslation": {
+ "component": "mediatranslation"
+ },
+ "memcache": {
+ "component": "memcache"
+ },
+ "metastore": {
+ "component": "metastore"
+ },
+ "monitoring": {
+ "component": "monitoring"
+ },
+ "networkconnectivity": {
+ "component": "networkconnectivity"
+ },
+ "networkmanagement": {
+ "component": "networkmanagement"
+ },
+ "networksecurity": {
+ "component": "networksecurity"
+ },
+ "notebooks": {
+ "component": "notebooks"
+ },
+ "optimization": {
+ "component": "optimization"
+ },
+ "orchestration": {
+ "component": "orchestration"
+ },
+ "orgpolicy": {
+ "component": "orgpolicy"
+ },
+ "osconfig": {
+ "component": "osconfig"
+ },
+ "oslogin": {
+ "component": "oslogin"
+ },
+ "phishingprotection": {
+ "component": "phishingprotection"
+ },
+ "policytroubleshooter": {
+ "component": "policytroubleshooter"
+ },
+ "privatecatalog": {
+ "component": "privatecatalog"
+ },
+ "recaptchaenterprise/v2": {
+ "component": "recaptchaenterprise"
+ },
+ "recommendationengine": {
+ "component": "recommendationengine"
+ },
+ "recommender": {
+ "component": "recommender"
+ },
+ "redis": {
+ "component": "redis"
+ },
+ "resourcemanager": {
+ "component": "resourcemanager"
+ },
+ "resourcesettings": {
+ "component": "resourcesettings"
+ },
+ "retail": {
+ "component": "retail"
+ },
+ "run": {
+ "component": "run"
+ },
+ "scheduler": {
+ "component": "scheduler"
+ },
+ "secretmanager": {
+ "component": "secretmanager"
+ },
+ "security": {
+ "component": "security"
+ },
+ "securitycenter": {
+ "component": "securitycenter"
+ },
+ "servicecontrol": {
+ "component": "servicecontrol"
+ },
+ "servicedirectory": {
+ "component": "servicedirectory"
+ },
+ "servicemanagement": {
+ "component": "servicemanagement"
+ },
+ "serviceusage": {
+ "component": "serviceusage"
+ },
+ "shell": {
+ "component": "shell"
+ },
+ "speech": {
+ "component": "speech"
+ },
+ "storagetransfer": {
+ "component": "storagetransfer"
+ },
+ "talent": {
+ "component": "talent"
+ },
+ "texttospeech": {
+ "component": "texttospeech"
+ },
+ "tpu": {
+ "component": "tpu"
+ },
+ "trace": {
+ "component": "trace"
+ },
+ "translate": {
+ "component": "translate"
+ },
+ "video": {
+ "component": "video"
+ },
+ "videointelligence": {
+ "component": "videointelligence"
+ },
+ "vision/v2": {
+ "component": "vision"
+ },
+ "vmmigration": {
+ "component": "vmmigration"
+ },
+ "vpcaccess": {
+ "component": "vpcaccess"
+ },
+ "webrisk": {
+ "component": "webrisk"
+ },
+ "websecurityscanner": {
+ "component": "websecurityscanner"
+ },
+ "workflows": {
+ "component": "workflows"
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/release-please-config.json b/vendor/cloud.google.com/go/release-please-config.json
new file mode 100644
index 0000000000..546e7c31ad
--- /dev/null
+++ b/vendor/cloud.google.com/go/release-please-config.json
@@ -0,0 +1,10 @@
+{
+ "release-type": "go-yoshi",
+ "separate-pull-requests": true,
+ "include-component-in-tag": false,
+ "packages": {
+ ".": {
+ "component": "main"
+ }
+ }
+}
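Both files share the go-yoshi release type; the submodule config sets include-component-in-tag with a "/" separator, producing per-module tags like `datastore/vX.Y.Z` (the shape RELEASING.md above describes), while the root config (component "main", include-component-in-tag false) keeps plain `vX.Y.Z` tags. A small sketch of how the submodule file's shape decodes, assuming a hand-rolled struct rather than release-please's own types:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type releaseConfig struct {
        ReleaseType           string `json:"release-type"`
        IncludeComponentInTag bool   `json:"include-component-in-tag"`
        TagSeparator          string `json:"tag-separator"`
        Packages              map[string]struct {
            Component string `json:"component"`
        } `json:"packages"`
    }

    func main() {
        raw := []byte(`{"release-type":"go-yoshi","include-component-in-tag":true,
            "tag-separator":"/","packages":{"dataproc":{"component":"dataproc"}}}`)
        var cfg releaseConfig
        if err := json.Unmarshal(raw, &cfg); err != nil {
            panic(err)
        }
        // A dataproc release under this config is tagged like "dataproc/v1.0.0".
        fmt.Println(cfg.Packages["dataproc"].Component + cfg.TagSeparator + "v1.0.0")
    }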
diff --git a/vendor/cloud.google.com/go/testing.md b/vendor/cloud.google.com/go/testing.md
index 03867d561a..bcca0604db 100644
--- a/vendor/cloud.google.com/go/testing.md
+++ b/vendor/cloud.google.com/go/testing.md
@@ -9,7 +9,7 @@ on the Go client libraries.
## Testing gRPC services using fakes
*Note*: You can see the full
-[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/master/internal/examples/fake).
+[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/fake).
The clients found in `cloud.google.com/go` are gRPC based, with a couple of
notable exceptions being the [`storage`](https://pkg.go.dev/cloud.google.com/go/storage)
@@ -143,7 +143,7 @@ func TestTranslateTextWithConcreteClient(t *testing.T) {
## Testing using mocks
*Note*: You can see the full
-[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/master/internal/examples/mock).
+[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/mock).
When mocking code you need to work with interfaces. Let’s create an interface
for the `cloud.google.com/go/translate/apiv3` client used in the
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index 2def23fa1d..39fa6d5fe7 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -43,7 +43,7 @@ type Config struct {
// An optional endpoint URL (hostname only or fully qualified URI)
// that overrides the default generated endpoint for a client. Set this
- // to `""` to use the default generated endpoint.
+ // to `nil` or to `""` to use the default generated endpoint.
//
// Note: You must still provide a `Region` value when specifying an
// endpoint for a client.
@@ -138,7 +138,7 @@ type Config struct {
// `ExpectContinueTimeout` for information on adjusting the continue wait
// timeout. https://golang.org/pkg/net/http/#Transport
//
- // You should use this flag to disble 100-Continue if you experience issues
+ // You should use this flag to disable 100-Continue if you experience issues
// with proxies or third party S3 compatible services.
S3Disable100Continue *bool
@@ -183,7 +183,7 @@ type Config struct {
//
// Example:
// sess := session.Must(session.NewSession(aws.NewConfig()
- // .WithEC2MetadataDiableTimeoutOverride(true)))
+ // .WithEC2MetadataDisableTimeoutOverride(true)))
//
// svc := s3.New(sess)
//
@@ -194,7 +194,7 @@ type Config struct {
// both IPv4 and IPv6 addressing.
//
// Setting this for a service which does not support dual stack will fail
- // to make requets. It is not recommended to set this value on the session
+ // to make requests. It is not recommended to set this value on the session
// as it will apply to all service clients created with the session. Even
// services which don't support dual stack endpoints.
//
@@ -238,6 +238,7 @@ type Config struct {
// EnableEndpointDiscovery will allow for endpoint discovery on operations that
// have the definition in its model. By default, endpoint discovery is off.
+ // To use EndpointDiscovery, Endpoint should be unset or set to an empty string.
//
// Example:
// sess := session.Must(session.NewSession(&aws.Config{
@@ -437,13 +438,6 @@ func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
return c
}
-// MergeIn merges the passed in configs into the existing config object.
-func (c *Config) MergeIn(cfgs ...*Config) {
- for _, other := range cfgs {
- mergeInConfig(c, other)
- }
-}
-
// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag
// when resolving the endpoint for a service
func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {
@@ -458,6 +452,27 @@ func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEn
return c
}
+// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value
+// returning a Config pointer for chaining.
+func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config {
+ c.LowerCaseHeaderMaps = &t
+ return c
+}
+
+// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config {
+ c.DisableRestProtocolURICleaning = &t
+ return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+ for _, other := range cfgs {
+ mergeInConfig(c, other)
+ }
+}
+
func mergeInConfig(dst *Config, other *Config) {
if other == nil {
return
@@ -570,6 +585,10 @@ func mergeInConfig(dst *Config, other *Config) {
if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
}
+
+ if other.LowerCaseHeaderMaps != nil {
+ dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps
+ }
}
// Copy will return a shallow copy of the Config object. If any additional
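
A minimal sketch of how the setters added above compose, assuming a standard v1 import path; the region and flag values are illustrative only:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    )

    func main() {
    	// Each With* setter returns *Config, so calls chain left to right.
    	base := aws.NewConfig().
    		WithRegion("us-west-2").
    		WithLowerCaseHeaderMaps(true)

    	// MergeIn copies only the fields that are set on the other config.
    	override := aws.NewConfig().WithDisableRestProtocolURICleaning(true)
    	base.MergeIn(override)

    	fmt.Println(aws.BoolValue(base.LowerCaseHeaderMaps))            // true
    	fmt.Println(aws.BoolValue(base.DisableRestProtocolURICleaning)) // true
    }
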
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
index aa902d7083..d95a5eb540 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -225,6 +225,8 @@ var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointH
if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
r.Error = aws.ErrMissingRegion
} else if r.ClientInfo.Endpoint == "" {
+ // Neither the user provided an endpoint, nor did the SDK's endpoint
+ // resolver derive one.
r.Error = aws.ErrMissingEndpoint
}
}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
index 9f8fd92a50..a880a3de8f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -50,7 +50,7 @@ package credentials
import (
"fmt"
- "sync/atomic"
+ "sync"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
@@ -173,7 +173,9 @@ type Expiry struct {
// the expiration time given to ensure no requests are made with expired
// tokens.
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
- e.expiration = expiration
+ // Passed in expirations should have the monotonic clock values stripped.
+ // This ensures time comparisons will be based on wall-time.
+ e.expiration = expiration.Round(0)
if window > 0 {
e.expiration = e.expiration.Add(-window)
}
@@ -205,9 +207,10 @@ func (e *Expiry) ExpiresAt() time.Time {
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
- creds atomic.Value
- sf singleflight.Group
+ sf singleflight.Group
+ m sync.RWMutex
+ creds Value
provider Provider
}
@@ -216,7 +219,6 @@ func NewCredentials(provider Provider) *Credentials {
c := &Credentials{
provider: provider,
}
- c.creds.Store(Value{})
return c
}
@@ -233,8 +235,17 @@ func NewCredentials(provider Provider) *Credentials {
//
// Passed in Context is equivalent to aws.Context, and context.Context.
func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
- if curCreds := c.creds.Load(); !c.isExpired(curCreds) {
- return curCreds.(Value), nil
+ // Check if credentials are cached, and not expired.
+ select {
+ case curCreds, ok := <-c.asyncIsExpired():
+ // ok will only be true if the credentials were not expired. ok will
+ // be false, with no value received, if the credentials are expired.
+ if ok {
+ return curCreds, nil
+ }
+ case <-ctx.Done():
+ return Value{}, awserr.New("RequestCanceled",
+ "request context canceled", ctx.Err())
}
// Cannot pass context down to the actual retrieve, because the first
@@ -252,18 +263,23 @@ func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
}
}
-func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) {
- if curCreds := c.creds.Load(); !c.isExpired(curCreds) {
- return curCreds.(Value), nil
+func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
+ return curCreds, nil
}
+ var creds Value
+ var err error
if p, ok := c.provider.(ProviderWithContext); ok {
creds, err = p.RetrieveWithContext(ctx)
} else {
creds, err = c.provider.Retrieve()
}
if err == nil {
- c.creds.Store(creds)
+ c.creds = creds
}
return creds, err
@@ -288,7 +304,10 @@ func (c *Credentials) Get() (Value, error) {
// This will override the Provider's expired state, and force Credentials
// to call the Provider's Retrieve().
func (c *Credentials) Expire() {
- c.creds.Store(Value{})
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.creds = Value{}
}
// IsExpired returns if the credentials are no longer valid, and need
@@ -297,11 +316,32 @@ func (c *Credentials) Expire() {
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
- return c.isExpired(c.creds.Load())
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ return c.isExpiredLocked(c.creds)
}
-// isExpired helper method wrapping the definition of expired credentials.
-func (c *Credentials) isExpired(creds interface{}) bool {
+// asyncIsExpired returns a channel that receives the cached credentials
+// Value if they are not expired. The channel is closed either way; if it
+// closes without delivering a value, the credentials are expired.
+func (c *Credentials) asyncIsExpired() <-chan Value {
+ ch := make(chan Value, 1)
+ go func() {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
+ ch <- curCreds
+ }
+
+ close(ch)
+ }()
+
+ return ch
+}
+
+// isExpiredLocked helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpiredLocked(creds interface{}) bool {
return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired()
}
@@ -309,13 +349,17 @@ func (c *Credentials) isExpired(creds interface{}) bool {
// the underlying Provider, if it supports that interface. Otherwise, it returns
// an error.
func (c *Credentials) ExpiresAt() (time.Time, error) {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
expirer, ok := c.provider.(Expirer)
if !ok {
return time.Time{}, awserr.New("ProviderNotExpirer",
- fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName),
+ fmt.Sprintf("provider %s does not support ExpiresAt()",
+ c.creds.ProviderName),
nil)
}
- if c.creds.Load().(Value) == (Value{}) {
+ if c.creds == (Value{}) {
// set expiration time to the distant past
return time.Time{}, nil
}
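
A short sketch of the behavior this rewrite enables: GetWithContext can now give up on a canceled context both while checking the cached value (via asyncIsExpired) and while waiting on a slow retrieve. The slowProvider type below is hypothetical, used only for illustration:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"github.com/aws/aws-sdk-go/aws/credentials"
    )

    // slowProvider is a hypothetical provider whose Retrieve blocks.
    type slowProvider struct{}

    func (slowProvider) Retrieve() (credentials.Value, error) {
    	time.Sleep(5 * time.Second)
    	return credentials.Value{ProviderName: "slow"}, nil
    }

    func (slowProvider) IsExpired() bool { return true }

    func main() {
    	creds := credentials.NewCredentials(slowProvider{})

    	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    	defer cancel()

    	// Returns a "RequestCanceled" error after ~100ms instead of
    	// blocking the full five seconds on the provider.
    	_, err := creds.GetWithContext(ctx)
    	fmt.Println(err)
    }
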
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
index e155149581..22b5c5d9f3 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -17,8 +17,9 @@ var (
ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
)
-// A SharedCredentialsProvider retrieves credentials from the current user's home
-// directory, and keeps track if those credentials are expired.
+// A SharedCredentialsProvider retrieves an access key pair (access key ID,
+// secret access key, and session token if present) from the current user's
+// home directory, and keeps track of whether those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type SharedCredentialsProvider struct {
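
For reference, a minimal sketch of using this provider through the package's NewSharedCredentials helper; the "dev" profile name is illustrative:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
    	// An empty filename selects the default $HOME/.aws/credentials;
    	// "dev" names a profile in that file.
    	creds := credentials.NewSharedCredentials("", "dev")

    	v, err := creds.Get()
    	if err != nil {
    		fmt.Println("retrieve failed:", err)
    		return
    	}
    	fmt.Println("credentials from:", v.ProviderName)
    }
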
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
new file mode 100644
index 0000000000..18c940ab3c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
@@ -0,0 +1,60 @@
+// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
+//
+// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
+// expects that you have already performed the SSO login flow with the AWS CLI using the "aws sso login" command, or by
+// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
+// ~/.aws/sso/cache. If a cached token is not found, is expired, or the file is malformed, an error will be returned.
+//
+// Loading AWS SSO credentials with the AWS shared configuration file
+//
+// You can configure AWS SSO credentials from the AWS shared configuration file by
+// specifying the required keys in the profile:
+//
+// sso_account_id
+// sso_region
+// sso_role_name
+// sso_start_url
+//
+// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that define the target
+// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
+// provided, or an error will be returned.
+//
+// [profile devsso]
+// sso_start_url = https://my-sso-portal.awsapps.com/start
+// sso_role_name = SSOReadOnlyRole
+// sso_region = us-east-1
+// sso_account_id = 123456789012
+//
+// Using the session package, you can load the AWS SDK shared configuration, and specify that this profile be used to
+// retrieve credentials. For example:
+//
+// sess, err := session.NewSessionWithOptions(session.Options{
+// SharedConfigState: session.SharedConfigEnable,
+// Profile: "devsso",
+// })
+// if err != nil {
+// return err
+// }
+//
+// Programmatically loading AWS SSO credentials directly
+//
+// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
+// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
+//
+// svc := sso.New(sess, &aws.Config{
+// Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region
+// })
+//
+// provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start")
+//
+// credentials, err := provider.Get()
+// if err != nil {
+// return err
+// }
+//
+// Additional Resources
+//
+// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+//
+// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
+package ssocreds
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go
new file mode 100644
index 0000000000..ceca7dceec
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package ssocreds
+
+import "os"
+
+func getHomeDirectory() string {
+ return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go
new file mode 100644
index 0000000000..eb48f61e5b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go
@@ -0,0 +1,7 @@
+package ssocreds
+
+import "os"
+
+func getHomeDirectory() string {
+ return os.Getenv("USERPROFILE")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
new file mode 100644
index 0000000000..6eda2a5557
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
@@ -0,0 +1,180 @@
+package ssocreds
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/service/sso"
+ "github.com/aws/aws-sdk-go/service/sso/ssoiface"
+)
+
+// ErrCodeSSOProviderInvalidToken is the code type that is returned if the loaded token has expired or is otherwise invalid.
+// To refresh the SSO session, run aws sso login with the corresponding profile.
+const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken"
+
+const invalidTokenMessage = "the SSO session has expired or is invalid"
+
+func init() {
+ nowTime = time.Now
+ defaultCacheLocation = defaultCacheLocationImpl
+}
+
+var nowTime func() time.Time
+
+// ProviderName is the name of the provider used to specify the source of credentials.
+const ProviderName = "SSOProvider"
+
+var defaultCacheLocation func() string
+
+func defaultCacheLocationImpl() string {
+ return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache")
+}
+
+// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token.
+type Provider struct {
+ credentials.Expiry
+
+ // The Client which is configured for the AWS Region where the AWS SSO user portal is located.
+ Client ssoiface.SSOAPI
+
+ // The AWS account that is assigned to the user.
+ AccountID string
+
+ // The role name that is assigned to the user.
+ RoleName string
+
+ // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal.
+ StartURL string
+}
+
+// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured
+// for the AWS Region where the AWS SSO user portal is located.
+func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
+ return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...)
+}
+
+// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured
+// for the AWS Region where the AWS SSO user portal is located.
+func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
+ p := &Provider{
+ Client: client,
+ AccountID: accountID,
+ RoleName: roleName,
+ StartURL: startURL,
+ }
+
+ for _, fn := range optFns {
+ fn(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
+// by exchanging the accessToken present in ~/.aws/sso/cache.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+ return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
+// by exchanging the accessToken present in ~/.aws/sso/cache.
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+ tokenFile, err := loadTokenFile(p.StartURL)
+ if err != nil {
+ return credentials.Value{}, err
+ }
+
+ output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{
+ AccessToken: &tokenFile.AccessToken,
+ AccountId: &p.AccountID,
+ RoleName: &p.RoleName,
+ })
+ if err != nil {
+ return credentials.Value{}, err
+ }
+
+ expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC()
+ p.SetExpiration(expireTime, 0)
+
+ return credentials.Value{
+ AccessKeyID: aws.StringValue(output.RoleCredentials.AccessKeyId),
+ SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey),
+ SessionToken: aws.StringValue(output.RoleCredentials.SessionToken),
+ ProviderName: ProviderName,
+ }, nil
+}
+
+func getCacheFileName(url string) (string, error) {
+ hash := sha1.New()
+ _, err := hash.Write([]byte(url))
+ if err != nil {
+ return "", err
+ }
+ return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil
+}
+
+type rfc3339 time.Time
+
+func (r *rfc3339) UnmarshalJSON(bytes []byte) error {
+ var value string
+
+ if err := json.Unmarshal(bytes, &value); err != nil {
+ return err
+ }
+
+ parse, err := time.Parse(time.RFC3339, value)
+ if err != nil {
+ return fmt.Errorf("expected RFC3339 timestamp: %v", err)
+ }
+
+ *r = rfc3339(parse)
+
+ return nil
+}
+
+type token struct {
+ AccessToken string `json:"accessToken"`
+ ExpiresAt rfc3339 `json:"expiresAt"`
+ Region string `json:"region,omitempty"`
+ StartURL string `json:"startUrl,omitempty"`
+}
+
+func (t token) Expired() bool {
+ return nowTime().Round(0).After(time.Time(t.ExpiresAt))
+}
+
+func loadTokenFile(startURL string) (t token, err error) {
+ key, err := getCacheFileName(startURL)
+ if err != nil {
+ return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
+ }
+
+ fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key))
+ if err != nil {
+ return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
+ }
+
+ if err := json.Unmarshal(fileBytes, &t); err != nil {
+ return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
+ }
+
+ if len(t.AccessToken) == 0 {
+ return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
+ }
+
+ if t.Expired() {
+ return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
+ }
+
+ return t, nil
+}
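
To make the cache lookup above concrete, here is a small sketch that mirrors the unexported getCacheFileName logic; the start URL is illustrative:

    package main

    import (
    	"crypto/sha1"
    	"encoding/hex"
    	"fmt"
    	"path/filepath"
    	"strings"
    )

    func main() {
    	// The cached token for a portal lives at
    	// ~/.aws/sso/cache/<sha1(startURL)>.json, in lower-cased hex.
    	startURL := "https://my-sso-portal.awsapps.com/start"
    	sum := sha1.Sum([]byte(startURL))
    	name := strings.ToLower(hex.EncodeToString(sum[:])) + ".json"
    	fmt.Println(filepath.Join(".aws", "sso", "cache", name))
    }
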
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index 6846ef6f80..e42c5cdbb2 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -244,9 +244,11 @@ type AssumeRoleProvider struct {
MaxJitterFrac float64
}
-// NewCredentials returns a pointer to a new Credentials object wrapping the
+// NewCredentials returns a pointer to a new Credentials value wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
-// role will be named after a nanosecond timestamp of this operation.
+// role will be named after a nanosecond timestamp of this operation. The
+// Credentials value will attempt to refresh the credentials using the provider
+// when Credentials.Get is called, if the cached credentials are expiring.
//
// Takes a Config provider to create the STS client. The ConfigProvider is
// satisfied by the session.Session type.
@@ -268,9 +270,11 @@ func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*As
return credentials.NewCredentials(p)
}
-// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
-// role will be named after a nanosecond timestamp of this operation.
+// role will be named after a nanosecond timestamp of this operation. The
+// Credentials value will attempt to refresh the credentials using the provider
+// when Credentials.Get is called, if the cached credentials are expiring.
//
// Takes an AssumeRoler which can be satisfied by the STS client.
//
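
A brief sketch of the refresh behavior described in these comments, assuming a default session; the role ARN is illustrative:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    	"github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
    	sess := session.Must(session.NewSession())

    	// Get() assumes the role on first use and re-assumes it whenever
    	// the cached credentials are expiring.
    	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/demo")

    	if _, err := creds.Get(); err != nil {
    		fmt.Println("assume role failed:", err)
    	}
    }
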
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
index 6feb262b2f..cefe2a76d4 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
@@ -52,9 +52,21 @@ type WebIdentityRoleProvider struct {
credentials.Expiry
PolicyArns []*sts.PolicyDescriptorType
- client stsiface.STSAPI
+ // Duration the STS credentials will be valid for. Truncated to seconds.
+ // If unset, the assumed role will use AssumeRoleWithWebIdentity's default
+ // expiry duration. See
+ // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity
+ // for more information.
+ Duration time.Duration
+
+ // The amount of time the credentials will be refreshed before they expire.
+ // This is useful to refresh credentials before they expire, reducing the
+ // risk of using credentials as they expire. If unset, will default to no
+ // expiry window.
ExpiryWindow time.Duration
+ client stsiface.STSAPI
+
tokenFetcher TokenFetcher
roleARN string
roleSessionName string
@@ -107,11 +119,18 @@ func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (
// uses unix time in nanoseconds to uniquely identify sessions.
sessionName = strconv.FormatInt(now().UnixNano(), 10)
}
+
+ var duration *int64
+ if p.Duration != 0 {
+ duration = aws.Int64(int64(p.Duration / time.Second))
+ }
+
req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
PolicyArns: p.PolicyArns,
RoleArn: &p.roleARN,
RoleSessionName: &sessionName,
WebIdentityToken: aws.String(string(b)),
+ DurationSeconds: duration,
})
req.SetContext(ctx)
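
The Duration handling above truncates to whole seconds; a standalone sketch of the same conversion, with an illustrative duration:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/aws/aws-sdk-go/aws"
    )

    func main() {
    	// A zero Duration leaves DurationSeconds unset, so STS applies its
    	// default expiry; otherwise the duration is truncated to seconds.
    	d := 30 * time.Minute
    	var durationSeconds *int64
    	if d != 0 {
    		durationSeconds = aws.Int64(int64(d / time.Second))
    	}
    	fmt.Println(aws.Int64Value(durationSeconds)) // 1800
    }
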
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
index a716c021cf..69fa63dc08 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -20,7 +20,7 @@ func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOu
op := &request.Operation{
Name: "GetToken",
HTTPMethod: "PUT",
- HTTPPath: "/api/token",
+ HTTPPath: "/latest/api/token",
}
var output tokenOutput
@@ -62,7 +62,7 @@ func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string,
op := &request.Operation{
Name: "GetMetadata",
HTTPMethod: "GET",
- HTTPPath: sdkuri.PathJoin("/meta-data", p),
+ HTTPPath: sdkuri.PathJoin("/latest/meta-data", p),
}
output := &metadataOutput{}
@@ -88,7 +88,7 @@ func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) {
op := &request.Operation{
Name: "GetUserData",
HTTPMethod: "GET",
- HTTPPath: "/user-data",
+ HTTPPath: "/latest/user-data",
}
output := &metadataOutput{}
@@ -113,7 +113,7 @@ func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (stri
op := &request.Operation{
Name: "GetDynamicData",
HTTPMethod: "GET",
- HTTPPath: sdkuri.PathJoin("/dynamic", p),
+ HTTPPath: sdkuri.PathJoin("/latest/dynamic", p),
}
output := &metadataOutput{}
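
Client usage is unchanged by the path move; a minimal sketch, assuming the code runs where IMDS is reachable:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/ec2metadata"
    	"github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
    	sess := session.Must(session.NewSession())
    	svc := ec2metadata.New(sess)

    	// Now issues GET <endpoint>/latest/meta-data/instance-id, since the
    	// operation path carries the /latest prefix itself.
    	id, err := svc.GetMetadata("instance-id")
    	if err != nil {
    		fmt.Println("IMDS unreachable:", err)
    		return
    	}
    	fmt.Println(id)
    }
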
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
index b8b2940d74..8f35b3464b 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -5,6 +5,10 @@
// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
// true instructs the SDK to disable the EC2 Metadata client. The client cannot
// be used while the environment variable is set to true, (case insensitive).
+//
+// The endpoint of the EC2 IMDS client can be configured via the environment
+// variable AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
+// Session. See aws/session#Options.EC2IMDSEndpoint for more details.
package ec2metadata
import (
@@ -12,6 +16,7 @@ import (
"errors"
"io"
"net/http"
+ "net/url"
"os"
"strconv"
"strings"
@@ -41,7 +46,7 @@ const (
enableTokenProviderHandlerName = "enableTokenProviderHandler"
// TTL constants
- defaultTTL = 21600 * time.Second
+ defaultTTL = 21600 * time.Second
ttlExpirationWindow = 30 * time.Second
)
@@ -69,6 +74,9 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
// a client when not using a session. Generally using just New with a session
// is preferred.
//
+// Any URL path on the provided endpoint is removed to ensure the EC2 IMDS
+// client is able to communicate with the EC2 IMDS API.
+//
// If an unmodified HTTP client is provided from the stdlib default, or no client
// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default.
@@ -86,6 +94,15 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
cfg.MaxRetries = aws.Int(2)
}
+ if u, err := url.Parse(endpoint); err == nil {
+ // Remove path from the endpoint since it will be added by requests.
+ // This is an artifact of the SDK adding `/latest` to the endpoint for
+ // EC2 IMDS, but this is now moved to the operation definition.
+ u.Path = ""
+ u.RawPath = ""
+ endpoint = u.String()
+ }
+
svc := &EC2Metadata{
Client: client.New(
cfg,
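
The normalization performed in NewClient can be seen in isolation with a short sketch; the endpoint value is an illustrative override:

    package main

    import (
    	"fmt"
    	"net/url"
    )

    func main() {
    	// Any path on a user-supplied IMDS endpoint is dropped, because the
    	// operations now append their own /latest/... paths.
    	endpoint := "http://169.254.169.254/latest"
    	if u, err := url.Parse(endpoint); err == nil {
    		u.Path = ""
    		u.RawPath = ""
    		endpoint = u.String()
    	}
    	fmt.Println(endpoint) // http://169.254.169.254
    }
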
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
index d0a3a020d8..4b29f190bf 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
@@ -87,6 +87,7 @@ func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) {
// If the error code status is 401, we enable the token provider
if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil &&
e.StatusCode() == http.StatusUnauthorized {
+ t.token.Store(ec2Token{})
atomic.StoreUint32(&t.disabled, 0)
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 1ad2c42bb5..3cc48800d6 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -194,12 +194,42 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "access-analyzer-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "access-analyzer-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "access-analyzer-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "access-analyzer-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"acm": service{
@@ -262,6 +292,7 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -271,6 +302,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -312,11 +344,27 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "airflow": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"api.detective": service{
Defaults: endpoint{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -325,14 +373,40 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "api.detective-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "api.detective-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "api.detective-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "api.detective-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"api.ecr": service{
@@ -422,6 +496,30 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
+ "fips-dkr-us-east-1": endpoint{
+ Hostname: "ecr-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-dkr-us-east-2": endpoint{
+ Hostname: "ecr-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-dkr-us-west-1": endpoint{
+ Hostname: "ecr-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-dkr-us-west-2": endpoint{
+ Hostname: "ecr-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
"fips-us-east-1": endpoint{
Hostname: "ecr-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -507,6 +605,12 @@ var awsPartition = partition{
},
},
},
+ "api.fleethub.iot": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
"api.mediatailor": service{
Endpoints: endpoints{
@@ -533,6 +637,7 @@ var awsPartition = partition{
"api.sagemaker": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -542,6 +647,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -602,6 +708,38 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "app-integrations": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appflow": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"application-autoscaling": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
@@ -632,6 +770,7 @@ var awsPartition = partition{
"appmesh": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -641,6 +780,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -662,6 +802,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
@@ -679,22 +820,31 @@ var awsPartition = partition{
"appsync": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
"athena": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -704,15 +854,40 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "athena-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "athena-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "athena-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "athena-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"autoscaling": service{
@@ -747,6 +922,7 @@ var awsPartition = partition{
Protocols: []string{"http", "https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -756,6 +932,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -770,6 +947,7 @@ var awsPartition = partition{
"backup": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -779,6 +957,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -802,6 +981,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -867,12 +1047,11 @@ var awsPartition = partition{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
Defaults: endpoint{
- SSLCommonName: "service.chime.aws.amazon.com",
- Protocols: []string{"https"},
+ Protocols: []string{"https"},
},
Endpoints: endpoints{
"aws-global": endpoint{
- Hostname: "service.chime.aws.amazon.com",
+ Hostname: "chime.us-east-1.amazonaws.com",
Protocols: []string{"https"},
CredentialScope: credentialScope{
Region: "us-east-1",
@@ -892,6 +1071,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -1002,6 +1182,7 @@ var awsPartition = partition{
},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1011,6 +1192,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -1086,9 +1268,25 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "codeartifact": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"codebuild": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1098,6 +1296,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -1145,6 +1344,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -1211,9 +1411,25 @@ var awsPartition = partition{
},
},
},
+ "codeguru-reviewer": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"codepipeline": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -1222,6 +1438,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -1311,8 +1528,10 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
"fips-us-east-1": endpoint{
Hostname: "cognito-identity-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -1331,8 +1550,10 @@ var awsPartition = partition{
Region: "us-west-2",
},
},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -1346,8 +1567,10 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
"fips-us-east-1": endpoint{
Hostname: "cognito-idp-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -1366,8 +1589,10 @@ var awsPartition = partition{
Region: "us-west-2",
},
},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -1457,6 +1682,7 @@ var awsPartition = partition{
"config": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1466,15 +1692,40 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "config-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "config-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "config-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "config-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"connect": service{
@@ -1489,6 +1740,14 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "contact-lens": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"cur": service{
Endpoints: endpoints{
@@ -1538,6 +1797,7 @@ var awsPartition = partition{
"datasync": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1547,6 +1807,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -1667,6 +1928,8 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-west-2": endpoint{},
},
@@ -1765,6 +2028,12 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
+ "sa-east-1": endpoint{
+ Hostname: "rds.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
"us-east-1": endpoint{
Hostname: "rds.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -1788,6 +2057,7 @@ var awsPartition = partition{
"ds": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1797,6 +2067,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -1902,6 +2173,61 @@ var awsPartition = partition{
},
},
},
+ "ebs": service{
+
+ Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "ebs-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "ebs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "ebs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "ebs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "ebs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"ec2": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
@@ -2024,6 +2350,7 @@ var awsPartition = partition{
Protocols: []string{"http", "https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2033,6 +2360,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -2048,6 +2376,12 @@ var awsPartition = partition{
Region: "us-east-2",
},
},
+ "fips-us-west-1": endpoint{
+ Hostname: "fips.eks.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
"fips-us-west-2": endpoint{
Hostname: "fips.eks.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
@@ -2058,6 +2392,7 @@ var awsPartition = partition{
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -2423,6 +2758,14 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "emr-containers": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"entitlement.marketplace": service{
Defaults: endpoint{
CredentialScope: credentialScope{
@@ -2516,6 +2859,7 @@ var awsPartition = partition{
"firehose": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2525,6 +2869,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -2565,6 +2910,8 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -2573,9 +2920,22 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "fips-af-south-1": endpoint{
+ Hostname: "fms-fips.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ "fips-ap-east-1": endpoint{
+ Hostname: "fms-fips.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
"fips-ap-northeast-1": endpoint{
Hostname: "fms-fips.ap-northeast-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -2618,6 +2978,12 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ "fips-eu-south-1": endpoint{
+ Hostname: "fms-fips.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
"fips-eu-west-1": endpoint{
Hostname: "fms-fips.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -2636,6 +3002,12 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
+ "fips-me-south-1": endpoint{
+ Hostname: "fms-fips.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
"fips-sa-east-1": endpoint{
Hostname: "fms-fips.sa-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -2666,11 +3038,12 @@ var awsPartition = partition{
Region: "us-west-2",
},
},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"forecast": service{
@@ -2706,19 +3079,56 @@ var awsPartition = partition{
"fsx": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-prod-ca-central-1": endpoint{
+ Hostname: "fsx-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-prod-us-east-1": endpoint{
+ Hostname: "fsx-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-prod-us-east-2": endpoint{
+ Hostname: "fsx-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-prod-us-west-1": endpoint{
+ Hostname: "fsx-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-prod-us-west-2": endpoint{
+ Hostname: "fsx-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"gamelift": service{
@@ -2800,6 +3210,7 @@ var awsPartition = partition{
"glue": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2809,6 +3220,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -2866,12 +3278,25 @@ var awsPartition = partition{
"groundstation": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-north-1": endpoint{},
"eu-west-1": endpoint{},
- "me-south-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-2": endpoint{
+ Hostname: "groundstation-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "groundstation-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"guardduty": service{
@@ -2880,6 +3305,7 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2889,6 +3315,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -2927,9 +3354,28 @@ var awsPartition = partition{
"health": service{
Endpoints: endpoints{
+ "fips-us-east-2": endpoint{
+ Hostname: "health-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ },
+ },
+ "healthlake": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
"us-east-1": endpoint{},
},
},
+ "honeycode": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
"iam": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
@@ -2949,6 +3395,24 @@ var awsPartition = partition{
},
},
},
+ "identitystore": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"importexport": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
@@ -3036,6 +3500,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
@@ -3161,6 +3626,23 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "iotwireless": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{
+ Hostname: "api.iotwireless.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "api.iotwireless.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
"kafka": service{
Endpoints: endpoints{
@@ -3173,6 +3655,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -3245,6 +3728,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -3252,6 +3736,7 @@ var awsPartition = partition{
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -3303,6 +3788,7 @@ var awsPartition = partition{
"lakeformation": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -3311,14 +3797,39 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "lakeformation-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "lakeformation-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "lakeformation-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "lakeformation-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"lambda": service{
@@ -3373,6 +3884,7 @@ var awsPartition = partition{
"license-manager": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -3382,6 +3894,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -3452,11 +3965,47 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "logs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "logs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "logs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "logs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lookoutvision": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
- "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -3486,6 +4035,55 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "macie2": service{
+
+ Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "macie2-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "macie2-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "macie2-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "macie2-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"managedblockchain": service{
Endpoints: endpoints{
@@ -3493,6 +4091,7 @@ var awsPartition = partition{
"ap-northeast-2": endpoint{},
"ap-southeast-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
},
},
@@ -3587,10 +4186,28 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "medialive-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "medialive-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "medialive-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"mediapackage": service{
@@ -3661,6 +4278,8 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-west-2": endpoint{},
},
@@ -3678,13 +4297,26 @@ var awsPartition = partition{
},
},
Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{},
- "us-west-2": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "models-fips.lex.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "models-fips.lex.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"monitoring": service{
@@ -3750,6 +4382,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -3798,6 +4431,12 @@ var awsPartition = partition{
"neptune": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{
+ Hostname: "rds.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
"ap-northeast-1": endpoint{
Hostname: "rds.ap-northeast-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -3870,6 +4509,12 @@ var awsPartition = partition{
Region: "me-south-1",
},
},
+ "sa-east-1": endpoint{
+ Hostname: "rds.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
"us-east-1": endpoint{
Hostname: "rds.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -3882,6 +4527,12 @@ var awsPartition = partition{
Region: "us-east-2",
},
},
+ "us-west-1": endpoint{
+ Hostname: "rds.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
"us-west-2": endpoint{
Hostname: "rds.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
@@ -3893,6 +4544,24 @@ var awsPartition = partition{
"oidc": service{
Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "oidc.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "oidc.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "oidc.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
"ap-southeast-1": endpoint{
Hostname: "oidc.ap-southeast-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -3917,6 +4586,12 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ "eu-north-1": endpoint{
+ Hostname: "oidc.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
"eu-west-1": endpoint{
Hostname: "oidc.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -4005,14 +4680,17 @@ var awsPartition = partition{
"outposts": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -4047,6 +4725,7 @@ var awsPartition = partition{
},
},
"me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -4060,10 +4739,15 @@ var awsPartition = partition{
},
},
Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"fips-us-east-1": endpoint{
Hostname: "pinpoint-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -4196,6 +4880,18 @@ var awsPartition = partition{
},
},
},
+ "profile": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"projects.iot1click": service{
Endpoints: endpoints{
@@ -4225,6 +4921,7 @@ var awsPartition = partition{
"ram": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -4234,6 +4931,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -4365,13 +5063,44 @@ var awsPartition = partition{
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "rekognition-fips.ca-central-1": endpoint{
+ Hostname: "rekognition-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "rekognition-fips.us-east-1": endpoint{
+ Hostname: "rekognition-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "rekognition-fips.us-east-2": endpoint{
+ Hostname: "rekognition-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "rekognition-fips.us-west-1": endpoint{
+ Hostname: "rekognition-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "rekognition-fips.us-west-2": endpoint{
+ Hostname: "rekognition-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"resource-groups": service{
@@ -4446,6 +5175,12 @@ var awsPartition = partition{
Region: "us-east-1",
},
},
+ "fips-aws-global": endpoint{
+ Hostname: "route53-fips.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
},
},
"route53domains": service{
@@ -4459,6 +5194,7 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -4468,6 +5204,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -4486,18 +5223,32 @@ var awsPartition = partition{
},
},
Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{},
- "us-west-2": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "runtime-fips.lex.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "runtime-fips.lex.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"runtime.sagemaker": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -4507,6 +5258,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -4664,6 +5416,13 @@ var awsPartition = partition{
Region: "ca-central-1",
},
},
+ "ca-central-1-fips": endpoint{
+ Hostname: "s3-control-fips.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
"eu-central-1": endpoint{
Hostname: "s3-control.eu-central-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
@@ -4883,12 +5642,36 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "securityhub-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "securityhub-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "securityhub-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "securityhub-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"serverlessrepo": service{
@@ -4955,6 +5738,7 @@ var awsPartition = partition{
"servicecatalog": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -4964,6 +5748,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -5002,6 +5787,7 @@ var awsPartition = partition{
"servicediscovery": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -5011,6 +5797,40 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "servicediscovery-fips": endpoint{
+ Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "servicequotas": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -5110,6 +5930,8 @@ var awsPartition = partition{
"snowball": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -5117,6 +5939,8 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -5132,6 +5956,12 @@ var awsPartition = partition{
Region: "ap-northeast-2",
},
},
+ "fips-ap-northeast-3": endpoint{
+ Hostname: "snowball-fips.ap-northeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-3",
+ },
+ },
"fips-ap-south-1": endpoint{
Hostname: "snowball-fips.ap-south-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -5339,6 +6169,12 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "ssm-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
"fips-us-east-1": endpoint{
Hostname: "ssm-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -5365,34 +6201,10 @@ var awsPartition = partition{
},
"me-south-1": endpoint{},
"sa-east-1": endpoint{},
- "ssm-facade-fips-us-east-1": endpoint{
- Hostname: "ssm-facade-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- "ssm-facade-fips-us-east-2": endpoint{
- Hostname: "ssm-facade-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- "ssm-facade-fips-us-west-1": endpoint{
- Hostname: "ssm-facade-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- "ssm-facade-fips-us-west-2": endpoint{
- Hostname: "ssm-facade-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"states": service{
@@ -5461,12 +6273,18 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips": endpoint{
+ Hostname: "storagegateway-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"streams.dynamodb": service{
@@ -5726,9 +6544,14 @@ var awsPartition = partition{
"transcribestreaming": service{
Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@@ -5748,11 +6571,41 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "transfer-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "transfer-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "transfer-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "transfer-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "transfer-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"translate": service{
@@ -5818,6 +6671,12 @@ var awsPartition = partition{
"waf-regional": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{
+ Hostname: "waf-regional.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
"ap-east-1": endpoint{
Hostname: "waf-regional.ap-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -5872,6 +6731,12 @@ var awsPartition = partition{
Region: "eu-north-1",
},
},
+ "eu-south-1": endpoint{
+ Hostname: "waf-regional.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
"eu-west-1": endpoint{
Hostname: "waf-regional.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -5890,6 +6755,12 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
+ "fips-af-south-1": endpoint{
+ Hostname: "waf-regional-fips.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
"fips-ap-east-1": endpoint{
Hostname: "waf-regional-fips.ap-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -5944,6 +6815,12 @@ var awsPartition = partition{
Region: "eu-north-1",
},
},
+ "fips-eu-south-1": endpoint{
+ Hostname: "waf-regional-fips.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
"fips-eu-west-1": endpoint{
Hostname: "waf-regional-fips.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -6080,9 +6957,21 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "workspaces-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "workspaces-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"xray": service{
@@ -6102,12 +6991,36 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "xray-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "xray-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "xray-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "xray-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
},
@@ -6142,6 +7055,13 @@ var awscnPartition = partition{
},
},
Services: services{
+ "access-analyzer": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"acm": service{
Endpoints: endpoints{
@@ -6192,7 +7112,8 @@ var awscnPartition = partition{
"appsync": service{
Endpoints: endpoints{
- "cn-north-1": endpoint{},
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
},
},
"athena": service{
@@ -6211,6 +7132,15 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "autoscaling-plans": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"backup": service{
Endpoints: endpoints{
@@ -6225,6 +7155,32 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "budgets": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "budgets.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "ce": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "ce.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
"cloudformation": service{
Endpoints: endpoints{
@@ -6287,6 +7243,12 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "cur": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{},
+ },
+ },
"dax": service{
Endpoints: endpoints{
@@ -6307,6 +7269,17 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "docdb": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{
+ Hostname: "rds.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
"ds": service{
Endpoints: endpoints{
@@ -6323,6 +7296,13 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "ebs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"ec2": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
@@ -6431,6 +7411,13 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "fsx": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"gamelift": service{
Endpoints: endpoints{
@@ -6493,6 +7480,29 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "iotanalytics": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "iotevents": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "ioteventsdata": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
"iotsecuredtunneling": service{
Endpoints: endpoints{
@@ -6514,6 +7524,13 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "kinesisanalytics": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"kms": service{
Endpoints: endpoints{
@@ -6521,6 +7538,12 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "lakeformation": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
"lambda": service{
Endpoints: endpoints{
@@ -6573,12 +7596,32 @@ var awscnPartition = partition{
},
},
},
+ "organizations": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "organizations.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
"polly": service{
Endpoints: endpoints{
"cn-northwest-1": endpoint{},
},
},
+ "ram": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"rds": service{
Endpoints: endpoints{
@@ -6593,6 +7636,13 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "resource-groups": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"route53": service{
PartitionEndpoint: "aws-cn-global",
IsRegionalized: boxedFalse,
@@ -6658,6 +7708,13 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "securityhub": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"serverlessrepo": service{
Defaults: endpoint{
Protocols: []string{"https"},
@@ -6671,6 +7728,13 @@ var awscnPartition = partition{
},
},
},
+ "servicediscovery": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"sms": service{
Endpoints: endpoints{
@@ -6681,13 +7745,20 @@ var awscnPartition = partition{
"snowball": service{
Endpoints: endpoints{
- "cn-north-1": endpoint{},
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
"fips-cn-north-1": endpoint{
Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-north-1",
},
},
+ "fips-cn-northwest-1": endpoint{
+ Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
},
},
"sns": service{
@@ -6842,15 +7913,35 @@ var awsusgovPartition = partition{
"access-analyzer": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "access-analyzer.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "access-analyzer.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"acm": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "acm.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "acm.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"acm-pca": service{
@@ -6877,6 +7968,18 @@ var awsusgovPartition = partition{
"api.ecr": service{
Endpoints: endpoints{
+ "fips-dkr-us-gov-east-1": endpoint{
+ Hostname: "ecr-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-dkr-us-gov-west-1": endpoint{
+ Hostname: "ecr-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"fips-us-gov-east-1": endpoint{
Hostname: "ecr-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -6907,6 +8010,18 @@ var awsusgovPartition = partition{
Endpoints: endpoints{
"us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1-fips-secondary": endpoint{
+ Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"apigateway": service{
@@ -6925,8 +8040,12 @@ var awsusgovPartition = partition{
},
},
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
},
},
"appstream2": service{
@@ -6981,6 +8100,17 @@ var awsusgovPartition = partition{
Protocols: []string{"http", "https"},
},
Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "backup": service{
+
+ Endpoints: endpoints{
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -7127,6 +8257,12 @@ var awsusgovPartition = partition{
"cognito-identity": service{
Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-west-1": endpoint{},
},
},
@@ -7159,12 +8295,30 @@ var awsusgovPartition = partition{
"comprehendmedical": service{
Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-west-1": endpoint{},
},
},
"config": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "config.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "config.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -7218,6 +8372,17 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "docdb": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Hostname: "rds.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
"ds": service{
Endpoints: endpoints{
@@ -7256,6 +8421,13 @@ var awsusgovPartition = partition{
},
},
},
+ "ebs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
"ec2": service{
Endpoints: endpoints{
@@ -7308,6 +8480,18 @@ var awsusgovPartition = partition{
Protocols: []string{"http", "https"},
},
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "eks.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "eks.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -7316,7 +8500,7 @@ var awsusgovPartition = partition{
Endpoints: endpoints{
"fips": endpoint{
- Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com",
+ Hostname: "elasticache.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
@@ -7365,13 +8549,13 @@ var awsusgovPartition = partition{
Endpoints: endpoints{
"fips-us-gov-east-1": endpoint{
- Hostname: "elasticloadbalancing-fips.us-gov-east-1.amazonaws.com",
+ Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
},
"fips-us-gov-west-1": endpoint{
- Hostname: "elasticloadbalancing-fips.us-gov-west-1.amazonaws.com",
+ Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
@@ -7464,6 +8648,25 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "fsx": service{
+
+ Endpoints: endpoints{
+ "fips-prod-us-gov-east-1": endpoint{
+ Hostname: "fsx-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-prod-us-gov-west-1": endpoint{
+ Hostname: "fsx-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
"glacier": service{
Endpoints: endpoints{
@@ -7507,6 +8710,30 @@ var awsusgovPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "dataplane-us-gov-east-1": endpoint{
+ Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "dataplane-us-gov-west-1": endpoint{
+ Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-east-1": endpoint{
+ Hostname: "greengrass.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"us-gov-west-1": endpoint{
Hostname: "greengrass.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -7521,6 +8748,13 @@ var awsusgovPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "guardduty.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"us-gov-west-1": endpoint{},
"us-gov-west-1-fips": endpoint{
Hostname: "guardduty.us-gov-west-1.amazonaws.com",
@@ -7533,7 +8767,12 @@ var awsusgovPartition = partition{
"health": service{
Endpoints: endpoints{
- "us-gov-west-1": endpoint{},
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "health-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"iam": service{
@@ -7547,6 +8786,12 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ "iam-govcloud-fips": endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"inspector": service{
@@ -7575,12 +8820,14 @@ var awsusgovPartition = partition{
},
},
Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
},
"iotsecuredtunneling": service{
Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
},
@@ -7594,6 +8841,23 @@ var awsusgovPartition = partition{
"kinesis": service{
Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "kinesis.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "kinesis.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "kinesisanalytics": service{
+
+ Endpoints: endpoints{
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -7611,6 +8875,18 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "lakeformation": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ },
+ },
"lambda": service{
Endpoints: endpoints{
@@ -7652,8 +8928,18 @@ var awsusgovPartition = partition{
"logs": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "logs.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "logs.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"mediaconvert": service{
@@ -7757,7 +9043,18 @@ var awsusgovPartition = partition{
},
},
Endpoints: endpoints{
- "us-gov-west-1": endpoint{},
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "pinpoint.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"polly": service{
@@ -7818,6 +9115,12 @@ var awsusgovPartition = partition{
"rekognition": service{
Endpoints: endpoints{
+ "rekognition-fips.us-gov-west-1": endpoint{
+ Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-west-1": endpoint{},
},
},
@@ -7851,6 +9154,12 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ "fips-aws-us-gov-global": endpoint{
+ Hostname: "route53.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"route53resolver": service{
@@ -7875,7 +9184,7 @@ var awsusgovPartition = partition{
},
Endpoints: endpoints{
"fips-us-gov-west-1": endpoint{
- Hostname: "s3-fips-us-gov-west-1.amazonaws.com",
+ Hostname: "s3-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
@@ -7951,6 +9260,18 @@ var awsusgovPartition = partition{
"securityhub": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -8085,18 +9406,6 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
- "ssm-facade-fips-us-gov-east-1": endpoint{
- Hostname: "ssm-facade.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- "ssm-facade-fips-us-gov-west-1": endpoint{
- Hostname: "ssm-facade.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -8123,6 +9432,12 @@ var awsusgovPartition = partition{
"storagegateway": service{
Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -8232,6 +9547,25 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "transfer": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "transfer-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "transfer-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
"translate": service{
Defaults: endpoint{
Protocols: []string{"https"},
@@ -8249,12 +9583,24 @@ var awsusgovPartition = partition{
"waf-regional": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"fips-us-gov-west-1": endpoint{
Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
},
+ "us-gov-east-1": endpoint{
+ Hostname: "waf-regional.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"us-gov-west-1": endpoint{
Hostname: "waf-regional.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -8266,12 +9612,30 @@ var awsusgovPartition = partition{
"workspaces": service{
Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-west-1": endpoint{},
},
},
"xray": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "xray-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "xray-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -8576,6 +9940,12 @@ var awsisoPartition = partition{
},
},
},
+ "secretsmanager": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"snowball": service{
Endpoints: endpoints{
@@ -8598,6 +9968,12 @@ var awsisoPartition = partition{
},
},
},
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"states": service{
Endpoints: endpoints{
@@ -8641,6 +10017,28 @@ var awsisoPartition = partition{
"us-iso-east-1": endpoint{},
},
},
+ "transcribe": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "transcribestreaming": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"workspaces": service{
Endpoints: endpoints{
@@ -8676,6 +10074,17 @@ var awsisobPartition = partition{
},
},
Services: services{
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{
+ Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ },
+ },
"application-autoscaling": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
@@ -8704,6 +10113,12 @@ var awsisobPartition = partition{
"us-isob-east-1": endpoint{},
},
},
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
"config": service{
Endpoints: endpoints{
@@ -8755,6 +10170,12 @@ var awsisobPartition = partition{
},
},
},
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
"elasticache": service{
Endpoints: endpoints{
@@ -8775,6 +10196,12 @@ var awsisobPartition = partition{
"us-isob-east-1": endpoint{},
},
},
+ "es": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
"events": service{
Endpoints: endpoints{
@@ -8824,6 +10251,12 @@ var awsisobPartition = partition{
"us-isob-east-1": endpoint{},
},
},
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
"license-manager": service{
Endpoints: endpoints{
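
The defaults.go changes above are generated partition data: new regions (af-south-1, eu-south-1) and FIPS pseudo-regions added to existing services. A minimal sketch of how one of the new entries resolves through the public API; the service name "transfer" and the "fips-us-east-1" key come from the hunks above, the rest is standard endpoints package usage:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // FIPS endpoints are modeled as extra endpoint keys in the partition
        // data, so they resolve like ordinary regions.
        ep, err := endpoints.DefaultResolver().EndpointFor("transfer", "fips-us-east-1")
        if err != nil {
            fmt.Println("resolve failed:", err)
            return
        }
        // Prints https://transfer-fips.us-east-1.amazonaws.com us-east-1
        fmt.Println(ep.URL, ep.SigningRegion)
    }
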
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
index eb2ac83c99..773613722f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -7,6 +7,8 @@ import (
"strings"
)
+var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`)
+
type partitions []partition
func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
@@ -124,7 +126,7 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (
defs := []endpoint{p.Defaults, s.Defaults}
- return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil
+ return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt)
}
func serviceList(ss services) []string {
@@ -233,7 +235,7 @@ func getByPriority(s []string, p []string, def string) string {
return s[0]
}
-func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
+func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) {
var merged endpoint
for _, def := range defs {
merged.mergeIn(def)
@@ -260,6 +262,10 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [
region = signingRegion
}
+ if !validateInputRegion(region) {
+ return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided")
+ }
+
u := strings.Replace(hostname, "{service}", service, 1)
u = strings.Replace(u, "{region}", region, 1)
u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
@@ -274,7 +280,7 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [
SigningName: signingName,
SigningNameDerived: signingNameDerived,
SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
- }
+ }, nil
}
func getEndpointScheme(protocols []string, disableSSL bool) string {
@@ -339,3 +345,7 @@ const (
boxedFalse
boxedTrue
)
+
+func validateInputRegion(region string) bool {
+ return regionValidationRegex.MatchString(region)
+}
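
The functional change in v3model.go is that endpoint resolution can now fail: every input region must match regionValidationRegex before it is substituted into the {region} hostname template, which blocks strings such as "evil.example.com" from steering requests to an attacker-chosen host. A standalone sketch of the same check, with the pattern copied verbatim from the hunk above:

    package main

    import (
        "fmt"
        "regexp"
    )

    // Same pattern as the new regionValidationRegex: one alphanumeric, then
    // any run of alphanumerics or hyphens, ending alphanumeric.
    var regionRe = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`)

    func main() {
        for _, region := range []string{"us-east-1", "fips-us-gov-west-1", "evil.example.com", "-us-east-1"} {
            fmt.Printf("%-22q valid=%v\n", region, regionRe.MatchString(region))
        }
    }
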
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
index d9b37f4d32..2ba3c56c11 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -9,7 +9,8 @@ func isErrConnectionReset(err error) bool {
return false
}
- if strings.Contains(err.Error(), "connection reset") ||
+ if strings.Contains(err.Error(), "use of closed network connection") ||
+ strings.Contains(err.Error(), "connection reset") ||
strings.Contains(err.Error(), "broken pipe") {
return true
}
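
This one-line addition makes the retry layer treat "use of closed network connection" errors the same way as a TCP reset, so requests that die on a stale pooled connection are retried instead of surfacing to the caller. A hedged sketch of the predicate's behavior; only the branch visible in the hunk is mirrored here, since the rest of the function is not shown:

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    // Mirrors the visible branch of isErrConnectionReset after this change.
    func isRetryableConnErr(err error) bool {
        msg := err.Error()
        return strings.Contains(msg, "use of closed network connection") ||
            strings.Contains(msg, "connection reset") ||
            strings.Contains(msg, "broken pipe")
    }

    func main() {
        fmt.Println(isRetryableConnErr(errors.New("use of closed network connection"))) // true
        fmt.Println(isRetryableConnErr(errors.New("context deadline exceeded")))        // false
    }
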
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
index fe6dac1f47..b0cef7575d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
@@ -9,6 +9,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
+ "github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/request"
@@ -100,6 +101,9 @@ func resolveCredsFromProfile(cfg *aws.Config,
sharedCfg.Creds,
)
+ case sharedCfg.hasSSOConfiguration():
+ creds = resolveSSOCredentials(cfg, sharedCfg, handlers)
+
case len(sharedCfg.CredentialProcess) != 0:
// Get credentials from CredentialProcess
creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
@@ -151,6 +155,21 @@ func resolveCredsFromProfile(cfg *aws.Config,
return creds, nil
}
+func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) *credentials.Credentials {
+ cfgCopy := cfg.Copy()
+ cfgCopy.Region = &sharedCfg.SSORegion
+
+ return ssocreds.NewCredentials(
+ &Session{
+ Config: cfgCopy,
+ Handlers: handlers.Copy(),
+ },
+ sharedCfg.SSOAccountID,
+ sharedCfg.SSORoleName,
+ sharedCfg.SSOStartURL,
+ )
+}
+
// valid credential source values
const (
credSourceEc2Metadata = "Ec2InstanceMetadata"
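
With this change, a profile that carries SSO settings is resolved before credential_process and static keys, and resolveSSOCredentials copies the config so it can pin the region to the profile's sso_region without disturbing the caller's session. The same provider can be constructed directly; a minimal sketch, where the account ID, role name, and start URL are placeholders for the profile's sso_account_id, sso_role_name, and sso_start_url values, and a cached token from `aws sso login` is assumed to exist:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // The region must be the SSO region, matching what
        // resolveSSOCredentials does with sharedCfg.SSORegion.
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))

        creds := ssocreds.NewCredentials(sess, "123456789012", "ReadOnlyRole",
            "https://example.awsapps.com/start")

        if v, err := creds.Get(); err != nil {
            fmt.Println("no cached SSO token:", err)
        } else {
            fmt.Println("resolved credentials via", v.ProviderName)
        }
    }
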
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
new file mode 100644
index 0000000000..593aedc421
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
@@ -0,0 +1,27 @@
+// +build go1.13
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCustomTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go
index ea9ebb6f6a..1bf31cf8e5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go
@@ -1,4 +1,4 @@
-// +build go1.7
+// +build !go1.13,go1.7
package session
@@ -10,7 +10,7 @@ import (
// Transport that should be used when a custom CA bundle is specified with the
// SDK.
-func getCABundleTransport() *http.Transport {
+func getCustomTransport() *http.Transport {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
index fec39dfc12..253d7bc9d5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
@@ -10,7 +10,7 @@ import (
// Transport that should be used when a custom CA bundle is specified with the
// SDK.
-func getCABundleTransport() *http.Transport {
+func getCustomTransport() *http.Transport {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
index 1c5a5391e6..db24060544 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
@@ -10,7 +10,7 @@ import (
// Transport that should be used when a custom CA bundle is specified with the
// SDK.
-func getCABundleTransport() *http.Transport {
+func getCustomTransport() *http.Transport {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
index 7ec66e7e58..9419b518d5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -208,6 +208,8 @@ env values as well.
AWS_SDK_LOAD_CONFIG=1
+Custom Shared Config and Credential Files
+
Shared credentials file path can be set to instruct the SDK to use an alternative
file for the shared credentials. If not set the file will be loaded from
$HOME/.aws/credentials on Linux/Unix based systems, and
@@ -222,6 +224,8 @@ $HOME/.aws/config on Linux/Unix based systems, and
AWS_CONFIG_FILE=$HOME/my_shared_config
+Custom CA Bundle
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
will use instead of the default system's root CA bundle. Use this only
if you want to replace the CA bundle the SDK uses for TLS requests.
@@ -241,5 +245,45 @@ over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
Setting a custom HTTPClient in the aws.Config options will override this setting.
To use this option and custom HTTP client, the HTTP client needs to be provided
when creating the session. Not the service client.
+
+Custom Client TLS Certificate
+
+The SDK supports configuring client TLS certificates, via the environment or
+session options, that are sent as part of the client's TLS handshake for
+client authentication. If used, both the Cert and Key values are required. If
+either is missing, or either file fails to load, an error will be returned.
+
+The HTTP client's Transport concrete implementation must be an http.Transport,
+or creating the session will fail.
+
+ AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+ AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+
+This can also be configured via the session.Options ClientTLSCert and ClientTLSKey.
+
+ sess, err := session.NewSessionWithOptions(session.Options{
+ ClientTLSCert: myCertFile,
+ ClientTLSKey: myKeyFile,
+ })
+
+Custom EC2 IMDS Endpoint
+
+The endpoint of the EC2 IMDS client can be configured via the environment
+variable AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
+Session. See Options.EC2IMDSEndpoint for more details.
+
+ AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+
+If using a URL with an IPv6 address literal, the IPv6 address
+component must be enclosed in square brackets.
+
+ AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+
+The custom EC2 IMDS endpoint can also be specified via the Session options.
+
+ sess, err := session.NewSessionWithOptions(session.Options{
+ EC2IMDSEndpoint: "http://[::1]",
+ })
*/
package session
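
The new doc sections above correspond to the CustomCABundle, ClientTLSCert/ClientTLSKey, and EC2IMDSEndpoint session options added in this update. A combined sketch, with placeholder file names, showing all of them set programmatically rather than through the environment:

    package main

    import (
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Placeholder paths; equivalent to AWS_CA_BUNDLE,
        // AWS_SDK_GO_CLIENT_TLS_CERT, and AWS_SDK_GO_CLIENT_TLS_KEY.
        caBundle, err := os.Open("ca_bundle.pem")
        if err != nil {
            log.Fatal(err)
        }
        defer caBundle.Close()

        cert, err := os.Open("client_cert.pem")
        if err != nil {
            log.Fatal(err)
        }
        defer cert.Close()

        key, err := os.Open("client_key.pem")
        if err != nil {
            log.Fatal(err)
        }
        defer key.Close()

        sess, err := session.NewSessionWithOptions(session.Options{
            CustomCABundle:  caBundle,
            ClientTLSCert:   cert,
            ClientTLSKey:    key,
            EC2IMDSEndpoint: "http://[::1]",
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = sess // use sess to construct service clients
    }
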
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index c1e0e9c954..3cd5d4b5ae 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -101,6 +101,18 @@ type envConfig struct {
// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
CustomCABundle string
+ // Sets the TLS client certificate that should be used by the SDK's HTTP transport
+ // when making requests. The certificate must be paired with a TLS client key file.
+ //
+ // AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+ ClientTLSCert string
+
+ // Sets the TLS client key that should be used by the SDK's HTTP transport
+ // when making requests. The key must be paired with a TLS client certificate file.
+ //
+ // AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+ ClientTLSKey string
+
csmEnabled string
CSMEnabled *bool
CSMPort string
@@ -148,6 +160,11 @@ type envConfig struct {
//
// AWS_S3_USE_ARN_REGION=true
S3UseARNRegion bool
+
+ // Specifies an alternative endpoint to use for EC2 IMDS.
+ //
+ // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+ EC2IMDSEndpoint string
}
var (
@@ -211,6 +228,18 @@ var (
s3UseARNRegionEnvKey = []string{
"AWS_S3_USE_ARN_REGION",
}
+ ec2IMDSEndpointEnvKey = []string{
+ "AWS_EC2_METADATA_SERVICE_ENDPOINT",
+ }
+ useCABundleKey = []string{
+ "AWS_CA_BUNDLE",
+ }
+ useClientTLSCert = []string{
+ "AWS_SDK_GO_CLIENT_TLS_CERT",
+ }
+ useClientTLSKey = []string{
+ "AWS_SDK_GO_CLIENT_TLS_KEY",
+ }
)
// loadEnvConfig retrieves the SDK's environment configuration.
@@ -294,7 +323,9 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
cfg.SharedConfigFile = defaults.SharedConfigFilename()
}
- cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
+ setFromEnvVal(&cfg.CustomCABundle, useCABundleKey)
+ setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert)
+ setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey)
var err error
// STS Regional Endpoint variable
@@ -332,6 +363,8 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
}
}
+ setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey)
+
return cfg, nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 0ff4996051..038ae222ff 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -25,11 +25,18 @@ const (
// ErrCodeSharedConfig represents an error that occurs in the shared
// configuration logic
ErrCodeSharedConfig = "SharedConfigErr"
+
+ // ErrCodeLoadCustomCABundle is the error code returned when a custom CA bundle cannot be loaded.
+ ErrCodeLoadCustomCABundle = "LoadCustomCABundleError"
+
+ // ErrCodeLoadClientTLSCert is the error code returned when a client TLS
+ // certificate or key cannot be loaded.
+ ErrCodeLoadClientTLSCert = "LoadClientTLSCertError"
)
// ErrSharedConfigSourceCollision will be returned if a section contains both
// source_profile and credential_source
-var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil)
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil)
// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
// variables are empty and Environment was set as the credential source
@@ -48,6 +55,8 @@ var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credenti
type Session struct {
Config *aws.Config
Handlers request.Handlers
+
+ options Options
}
// New creates a new instance of the handlers merging in the provided configs
@@ -99,7 +108,7 @@ func New(cfgs ...*aws.Config) *Session {
return s
}
- s := deprecatedNewSession(cfgs...)
+ s := deprecatedNewSession(envCfg, cfgs...)
if envErr != nil {
msg := "failed to load env config"
s.logDeprecatedNewSessionError(msg, envErr, cfgs)
@@ -227,22 +236,68 @@ type Options struct {
// the SDK will use instead of the default system's root CA bundle. Use this
// only if you want to replace the CA bundle the SDK uses for TLS requests.
//
- // Enabling this option will attempt to merge the Transport into the SDK's HTTP
- // client. If the client's Transport is not a http.Transport an error will be
- // returned. If the Transport's TLS config is set this option will cause the SDK
+ // HTTP Client's Transport concrete implementation must be a http.Transport
+ // or creating the session will fail.
+ //
+ // If the Transport's TLS config is set this option will cause the SDK
// to overwrite the Transport's TLS config's RootCAs value. If the CA
// bundle reader contains multiple certificates all of them will be loaded.
//
- // The Session option CustomCABundle is also available when creating sessions
- // to also enable this feature. CustomCABundle session option field has priority
- // over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+ // Can also be specified via the environment variable:
+ //
+ // AWS_CA_BUNDLE=$HOME/ca_bundle
+ //
+ // Can also be specified via the shared config field:
+ //
+ // ca_bundle = $HOME/ca_bundle
CustomCABundle io.Reader
+ // Reader for the TLS client certificate that should be used by the SDK's
+ // HTTP transport when making requests. The certificate must be paired with
+ // a TLS client key file. Ignored unless both are provided.
+ //
+ // HTTP Client's Transport concrete implementation must be a http.Transport
+ // or creating the session will fail.
+ //
+ // Can also be specified via the environment variable:
+ //
+ // AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+ ClientTLSCert io.Reader
+
+ // Reader for the TLS client key that should be used by the SDK's HTTP
+ // transport when making requests. The key must be paired with a TLS client
+ // certificate file. Ignored unless both are provided.
+ //
+ // HTTP Client's Transport concrete implementation must be a http.Transport
+ // or creating the session will fail.
+ //
+ // Can also be specified via the environment variable:
+ //
+ // AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+ ClientTLSKey io.Reader
+
// The handlers that the session and all API clients will be created with.
// This must be a complete set of handlers. Use the defaults.Handlers()
// function to initialize this value before changing the handlers to be
// used by the SDK.
Handlers request.Handlers
+
+ // Allows specifying a custom endpoint to be used by the EC2 IMDS client
+ // when making requests to the EC2 IMDS API. The endpoint value must
+ // include the protocol prefix.
+ //
+ // If unset, the EC2 IMDS client will use its default endpoint.
+ //
+ // Can also be specified via the environment variable,
+ // AWS_EC2_METADATA_SERVICE_ENDPOINT.
+ //
+ // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+ //
+ // If using a URL with an IPv6 address literal, the IPv6 address
+ // component must be enclosed in square brackets.
+ //
+ // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+ EC2IMDSEndpoint string
}
// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
@@ -300,17 +355,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
envCfg.EnableSharedConfig = true
}
- // Only use AWS_CA_BUNDLE if session option is not provided.
- if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
- f, err := os.Open(envCfg.CustomCABundle)
- if err != nil {
- return nil, awserr.New("LoadCustomCABundleError",
- "failed to open custom CA bundle PEM file", err)
- }
- defer f.Close()
- opts.CustomCABundle = f
- }
-
return newSession(opts, envCfg, &opts.Config)
}
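
A minimal usage sketch for the new client TLS options; cert.pem and key.pem are placeholder paths, and both readers must be supplied together or session creation fails:

    package main

    import (
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Placeholder paths for a client certificate and its matching key.
        cert, err := os.Open("cert.pem")
        if err != nil {
            log.Fatal(err)
        }
        defer cert.Close()

        key, err := os.Open("key.pem")
        if err != nil {
            log.Fatal(err)
        }
        defer key.Close()

        sess, err := session.NewSessionWithOptions(session.Options{
            ClientTLSCert: cert,
            ClientTLSKey:  key,
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = sess
    }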
@@ -329,7 +373,25 @@ func Must(sess *Session, err error) *Session {
return sess
}
-func deprecatedNewSession(cfgs ...*aws.Config) *Session {
+// Wraps the endpoint resolver with a resolver that will return a custom
+// endpoint for EC2 IMDS.
+func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string) endpoints.Resolver {
+ return endpoints.ResolverFunc(
+ func(service, region string, opts ...func(*endpoints.Options)) (
+ endpoints.ResolvedEndpoint, error,
+ ) {
+ if service == ec2MetadataServiceID {
+ return endpoints.ResolvedEndpoint{
+ URL: endpoint,
+ SigningName: ec2MetadataServiceID,
+ SigningRegion: region,
+ }, nil
+ }
+ return resolver.EndpointFor(service, region)
+ })
+}
+
+func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session {
cfg := defaults.Config()
handlers := defaults.Handlers()
@@ -341,6 +403,11 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
// endpoints for service client configurations.
cfg.EndpointResolver = endpoints.DefaultResolver()
}
+
+ if len(envCfg.EC2IMDSEndpoint) != 0 {
+ cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint)
+ }
+
cfg.Credentials = defaults.CredChain(cfg, handlers)
// Reapply any passed in configs to override credentials if set
@@ -349,6 +416,9 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
s := &Session{
Config: cfg,
Handlers: handlers,
+ options: Options{
+ EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint,
+ },
}
initHandlers(s)
@@ -415,9 +485,14 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
return nil, err
}
+ if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil {
+ return nil, err
+ }
+
s := &Session{
Config: cfg,
Handlers: handlers,
+ options: opts,
}
initHandlers(s)
@@ -433,13 +508,6 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
}
}
- // Setup HTTP client with custom cert bundle if enabled
- if opts.CustomCABundle != nil {
- if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
- return nil, err
- }
- }
-
return s, nil
}
@@ -483,22 +551,83 @@ func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
return csmConfig{}, nil
}
-func loadCustomCABundle(s *Session, bundle io.Reader) error {
+func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
+ // CA Bundle can be specified in both the environment variable and shared config file.
+ var caBundleFilename = envCfg.CustomCABundle
+ if len(caBundleFilename) == 0 {
+ caBundleFilename = sharedCfg.CustomCABundle
+ }
+
+ // Only use environment value if session option is not provided.
+ customTLSOptions := map[string]struct {
+ filename string
+ field *io.Reader
+ errCode string
+ }{
+ "custom CA bundle PEM": {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle},
+ "custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert},
+ "custom client TLS key": {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert},
+ }
+ for name, v := range customTLSOptions {
+ if len(v.filename) != 0 && *v.field == nil {
+ f, err := os.Open(v.filename)
+ if err != nil {
+ return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err)
+ }
+ defer f.Close()
+ *v.field = f
+ }
+ }
+
+ // Setup HTTP client with custom cert bundle if enabled
+ if opts.CustomCABundle != nil {
+ if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil {
+ return err
+ }
+ }
+
+ // Setup HTTP client TLS certificate and key for client TLS authentication.
+ if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil {
+ if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil {
+ return err
+ }
+ } else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil {
+ // Do nothing if neither value is available.
+
+ } else {
+ return awserr.New(ErrCodeLoadClientTLSCert,
+ fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided",
+ opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil)
+ }
+
+ return nil
+}
+
+func getHTTPTransport(client *http.Client) (*http.Transport, error) {
var t *http.Transport
- switch v := s.Config.HTTPClient.Transport.(type) {
+ switch v := client.Transport.(type) {
case *http.Transport:
t = v
default:
- if s.Config.HTTPClient.Transport != nil {
- return awserr.New("LoadCustomCABundleError",
- "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
+ if client.Transport != nil {
+ return nil, fmt.Errorf("unsupported transport, %T", client.Transport)
}
}
if t == nil {
// Nil transport implies `http.DefaultTransport` should be used. Since
// the SDK cannot modify nor copy the `DefaultTransport`, specifying
// the values is the next closest behavior.
- t = getCABundleTransport()
+ t = getCustomTransport()
+ }
+
+ return t, nil
+}
+
+func loadCustomCABundle(client *http.Client, bundle io.Reader) error {
+ t, err := getHTTPTransport(client)
+ if err != nil {
+ return awserr.New(ErrCodeLoadCustomCABundle,
+ "unable to load custom CA bundle, HTTPClient's transport unsupported type", err)
}
p, err := loadCertPool(bundle)
@@ -510,7 +639,7 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
}
t.TLSClientConfig.RootCAs = p
- s.Config.HTTPClient.Transport = t
+ client.Transport = t
return nil
}
@@ -518,19 +647,57 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
func loadCertPool(r io.Reader) (*x509.CertPool, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
- return nil, awserr.New("LoadCustomCABundleError",
+ return nil, awserr.New(ErrCodeLoadCustomCABundle,
"failed to read custom CA bundle PEM file", err)
}
p := x509.NewCertPool()
if !p.AppendCertsFromPEM(b) {
- return nil, awserr.New("LoadCustomCABundleError",
+ return nil, awserr.New(ErrCodeLoadCustomCABundle,
"failed to load custom CA bundle PEM file", err)
}
return p, nil
}
+func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error {
+ t, err := getHTTPTransport(client)
+ if err != nil {
+ return awserr.New(ErrCodeLoadClientTLSCert,
+ "unable to get usable HTTP transport from client", err)
+ }
+
+ cert, err := ioutil.ReadAll(certFile)
+ if err != nil {
+ return awserr.New(ErrCodeLoadClientTLSCert,
+ "unable to get read client TLS cert file", err)
+ }
+
+ key, err := ioutil.ReadAll(keyFile)
+ if err != nil {
+ return awserr.New(ErrCodeLoadClientTLSCert,
+ "unable to get read client TLS key file", err)
+ }
+
+ clientCert, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ return awserr.New(ErrCodeLoadClientTLSCert,
+ "unable to load x509 key pair from client cert", err)
+ }
+
+ tlsCfg := t.TLSClientConfig
+ if tlsCfg == nil {
+ tlsCfg = &tls.Config{}
+ }
+
+ tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert)
+
+ t.TLSClientConfig = tlsCfg
+ client.Transport = t
+
+ return nil
+}
+
func mergeConfigSrcs(cfg, userCfg *aws.Config,
envCfg envConfig, sharedCfg sharedConfig,
handlers request.Handlers,
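
Outside the session package, the same client TLS setup can be sketched directly against net/http; tls.LoadX509KeyPair reads the PEM files by path, whereas the session code above consumes io.Readers, and both paths below are placeholders:

    package main

    import (
        "crypto/tls"
        "log"
        "net/http"
    )

    func main() {
        cert, err := tls.LoadX509KeyPair("cert.pem", "key.pem")
        if err != nil {
            log.Fatal(err)
        }

        // Attach the client certificate to the transport's TLS config,
        // the same end state loadClientTLSCert produces.
        t := &http.Transport{
            TLSClientConfig: &tls.Config{
                Certificates: []tls.Certificate{cert},
            },
        }
        client := &http.Client{Transport: t}
        _ = client
    }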
@@ -570,6 +737,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
endpoints.LegacyS3UsEast1Endpoint,
})
+ ec2IMDSEndpoint := sessOpts.EC2IMDSEndpoint
+ if len(ec2IMDSEndpoint) == 0 {
+ ec2IMDSEndpoint = envCfg.EC2IMDSEndpoint
+ }
+ if len(ec2IMDSEndpoint) != 0 {
+ cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint)
+ }
+
// Configure credentials if not already set by the user when creating the
// Session.
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
@@ -627,6 +802,7 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session {
newSession := &Session{
Config: s.Config.Copy(cfgs...),
Handlers: s.Handlers.Copy(),
+ options: s.options,
}
initHandlers(newSession)
@@ -665,6 +841,8 @@ func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Confi
}
}
+const ec2MetadataServiceID = "ec2metadata"
+
func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) {
if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
index 680805a38a..5ab05d56cc 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -2,6 +2,7 @@ package session
import (
"fmt"
+ "strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
@@ -25,6 +26,12 @@ const (
roleSessionNameKey = `role_session_name` // optional
roleDurationSecondsKey = "duration_seconds" // optional
+ // AWS Single Sign-On (AWS SSO) group
+ ssoAccountIDKey = "sso_account_id"
+ ssoRegionKey = "sso_region"
+ ssoRoleNameKey = "sso_role_name"
+ ssoStartURL = "sso_start_url"
+
// CSM options
csmEnabledKey = `csm_enabled`
csmHostKey = `csm_host`
@@ -34,6 +41,9 @@ const (
// Additional Config fields
regionKey = `region`
+ // custom CA Bundle filename
+ customCABundleKey = `ca_bundle`
+
// endpoint discovery group
enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
@@ -75,6 +85,11 @@ type sharedConfig struct {
CredentialProcess string
WebIdentityTokenFile string
+ SSOAccountID string
+ SSORegion string
+ SSORoleName string
+ SSOStartURL string
+
RoleARN string
RoleSessionName string
ExternalID string
@@ -90,6 +105,15 @@ type sharedConfig struct {
// region
Region string
+ // CustomCABundle is the file path to a PEM file the SDK will read and
+ // use to configure the HTTP transport with additional CA certs that are
+ // not present in the platforms default CA store.
+ //
+ // This value will be ignored if the file does not exist.
+ //
+ // ca_bundle
+ CustomCABundle string
+
// EnableEndpointDiscovery can be enabled in the shared config by setting
// endpoint_discovery_enabled to true
//
@@ -205,9 +229,9 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s
cfg.clearAssumeRoleOptions()
} else {
// First time a profile has been seen, It must either be a assume role
- // or credentials. Assert if the credential type requires a role ARN,
- // the ARN is also set.
- if err := cfg.validateCredentialsRequireARN(profile); err != nil {
+ // credentials, or SSO. Assert if the credential type requires a role ARN,
+ // the ARN is also set, or validate that the SSO configuration is complete.
+ if err := cfg.validateCredentialsConfig(profile); err != nil {
return err
}
}
@@ -276,6 +300,7 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
updateString(&cfg.SourceProfileName, section, sourceProfileKey)
updateString(&cfg.CredentialSource, section, credentialSourceKey)
updateString(&cfg.Region, section, regionKey)
+ updateString(&cfg.CustomCABundle, section, customCABundleKey)
if section.Has(roleDurationSecondsKey) {
d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
@@ -299,6 +324,12 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
}
cfg.S3UsEast1RegionalEndpoint = sre
}
+
+ // AWS Single Sign-On (AWS SSO)
+ updateString(&cfg.SSOAccountID, section, ssoAccountIDKey)
+ updateString(&cfg.SSORegion, section, ssoRegionKey)
+ updateString(&cfg.SSORoleName, section, ssoRoleNameKey)
+ updateString(&cfg.SSOStartURL, section, ssoStartURL)
}
updateString(&cfg.CredentialProcess, section, credentialProcessKey)
@@ -329,6 +360,18 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
return nil
}
+func (cfg *sharedConfig) validateCredentialsConfig(profile string) error {
+ if err := cfg.validateCredentialsRequireARN(profile); err != nil {
+ return err
+ }
+
+ if err := cfg.validateSSOConfiguration(profile); err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error {
var credSource string
@@ -358,6 +401,7 @@ func (cfg *sharedConfig) validateCredentialType() error {
len(cfg.CredentialSource) != 0,
len(cfg.CredentialProcess) != 0,
len(cfg.WebIdentityTokenFile) != 0,
+ cfg.hasSSOConfiguration(),
) {
return ErrSharedConfigSourceCollision
}
@@ -365,12 +409,43 @@ func (cfg *sharedConfig) validateCredentialType() error {
return nil
}
+func (cfg *sharedConfig) validateSSOConfiguration(profile string) error {
+ if !cfg.hasSSOConfiguration() {
+ return nil
+ }
+
+ var missing []string
+ if len(cfg.SSOAccountID) == 0 {
+ missing = append(missing, ssoAccountIDKey)
+ }
+
+ if len(cfg.SSORegion) == 0 {
+ missing = append(missing, ssoRegionKey)
+ }
+
+ if len(cfg.SSORoleName) == 0 {
+ missing = append(missing, ssoRoleNameKey)
+ }
+
+ if len(cfg.SSOStartURL) == 0 {
+ missing = append(missing, ssoStartURL)
+ }
+
+ if len(missing) > 0 {
+ return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+ profile, strings.Join(missing, ", "))
+ }
+
+ return nil
+}
+
func (cfg *sharedConfig) hasCredentials() bool {
switch {
case len(cfg.SourceProfileName) != 0:
case len(cfg.CredentialSource) != 0:
case len(cfg.CredentialProcess) != 0:
case len(cfg.WebIdentityTokenFile) != 0:
+ case cfg.hasSSOConfiguration():
case cfg.Creds.HasKeys():
default:
return false
@@ -394,6 +469,18 @@ func (cfg *sharedConfig) clearAssumeRoleOptions() {
cfg.SourceProfileName = ""
}
+func (cfg *sharedConfig) hasSSOConfiguration() bool {
+ switch {
+ case len(cfg.SSOAccountID) != 0:
+ case len(cfg.SSORegion) != 0:
+ case len(cfg.SSORoleName) != 0:
+ case len(cfg.SSOStartURL) != 0:
+ default:
+ return false
+ }
+ return true
+}
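
For reference, a hypothetical shared config profile exercising these keys; every value below is a placeholder. validateSSOConfiguration rejects the profile unless all four sso_* keys are present, and ca_bundle maps to the new CustomCABundle field:

    [profile dev-sso]
    sso_start_url  = https://my-sso-portal.awsapps.com/start
    sso_region     = us-east-1
    sso_account_id = 123456789012
    sso_role_name  = ReadOnlyRole
    # Maps to sharedConfig.CustomCABundle above.
    ca_bundle      = ~/custom-bundle.pem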
+
func oneOrNone(bs ...bool) bool {
var count int
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 55a35a03ea..cf26997eb2 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.31.6"
+const SDKVersion = "1.37.0"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
index cf9fad81e7..55fa73ebcf 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
@@ -63,9 +63,10 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenNone: MarkCompleteState,
},
ASTKindEqualExpr: map[TokenType]int{
- TokenLit: ValueState,
- TokenWS: SkipTokenState,
- TokenNL: SkipState,
+ TokenLit: ValueState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipState,
+ TokenNone: SkipState,
},
ASTKindStatement: map[TokenType]int{
TokenLit: SectionState,
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
index d7d42db0a6..1f1d27aea4 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
@@ -1,9 +1,10 @@
package protocol
import (
- "strings"
-
"github.com/aws/aws-sdk-go/aws/request"
+ "net"
+ "strconv"
+ "strings"
)
// ValidateEndpointHostHandler is a request handler that will validate the
@@ -22,8 +23,26 @@ var ValidateEndpointHostHandler = request.NamedHandler{
// 3986 host. Returns error if the host is not valid.
func ValidateEndpointHost(opName, host string) error {
paramErrs := request.ErrInvalidParams{Context: opName}
- labels := strings.Split(host, ".")
+ var hostname string
+ var port string
+ var err error
+
+ if strings.Contains(host, ":") {
+ hostname, port, err = net.SplitHostPort(host)
+
+ if err != nil {
+ paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host))
+ }
+
+ if !ValidPortNumber(port) {
+ paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port))
+ }
+ } else {
+ hostname = host
+ }
+
+ labels := strings.Split(hostname, ".")
for i, label := range labels {
if i == len(labels)-1 && len(label) == 0 {
// Allow trailing dot for FQDN hosts.
@@ -36,7 +55,11 @@ func ValidateEndpointHost(opName, host string) error {
}
}
- if len(host) > 255 {
+ if len(hostname) == 0 {
+ paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1))
+ }
+
+ if len(hostname) > 255 {
paramErrs.Add(request.NewErrParamMaxLen(
"endpoint host", 255, host,
))
@@ -66,3 +89,16 @@ func ValidHostLabel(label string) bool {
return true
}
+
+// ValidPortNumber returns whether the port is a valid RFC 3986 port.
+func ValidPortNumber(port string) bool {
+ i, err := strconv.Atoi(port)
+ if err != nil {
+ return false
+ }
+
+ if i < 0 || i > 65535 {
+ return false
+ }
+ return true
+}
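
A usage sketch for the port-aware validation; it only compiles inside the SDK module, since the package lives under private/:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/private/protocol"
    )

    func main() {
        // With the change above, a host:port endpoint is split and the port
        // checked against [0-65535] before the hostname labels are validated.
        fmt.Println(protocol.ValidateEndpointHost("MyOp", "example.com:8443"))          // <nil>
        fmt.Println(protocol.ValidateEndpointHost("MyOp", "example.com:99999") != nil) // true
        fmt.Println(protocol.ValidPortNumber("8443"))                                  // true
    }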
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
index 5e9499699b..8b2c9bbeba 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
+ "math/big"
"reflect"
"strings"
"time"
@@ -15,6 +16,8 @@ import (
"github.com/aws/aws-sdk-go/private/protocol"
)
+var millisecondsFloat = new(big.Float).SetInt64(1e3)
+
// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in
// type. The value to unmarshal the json document into must be a pointer to the
// type.
@@ -39,7 +42,9 @@ func UnmarshalJSONError(v interface{}, stream io.Reader) error {
func UnmarshalJSON(v interface{}, stream io.Reader) error {
var out interface{}
- err := json.NewDecoder(stream).Decode(&out)
+ decoder := json.NewDecoder(stream)
+ decoder.UseNumber()
+ err := decoder.Decode(&out)
if err == io.EOF {
return nil
} else if err != nil {
@@ -54,7 +59,9 @@ func UnmarshalJSON(v interface{}, stream io.Reader) error {
func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error {
var out interface{}
- err := json.NewDecoder(stream).Decode(&out)
+ decoder := json.NewDecoder(stream)
+ decoder.UseNumber()
+ err := decoder.Decode(&out)
if err == io.EOF {
return nil
} else if err != nil {
@@ -254,16 +261,31 @@ func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag
default:
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
}
- case float64:
+ case json.Number:
switch value.Interface().(type) {
case *int64:
- di := int64(d)
+ // Retain the old behavior where we would just truncate the float64;
+ // calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt.
+ f, err := d.Float64()
+ if err != nil {
+ return err
+ }
+ di := int64(f)
value.Set(reflect.ValueOf(&di))
case *float64:
- value.Set(reflect.ValueOf(&d))
+ f, err := d.Float64()
+ if err != nil {
+ return err
+ }
+ value.Set(reflect.ValueOf(&f))
case *time.Time:
- // Time unmarshaled from a float64 can only be epoch seconds
- t := time.Unix(int64(d), 0).UTC()
+ float, ok := new(big.Float).SetString(d.String())
+ if !ok {
+ return fmt.Errorf("unsupported float time representation: %v", d.String())
+ }
+ float = float.Mul(float, millisecondsFloat)
+ ms, _ := float.Int64()
+ t := time.Unix(0, ms*1e6).UTC()
value.Set(reflect.ValueOf(&t))
default:
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go
new file mode 100644
index 0000000000..2e0e205af3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go
@@ -0,0 +1,59 @@
+// Package restjson provides RESTful JSON serialization of AWS
+// requests and responses.
+package restjson
+
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+// BuildHandler is a named request handler for building restjson protocol
+// requests
+var BuildHandler = request.NamedHandler{
+ Name: "awssdk.restjson.Build",
+ Fn: Build,
+}
+
+// UnmarshalHandler is a named request handler for unmarshaling restjson
+// protocol requests
+var UnmarshalHandler = request.NamedHandler{
+ Name: "awssdk.restjson.Unmarshal",
+ Fn: Unmarshal,
+}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling restjson
+// protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{
+ Name: "awssdk.restjson.UnmarshalMeta",
+ Fn: UnmarshalMeta,
+}
+
+// Build builds a request for the REST JSON protocol.
+func Build(r *request.Request) {
+ rest.Build(r)
+
+ if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+ if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 {
+ r.HTTPRequest.Header.Set("Content-Type", "application/json")
+ }
+ jsonrpc.Build(r)
+ }
+}
+
+// Unmarshal unmarshals a response body for the REST JSON protocol.
+func Unmarshal(r *request.Request) {
+ if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+ jsonrpc.Unmarshal(r)
+ } else {
+ rest.Unmarshal(r)
+ }
+}
+
+// UnmarshalMeta unmarshals response headers for the REST JSON protocol.
+func UnmarshalMeta(r *request.Request) {
+ rest.UnmarshalMeta(r)
+}
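
A sketch of how a generated REST JSON service client typically wires these handlers; the wire helper name is illustrative, not part of this patch:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/private/protocol/restjson"
    )

    // wire attaches the REST JSON protocol handlers to a client's handler
    // lists, mirroring what generated service clients do in their New funcs.
    func wire(h *request.Handlers) {
        h.Build.PushBackNamed(restjson.BuildHandler)
        h.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
        h.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
        h.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)
    }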
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
new file mode 100644
index 0000000000..d756d8cc52
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
@@ -0,0 +1,134 @@
+package restjson
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+ errorTypeHeader = "X-Amzn-Errortype"
+ errorMessageHeader = "X-Amzn-Errormessage"
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors
+// for both typed and untyped errors.
+type UnmarshalTypedError struct {
+ exceptions map[string]func(protocol.ResponseMetadata) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized with the
+// given mapping of exception names to error unmarshalers.
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+ return &UnmarshalTypedError{
+ exceptions: exceptions,
+ }
+}
+
+// UnmarshalError attempts to unmarshal the HTTP response error as a known
+// error type. If unable to unmarshal the error type, the generic SDK error
+// type will be used.
+func (u *UnmarshalTypedError) UnmarshalError(
+ resp *http.Response,
+ respMeta protocol.ResponseMetadata,
+) (error, error) {
+
+ code := resp.Header.Get(errorTypeHeader)
+ msg := resp.Header.Get(errorMessageHeader)
+
+ body := resp.Body
+ if len(code) == 0 {
+ // If unable to get the code from the HTTP headers, parse the JSON message
+ // to determine what kind of exception this will be.
+ var buf bytes.Buffer
+ var jsonErr jsonErrorResponse
+ teeReader := io.TeeReader(resp.Body, &buf)
+ err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
+ if err != nil {
+ return nil, err
+ }
+
+ body = ioutil.NopCloser(&buf)
+ code = jsonErr.Code
+ msg = jsonErr.Message
+ }
+
+ // If the code has colon separators, remove them so it can be compared
+ // against modeled exception names.
+ code = strings.SplitN(code, ":", 2)[0]
+
+ if fn, ok := u.exceptions[code]; ok {
+ // If the exception code is known, use the associated constructor to get a value
+ // for the exception that the JSON body can be unmarshaled into.
+ v := fn(respMeta)
+ if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil {
+ return nil, err
+ }
+
+ if err := rest.UnmarshalResponse(resp, v, true); err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ }
+
+ // fallback to unmodeled generic exceptions
+ return awserr.NewRequestFailure(
+ awserr.New(code, msg, nil),
+ respMeta.StatusCode,
+ respMeta.RequestID,
+ ), nil
+}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restjson
+// protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{
+ Name: "awssdk.restjson.UnmarshalError",
+ Fn: UnmarshalError,
+}
+
+// UnmarshalError unmarshals a response error for the REST JSON protocol.
+func UnmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ var jsonErr jsonErrorResponse
+ err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to unmarshal response error", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ code := r.HTTPResponse.Header.Get(errorTypeHeader)
+ if code == "" {
+ code = jsonErr.Code
+ }
+ msg := r.HTTPResponse.Header.Get(errorMessageHeader)
+ if msg == "" {
+ msg = jsonErr.Message
+ }
+
+ code = strings.SplitN(code, ":", 2)[0]
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(code, msg, nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+}
+
+type jsonErrorResponse struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
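
A sketch of registering a typed error with the new unmarshaler; the exception name and the newErr constructor below are illustrative stand-ins for generated code:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/private/protocol"
        "github.com/aws/aws-sdk-go/private/protocol/restjson"
    )

    // newErr builds the typed error value the JSON body is unmarshaled into.
    func newErr(meta protocol.ResponseMetadata) error {
        return awserr.NewRequestFailure(
            awserr.New("ResourceNotFoundException", "", nil),
            meta.StatusCode, meta.RequestID)
    }

    // typedErrs resolves "ResourceNotFoundException" responses to newErr;
    // any other code falls back to the generic awserr path shown above.
    var typedErrs = restjson.NewUnmarshalTypedError(
        map[string]func(protocol.ResponseMetadata) error{
            "ResourceNotFoundException": newErr,
        },
    )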
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
index 05d4ff5192..98f4caed91 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -27,8 +27,8 @@ const (
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
- // This format is used for output time without seconds precision
- ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z"
+ // This format is used for output time with fractional second precision up to milliseconds
+ ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z"
)
// IsKnownTimestampFormat returns if the timestamp format name
@@ -48,7 +48,7 @@ func IsKnownTimestampFormat(name string) bool {
// FormatTime returns a string value of the time.
func FormatTime(name string, t time.Time) string {
- t = t.UTC()
+ t = t.UTC().Truncate(time.Millisecond)
switch name {
case RFC822TimeFormatName:
@@ -56,7 +56,8 @@ func FormatTime(name string, t time.Time) string {
case ISO8601TimeFormatName:
return t.Format(ISO8601OutputTimeFormat)
case UnixTimeFormatName:
- return strconv.FormatInt(t.Unix(), 10)
+ ms := t.UnixNano() / int64(time.Millisecond)
+ return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64)
default:
panic("unknown timestamp format name, " + name)
}
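
The effect of the millisecond truncation and the fractional unix format, sketched below; like the host example, it only compiles inside the SDK module:

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/private/protocol"
    )

    func main() {
        // Nanosecond input is truncated to millisecond precision first.
        t := time.Date(2015, 1, 25, 8, 0, 0, 123456789, time.UTC)

        fmt.Println(protocol.FormatTime(protocol.ISO8601TimeFormatName, t))
        // 2015-01-25T08:00:00.123Z
        fmt.Println(protocol.FormatTime(protocol.UnixTimeFormatName, t))
        // 1422172800.123
    }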
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go
index 6ea344e5e5..5deb417314 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go
@@ -67,14 +67,14 @@ func (c *CloudWatchLogs) AssociateKmsKeyRequest(input *AssociateKmsKeyInput) (re
// within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt
// this data whenever it is requested.
//
-// Important: CloudWatch Logs supports only symmetric CMKs. Do not use an associate
-// an asymmetric CMK with your log group. For more information, see Using Symmetric
+// CloudWatch Logs supports only symmetric CMKs. Do not associate an
+// asymmetric CMK with your log group. For more information, see Using Symmetric
// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html).
//
-// Note that it can take up to 5 minutes for this operation to take effect.
+// It can take up to 5 minutes for this operation to take effect.
//
// If you attempt to associate a CMK with a log group but the CMK does not exist
-// or the CMK is disabled, you will receive an InvalidParameterException error.
+// or the CMK is disabled, you receive an InvalidParameterException error.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -254,7 +254,9 @@ func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (
// CreateExportTask API operation for Amazon CloudWatch Logs.
//
// Creates an export task, which allows you to efficiently export data from
-// a log group to an Amazon S3 bucket.
+// a log group to an Amazon S3 bucket. When you perform a CreateExportTask operation,
+// you must use credentials that have permission to write to the S3 bucket that
+// you specify as the destination.
//
// This is an asynchronous call. If all the required information is provided,
// this operation initiates an export task and responds with the ID of the task.
@@ -363,9 +365,8 @@ func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req
// CreateLogGroup API operation for Amazon CloudWatch Logs.
//
-// Creates a log group with the specified name.
-//
-// You can create up to 20,000 log groups per account.
+// Creates a log group with the specified name. You can create up to 20,000
+// log groups per account.
//
// You must use the following guidelines when naming a log group:
//
@@ -377,6 +378,10 @@ func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req
// '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and
// '#' (number sign)
//
+// When you create a log group, by default the log events in the log group never
+// expire. To set a retention policy so that events expire and are deleted after
+// a specified time, use PutRetentionPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutRetentionPolicy.html).
+//
// If you associate a AWS Key Management Service (AWS KMS) customer master key
// (CMK) with the log group, ingested data is encrypted using the CMK. This
// association is stored as long as the data encrypted with the CMK is still
@@ -384,12 +389,11 @@ func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req
// this data whenever it is requested.
//
// If you attempt to associate a CMK with the log group but the CMK does not
-// exist or the CMK is disabled, you will receive an InvalidParameterException
-// error.
+// exist or the CMK is disabled, you receive an InvalidParameterException error.
//
-// Important: CloudWatch Logs supports only symmetric CMKs. Do not associate
-// an asymmetric CMK with your log group. For more information, see Using Symmetric
-// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html).
+// CloudWatch Logs supports only symmetric CMKs. Do not associate an asymmetric
+// CMK with your log group. For more information, see Using Symmetric and Asymmetric
+// Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -481,7 +485,9 @@ func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (re
// CreateLogStream API operation for Amazon CloudWatch Logs.
//
-// Creates a log stream for the specified log group.
+// Creates a log stream for the specified log group. A log stream is a sequence
+// of log events that originate from a single source, such as an application
+// instance or a resource that is being monitored.
//
// There is no limit on the number of log streams that you can create for a
// log group. There is a limit of 50 TPS on CreateLogStream operations, after
@@ -941,6 +947,14 @@ func (c *CloudWatchLogs) DeleteQueryDefinitionRequest(input *DeleteQueryDefiniti
// DeleteQueryDefinition API operation for Amazon CloudWatch Logs.
//
+// Deletes a saved CloudWatch Logs Insights query definition. A query definition
+// contains details about a saved CloudWatch Logs Insights query.
+//
+// Each DeleteQueryDefinition operation can delete one query definition.
+//
+// You must have the logs:DeleteQueryDefinition permission to be able to perform
+// this operation.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1814,8 +1828,8 @@ func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFilte
// DescribeMetricFilters API operation for Amazon CloudWatch Logs.
//
-// Lists the specified metric filters. You can list all the metric filters or
-// filter the results by log name, prefix, metric name, or metric namespace.
+// Lists the specified metric filters. You can list all of the metric filters
+// or filter the results by log name, prefix, metric name, or metric namespace.
// The results are ASCII-sorted by filter name.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -1955,7 +1969,7 @@ func (c *CloudWatchLogs) DescribeQueriesRequest(input *DescribeQueriesInput) (re
// DescribeQueries API operation for Amazon CloudWatch Logs.
//
// Returns a list of CloudWatch Logs Insights queries that are scheduled, executing,
-// or have been executed recently in this account. You can request all queries,
+// or have been executed recently in this account. You can request all queries
// or limit it to queries of a specific log group or queries with a certain
// status.
//
@@ -2042,6 +2056,12 @@ func (c *CloudWatchLogs) DescribeQueryDefinitionsRequest(input *DescribeQueryDef
// DescribeQueryDefinitions API operation for Amazon CloudWatch Logs.
//
+// This operation returns a paginated list of your saved CloudWatch Logs Insights
+// query definitions.
+//
+// You can use the queryDefinitionNamePrefix parameter to limit the results
+// to only the query definitions that have names that start with a certain string.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2458,10 +2478,15 @@ func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (re
// of the log stream.
//
// By default, this operation returns as many log events as can fit in 1 MB
-// (up to 10,000 log events), or all the events found within the time range
-// that you specify. If the results include a token, then there are more log
-// events available, and you can get additional results by specifying the token
-// in a subsequent call.
+// (up to 10,000 log events) or all the events found within the time range that
+// you specify. If the results include a token, then there are more log events
+// available, and you can get additional results by specifying the token in
+// a subsequent call. This operation can return empty results while there are
+// more log events available through the token.
+//
+// The returned log events are sorted by event timestamp, the timestamp when
+// the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents
+// request.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2605,12 +2630,14 @@ func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *req
// GetLogEvents API operation for Amazon CloudWatch Logs.
//
-// Lists log events from the specified log stream. You can list all the log
+// Lists log events from the specified log stream. You can list all of the log
// events or filter using a time range.
//
// By default, this operation returns as many log events as can fit in a response
// size of 1MB (up to 10,000 log events). You can get additional log events
-// by specifying one of the tokens in a subsequent call.
+// by specifying one of the tokens in a subsequent call. This operation can
+// return empty results while there are more log events available through the
+// token.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2846,12 +2873,12 @@ func (c *CloudWatchLogs) GetLogRecordRequest(input *GetLogRecordInput) (req *req
// GetLogRecord API operation for Amazon CloudWatch Logs.
//
-// Retrieves all the fields and values of a single log event. All fields are
-// retrieved, even if the original query that produced the logRecordPointer
+// Retrieves all of the fields and values of a single log event. All fields
+// are retrieved, even if the original query that produced the logRecordPointer
// retrieved only a subset of fields. Fields are returned as field name/field
// value pairs.
//
-// Additionally, the entire unparsed log event is returned within @message.
+// The full unparsed log event is returned within @message.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2941,7 +2968,7 @@ func (c *CloudWatchLogs) GetQueryResultsRequest(input *GetQueryResultsInput) (re
//
// Returns the results from the specified query.
//
-// Only the fields requested in the query are returned, along with a @ptr field
+// Only the fields requested in the query are returned, along with a @ptr field,
// which is the identifier for the log record. You can use the value of @ptr
// in a GetLogRecord (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogRecord.html)
// operation to get the full log record.
@@ -3132,6 +3159,9 @@ func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req
// PutDestinationPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestinationPolicy.html)
// after PutDestination.
//
+// To perform a PutDestination operation, you must also have the iam:PassRole
+// permission.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -3310,14 +3340,13 @@ func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *req
// call. An upload in a newly created log stream does not require a sequence
// token. You can also get the sequence token in the expectedSequenceToken field
// from InvalidSequenceTokenException. If you call PutLogEvents twice within
-// a narrow time period using the same value for sequenceToken, both calls may
-// be successful, or one may be rejected.
+// a narrow time period using the same value for sequenceToken, both calls might
+// be successful or one might be rejected.
//
// The batch of events must satisfy the following constraints:
//
-// * The maximum batch size is 1,048,576 bytes, and this size is calculated
-// as the sum of all event messages in UTF-8, plus 26 bytes for each log
-// event.
+// * The maximum batch size is 1,048,576 bytes. This size is calculated as
+// the sum of all event messages in UTF-8, plus 26 bytes for each log event.
//
// * None of the log events in the batch can be more than 2 hours in the
// future.
@@ -3325,7 +3354,7 @@ func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *req
// * None of the log events in the batch can be older than 14 days or older
// than the retention period of the log group.
//
-// * The log events in the batch must be in chronological ordered by their
+// * The log events in the batch must be in chronological order by their
// timestamp. The timestamp is the time the event occurred, expressed as
// the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In AWS Tools
// for PowerShell and the AWS SDK for .NET, the timestamp is specified in
@@ -3532,6 +3561,20 @@ func (c *CloudWatchLogs) PutQueryDefinitionRequest(input *PutQueryDefinitionInpu
// PutQueryDefinition API operation for Amazon CloudWatch Logs.
//
+// Creates or updates a query definition for CloudWatch Logs Insights. For more
+// information, see Analyzing Log Data with CloudWatch Logs Insights (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html).
+//
+// To update a query definition, specify its queryDefinitionId in your request.
+// The values of name, queryString, and logGroupNames are changed to the values
+// that you specify in your update operation. No current values are retained
+// from the current query definition. For example, if you update a current query
+// definition that includes log groups, and you don't specify the logGroupNames
+// parameter in your update operation, the query definition changes to contain
+// no log groups.
+//
+// You must have the logs:PutQueryDefinition permission to be able to perform
+// this operation.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -3617,7 +3660,7 @@ func (c *CloudWatchLogs) PutResourcePolicyRequest(input *PutResourcePolicyInput)
//
// Creates or updates a resource policy allowing other AWS services to put log
// events to this account, such as Amazon Route 53. An account can have up to
-// 10 resource policies per region.
+// 10 resource policies per AWS Region.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3797,8 +3840,11 @@ func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilt
// Creates or updates a subscription filter and associates it with the specified
// log group. Subscription filters allow you to subscribe to a real-time stream
// of log events ingested through PutLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html)
-// and have them delivered to a specific destination. Currently, the supported
-// destinations are:
+// and have them delivered to a specific destination. When log events are sent
+// to the receiving service, they are Base64 encoded and compressed with the
+// gzip format.
+//
+// The following destinations are supported for subscription filters:
//
// * An Amazon Kinesis stream belonging to the same account as the subscription
// filter, for same-account delivery.
@@ -3817,6 +3863,9 @@ func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilt
// filterName. Otherwise, the call fails because you cannot associate a second
// filter with a log group.
//
+// To perform a PutSubscriptionFilter operation, you must also have the iam:PassRole
+// permission.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -3907,12 +3956,12 @@ func (c *CloudWatchLogs) StartQueryRequest(input *StartQueryInput) (req *request
// StartQuery API operation for Amazon CloudWatch Logs.
//
// Schedules a query of a log group using CloudWatch Logs Insights. You specify
-// the log group and time range to query, and the query string to use.
+// the log group and time range to query and the query string to use.
//
// For more information, see CloudWatch Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html).
//
// Queries time out after 15 minutes of execution. If your queries are timing
-// out, reduce the time range being searched, or partition your query into a
+// out, reduce the time range being searched or partition your query into a
// number of queries.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -3925,7 +3974,7 @@ func (c *CloudWatchLogs) StartQueryRequest(input *StartQueryInput) (req *request
// Returned Error Types:
// * MalformedQueryException
// The query string is not valid. Details about this error are displayed in
-// a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html)"/>.
+// a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html).
//
// For more information about valid query syntax, see CloudWatch Logs Insights
// Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html).
@@ -5049,6 +5098,10 @@ func (s DeleteMetricFilterOutput) GoString() string {
type DeleteQueryDefinitionInput struct {
_ struct{} `type:"structure"`
+ // The ID of the query definition that you want to delete. You can use DescribeQueryDefinitions
+ // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html)
+ // to retrieve the IDs of your saved query definitions.
+ //
// QueryDefinitionId is a required field
QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string" required:"true"`
}
@@ -5085,6 +5138,8 @@ func (s *DeleteQueryDefinitionInput) SetQueryDefinitionId(v string) *DeleteQuery
type DeleteQueryDefinitionOutput struct {
_ struct{} `type:"structure"`
+ // A value of TRUE indicates that the operation succeeded. FALSE indicates that
+ // the operation failed.
Success *bool `locationName:"success" type:"boolean"`
}
@@ -5535,6 +5590,9 @@ type DescribeLogGroupsOutput struct {
_ struct{} `type:"structure"`
// The log groups.
+ //
+ // If the retentionInDays value is not included for a log group, then that log
+ // group is set to have its events never expire.
LogGroups []*LogGroup `locationName:"logGroups" type:"list"`
// The token for the next set of items to return. The token expires after 24
@@ -5583,7 +5641,7 @@ type DescribeLogStreamsInput struct {
// The prefix to match.
//
- // If orderBy is LastEventTime,you cannot specify this parameter.
+ // If orderBy is LastEventTime, you cannot specify this parameter.
LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"`
// The token for the next set of items to return. (You received this token from
@@ -5597,11 +5655,11 @@ type DescribeLogStreamsInput struct {
// If you order the results by event time, you cannot specify the logStreamNamePrefix
// parameter.
//
- // lastEventTimestamp represents the time of the most recent log event in the
+ // lastEventTimeStamp represents the time of the most recent log event in the
// log stream in CloudWatch Logs. This number is expressed as the number of
// milliseconds after Jan 1, 1970 00:00:00 UTC. lastEventTimeStamp updates on
// an eventual consistency basis. It typically updates in less than an hour
- // from ingestion, but may take longer in some rare situations.
+ // from ingestion, but in rare situations might take longer.
OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"`
}
@@ -5712,7 +5770,8 @@ func (s *DescribeLogStreamsOutput) SetNextToken(v string) *DescribeLogStreamsOut
type DescribeMetricFiltersInput struct {
_ struct{} `type:"structure"`
- // The prefix to match.
+ // The prefix to match. CloudWatch Logs uses the value you set here only if
+ // you also include the logGroupName parameter in your request.
FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"`
// The maximum number of items returned. If you don't specify a value, the default
@@ -5945,12 +6004,15 @@ func (s *DescribeQueriesOutput) SetQueries(v []*QueryInfo) *DescribeQueriesOutpu
type DescribeQueryDefinitionsInput struct {
_ struct{} `type:"structure"`
+ // Limits the number of returned query definitions to the specified number.
MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
// The token for the next set of items to return. The token expires after 24
// hours.
NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+ // Use this parameter to filter your results to only the query definitions that
+ // have names that start with the prefix you specify.
QueryDefinitionNamePrefix *string `locationName:"queryDefinitionNamePrefix" min:"1" type:"string"`
}
@@ -6008,6 +6070,7 @@ type DescribeQueryDefinitionsOutput struct {
// hours.
NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+ // The list of query definitions that match your request.
QueryDefinitions []*QueryDefinition `locationName:"queryDefinitions" type:"list"`
}
@@ -6249,7 +6312,7 @@ type Destination struct {
// A role for impersonation, used when delivering log events to the target.
RoleArn *string `locationName:"roleArn" min:"1" type:"string"`
- // The Amazon Resource Name (ARN) of the physical target to where the log events
+ // The Amazon Resource Name (ARN) of the physical target where the log events
// are delivered (for example, a Kinesis stream).
TargetArn *string `locationName:"targetArn" min:"1" type:"string"`
}
@@ -6359,13 +6422,13 @@ func (s DisassociateKmsKeyOutput) GoString() string {
type ExportTask struct {
_ struct{} `type:"structure"`
- // The name of Amazon S3 bucket to which the log data was exported.
+ // The name of the S3 bucket to which the log data was exported.
Destination *string `locationName:"destination" min:"1" type:"string"`
// The prefix that was used as the start of Amazon S3 key for every object exported.
DestinationPrefix *string `locationName:"destinationPrefix" type:"string"`
- // Execution info about the export task.
+ // Execution information about the export task.
ExecutionInfo *ExportTaskExecutionInfo `locationName:"executionInfo" type:"structure"`
// The start time, expressed as the number of milliseconds after Jan 1, 1970
@@ -6541,9 +6604,9 @@ type FilterLogEventsInput struct {
// the first log stream are searched first, then those in the next log stream,
// and so on. The default is false.
//
- // IMPORTANT: Starting on June 17, 2019, this parameter will be ignored and
- // the value will be assumed to be true. The response from this operation will
- // always interleave events from multiple log streams within a log group.
+ // Important: Starting on June 17, 2019, this parameter is ignored and the value
+ // is assumed to be true. The response from this operation always interleaves
+ // events from multiple log streams within a log group.
//
// Deprecated: Starting on June 17, 2019, this parameter will be ignored and the value will be assumed to be true. The response from this operation will always interleave events from multiple log streams within a log group.
Interleaved *bool `locationName:"interleaved" deprecated:"true" type:"boolean"`
@@ -6577,6 +6640,9 @@ type FilterLogEventsInput struct {
// The start of the time range, expressed as the number of milliseconds after
// Jan 1, 1970 00:00:00 UTC. Events with a timestamp before this time are not
// returned.
+ //
+ // If you omit startTime and endTime, the most recent log events are retrieved,
+ // up to 1 MB or 10,000 log events.
StartTime *int64 `locationName:"startTime" type:"long"`
}
@@ -6682,6 +6748,9 @@ type FilterLogEventsOutput struct {
// after 24 hours.
NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+ // IMPORTANT: Starting on May 15, 2020, this parameter will be deprecated. This
+ // parameter will be an empty list after the deprecation occurs.
+ //
// Indicates which log streams have been searched and whether each has been
// searched completely.
SearchedLogStreams []*SearchedLogStream `locationName:"searchedLogStreams" type:"list"`
@@ -6907,13 +6976,13 @@ type GetLogEventsOutput struct {
Events []*OutputLogEvent `locationName:"events" type:"list"`
// The token for the next set of items in the backward direction. The token
- // expires after 24 hours. This token will never be null. If you have reached
- // the end of the stream, it will return the same token you passed in.
+ // expires after 24 hours. This token is never null. If you have reached the
+ // end of the stream, it returns the same token you passed in.
NextBackwardToken *string `locationName:"nextBackwardToken" min:"1" type:"string"`
// The token for the next set of items in the forward direction. The token expires
- // after 24 hours. If you have reached the end of the stream, it will return
- // the same token you passed in.
+ // after 24 hours. If you have reached the end of the stream, it returns the
+ // same token you passed in.
NextForwardToken *string `locationName:"nextForwardToken" min:"1" type:"string"`
}
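
Because NextForwardToken comes back unchanged once the end of the stream is reached, a reader can page forward until the token repeats. A sketch under the same placeholder names and imports as the previous one:

    func readForward(svc *cloudwatchlogs.CloudWatchLogs) error {
        var token *string
        for {
            out, err := svc.GetLogEvents(&cloudwatchlogs.GetLogEventsInput{
                LogGroupName:  aws.String("/my/app"),  // placeholder
                LogStreamName: aws.String("stream-1"), // placeholder
                StartFromHead: aws.Bool(true),
                NextToken:     token,
            })
            if err != nil {
                return err
            }
            for _, ev := range out.Events {
                fmt.Println(aws.StringValue(ev.Message))
            }
            // Getting the same token back means the end of the stream was reached.
            if token != nil && aws.StringValue(out.NextForwardToken) == aws.StringValue(token) {
                return nil
            }
            token = out.NextForwardToken
        }
    }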
@@ -7140,14 +7209,15 @@ type GetQueryResultsOutput struct {
// Includes the number of log events scanned by the query, the number of log
// events that matched the query criteria, and the total number of bytes in
- // the log events that were scanned.
+ // the log events that were scanned. These values reflect the full raw results
+ // of the query.
Statistics *QueryStatistics `locationName:"statistics" type:"structure"`
// The status of the most recent running of the query. Possible values are Cancelled,
// Complete, Failed, Running, Scheduled, Timeout, and Unknown.
//
// Queries time out after 15 minutes of execution. To avoid having your queries
- // time out, reduce the time range being searched, or partition your query into
+ // time out, reduce the time range being searched or partition your query into
// a number of queries.
Status *string `locationName:"status" type:"string" enum:"QueryStatus"`
}
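
Given these status values, a caller typically starts a query and polls GetQueryResults until a terminal state. A hedged sketch (placeholder group and query; imports as in the earlier sketch; note that StartQuery, unlike most timestamps in this API, takes seconds):

    func runQuery(svc *cloudwatchlogs.CloudWatchLogs) error {
        now := time.Now()
        start, err := svc.StartQuery(&cloudwatchlogs.StartQueryInput{
            LogGroupName: aws.String("/my/app"), // placeholder
            QueryString:  aws.String("fields @timestamp, @message | limit 20"),
            StartTime:    aws.Int64(now.Add(-time.Hour).Unix()),
            EndTime:      aws.Int64(now.Unix()),
        })
        if err != nil {
            return err
        }
        for {
            res, err := svc.GetQueryResults(&cloudwatchlogs.GetQueryResultsInput{QueryId: start.QueryId})
            if err != nil {
                return err
            }
            switch aws.StringValue(res.Status) {
            case cloudwatchlogs.QueryStatusComplete:
                fmt.Println(len(res.Results), "result rows")
                return nil
            case cloudwatchlogs.QueryStatusFailed, cloudwatchlogs.QueryStatusCancelled:
                return fmt.Errorf("query ended with status %s", aws.StringValue(res.Status))
            }
            time.Sleep(time.Second) // arbitrary poll interval, well under the 15-minute limit
        }
    }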
@@ -7552,6 +7622,9 @@ type LogGroup struct {
// The number of days to retain the log events in the specified log group. Possible
// values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731,
// 1827, and 3653.
+ //
+ // If you omit retentionInDays in a PutRetentionPolicy operation, the events
+ // in the log group are always retained and never expire.
RetentionInDays *int64 `locationName:"retentionInDays" type:"integer"`
// The number of bytes stored.
@@ -7663,8 +7736,8 @@ type LogStream struct {
// The time of the most recent log event in the log stream in CloudWatch Logs.
// This number is expressed as the number of milliseconds after Jan 1, 1970
// 00:00:00 UTC. The lastEventTime value updates on an eventual consistency
- // basis. It typically updates in less than an hour from ingestion, but may
- // take longer in some rare situations.
+ // basis. It typically updates in less than an hour from ingestion, but in rare
+ // situations might take longer.
LastEventTimestamp *int64 `locationName:"lastEventTimestamp" type:"long"`
// The ingestion time, expressed as the number of milliseconds after Jan 1,
@@ -7676,7 +7749,7 @@ type LogStream struct {
// The number of bytes stored.
//
- // IMPORTANT:On June 17, 2019, this parameter was deprecated for log streams,
+ // Important: On June 17, 2019, this parameter was deprecated for log streams,
// and is always reported as zero. This change applies only to log streams.
// The storedBytes parameter for log groups is not affected.
//
@@ -7746,7 +7819,7 @@ func (s *LogStream) SetUploadSequenceToken(v string) *LogStream {
}
// The query string is not valid. Details about this error are displayed in
-// a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html)"/>.
+// a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html).
//
// For more information about valid query syntax, see CloudWatch Logs Insights
// Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html).
@@ -7822,7 +7895,7 @@ type MetricFilter struct {
FilterName *string `locationName:"filterName" min:"1" type:"string"`
// A symbolic description of how CloudWatch Logs should interpret the data in
- // each log event. For example, a log event may contain timestamps, IP addresses,
+ // each log event. For example, a log event can contain timestamps, IP addresses,
// strings, and so on. You use the filter pattern to specify what to look for
// in the log event message.
FilterPattern *string `locationName:"filterPattern" type:"string"`
@@ -8200,7 +8273,7 @@ type PutDestinationPolicyInput struct {
_ struct{} `type:"structure"`
// An IAM policy document that authorizes cross-account users to deliver their
- // log events to the associated destination.
+ // log events to the associated destination. This can be up to 5120 bytes.
//
// AccessPolicy is a required field
AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string" required:"true"`
@@ -8291,7 +8364,7 @@ type PutLogEventsInput struct {
// call. An upload in a newly created log stream does not require a sequence
// token. You can also get the sequence token using DescribeLogStreams (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeLogStreams.html).
// If you call PutLogEvents twice within a narrow time period using the same
- // value for sequenceToken, both calls may be successful, or one may be rejected.
+ // value for sequenceToken, both calls might be successful or one might be rejected.
SequenceToken *string `locationName:"sequenceToken" min:"1" type:"string"`
}
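
A common way to obtain the sequence token described above is to call DescribeLogStreams just before writing. A sketch with placeholder names (concurrent writers can still invalidate the token, in which case one of the PutLogEvents calls is rejected):

    func writeOne(svc *cloudwatchlogs.CloudWatchLogs) error {
        ds, err := svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{
            LogGroupName:        aws.String("/my/app"),  // placeholder
            LogStreamNamePrefix: aws.String("stream-1"), // placeholder
        })
        if err != nil || len(ds.LogStreams) == 0 {
            return err
        }
        _, err = svc.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
            LogGroupName:  aws.String("/my/app"),
            LogStreamName: aws.String("stream-1"),
            SequenceToken: ds.LogStreams[0].UploadSequenceToken, // nil is fine for a brand-new stream
            LogEvents: []*cloudwatchlogs.InputLogEvent{{
                Message:   aws.String("hello from the sketch"),
                Timestamp: aws.Int64(time.Now().UnixNano() / int64(time.Millisecond)),
            }},
        })
        return err
    }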
@@ -8518,13 +8591,33 @@ func (s PutMetricFilterOutput) GoString() string {
type PutQueryDefinitionInput struct {
_ struct{} `type:"structure"`
+ // Use this parameter to include specific log groups as part of your query definition.
+ //
+ // If you are updating a query definition and you omit this parameter, then
+ // the updated definition will contain no log groups.
LogGroupNames []*string `locationName:"logGroupNames" type:"list"`
+ // A name for the query definition. If you are saving a lot of query definitions,
+ // we recommend that you name them so that you can easily find the ones you
+ // want by using the first part of the name as a filter in the queryDefinitionNamePrefix
+ // parameter of DescribeQueryDefinitions (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html).
+ //
// Name is a required field
Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+ // If you are updating a query definition, use this parameter to specify the
+ // ID of the query definition that you want to update. You can use DescribeQueryDefinitions
+ // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html)
+ // to retrieve the IDs of your saved query definitions.
+ //
+	// If you are creating a query definition, do not specify this parameter. CloudWatch
+	// generates a unique ID for the new query definition and includes it in the
+	// response to this operation.
QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string"`
+ // The query string to use for this definition. For more information, see CloudWatch
+ // Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html).
+ //
// QueryString is a required field
QueryString *string `locationName:"queryString" min:"1" type:"string" required:"true"`
}
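
Following the comments above, a create omits QueryDefinitionId and reads the generated ID back from the response. A sketch with placeholder values:

    func saveQuery(svc *cloudwatchlogs.CloudWatchLogs) error {
        out, err := svc.PutQueryDefinition(&cloudwatchlogs.PutQueryDefinitionInput{
            Name:          aws.String("errors-by-service"), // placeholder name
            QueryString:   aws.String("fields @timestamp, @message | filter @message like /ERROR/"),
            LogGroupNames: []*string{aws.String("/my/app")}, // placeholder scope
            // QueryDefinitionId omitted: CloudWatch treats this as a create and
            // generates the ID. Pass an existing ID (and the full LogGroupNames
            // list, since omitting it clears the scope) to update instead.
        })
        if err != nil {
            return err
        }
        fmt.Println("saved as", aws.StringValue(out.QueryDefinitionId))
        return nil
    }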
@@ -8588,6 +8681,7 @@ func (s *PutQueryDefinitionInput) SetQueryString(v string) *PutQueryDefinitionIn
type PutQueryDefinitionOutput struct {
_ struct{} `type:"structure"`
+ // The ID of the query definition.
QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string"`
}
@@ -8697,6 +8791,9 @@ type PutRetentionPolicyInput struct {
// values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731,
// 1827, and 3653.
//
+ // If you omit retentionInDays in a PutRetentionPolicy operation, the events
+ // in the log group are always retained and never expire.
+ //
// RetentionInDays is a required field
RetentionInDays *int64 `locationName:"retentionInDays" type:"integer" required:"true"`
}
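
Setting retention is then a two-field call; the value must be one of the allowed values listed above. A sketch with a placeholder group:

    _, err := svc.PutRetentionPolicy(&cloudwatchlogs.PutRetentionPolicyInput{
        LogGroupName:    aws.String("/my/app"), // placeholder
        RetentionInDays: aws.Int64(30),         // must be one of the documented values
    })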
@@ -8777,7 +8874,7 @@ type PutSubscriptionFilterInput struct {
// DestinationArn is a required field
DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"`
- // The method used to distribute log data to the destination. By default log
+ // The method used to distribute log data to the destination. By default, log
// data is grouped by log stream, but the grouping can be set to random for
// a more even distribution. This property is only applicable when the destination
// is an Amazon Kinesis stream.
@@ -8969,17 +9066,26 @@ func (s *QueryCompileErrorLocation) SetStartCharOffset(v int64) *QueryCompileErr
return s
}
+// This structure contains details about a saved CloudWatch Logs Insights query
+// definition.
type QueryDefinition struct {
_ struct{} `type:"structure"`
+ // The date that the query definition was most recently modified.
LastModified *int64 `locationName:"lastModified" type:"long"`
+ // If this query definition contains a list of log groups that it is limited
+ // to, that list appears here.
LogGroupNames []*string `locationName:"logGroupNames" type:"list"`
+ // The name of the query definition.
Name *string `locationName:"name" min:"1" type:"string"`
+ // The unique ID of the query definition.
QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string"`
+ // The query string to use for this definition. For more information, see CloudWatch
+ // Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html).
QueryString *string `locationName:"queryString" min:"1" type:"string"`
}
@@ -9591,8 +9697,7 @@ func (s *StartQueryOutput) SetQueryId(v string) *StartQueryOutput {
type StopQueryInput struct {
_ struct{} `type:"structure"`
- // The ID number of the query to stop. If necessary, you can use DescribeQueries
- // to find this ID number.
+ // The ID number of the query to stop. To find this ID number, use DescribeQueries.
//
// QueryId is a required field
QueryId *string `locationName:"queryId" type:"string" required:"true"`
@@ -9669,7 +9774,7 @@ type SubscriptionFilter struct {
FilterName *string `locationName:"filterName" min:"1" type:"string"`
// A symbolic description of how CloudWatch Logs should interpret the data in
- // each log event. For example, a log event may contain timestamps, IP addresses,
+ // each log event. For example, a log event can contain timestamps, IP addresses,
// strings, and so on. You use the filter pattern to specify what to look for
// in the log event message.
FilterPattern *string `locationName:"filterPattern" type:"string"`
@@ -9808,7 +9913,7 @@ type TestMetricFilterInput struct {
_ struct{} `type:"structure"`
// A symbolic description of how CloudWatch Logs should interpret the data in
- // each log event. For example, a log event may contain timestamps, IP addresses,
+ // each log event. For example, a log event can contain timestamps, IP addresses,
// strings, and so on. You use the filter pattern to specify what to look for
// in the log event message.
//
@@ -10023,6 +10128,14 @@ const (
DistributionByLogStream = "ByLogStream"
)
+// Distribution_Values returns all elements of the Distribution enum
+func Distribution_Values() []string {
+ return []string{
+ DistributionRandom,
+ DistributionByLogStream,
+ }
+}
+
const (
// ExportTaskStatusCodeCancelled is a ExportTaskStatusCode enum value
ExportTaskStatusCodeCancelled = "CANCELLED"
@@ -10043,6 +10156,18 @@ const (
ExportTaskStatusCodeRunning = "RUNNING"
)
+// ExportTaskStatusCode_Values returns all elements of the ExportTaskStatusCode enum
+func ExportTaskStatusCode_Values() []string {
+ return []string{
+ ExportTaskStatusCodeCancelled,
+ ExportTaskStatusCodeCompleted,
+ ExportTaskStatusCodeFailed,
+ ExportTaskStatusCodePending,
+ ExportTaskStatusCodePendingCancel,
+ ExportTaskStatusCodeRunning,
+ }
+}
+
const (
// OrderByLogStreamName is a OrderBy enum value
OrderByLogStreamName = "LogStreamName"
@@ -10051,6 +10176,14 @@ const (
OrderByLastEventTime = "LastEventTime"
)
+// OrderBy_Values returns all elements of the OrderBy enum
+func OrderBy_Values() []string {
+ return []string{
+ OrderByLogStreamName,
+ OrderByLastEventTime,
+ }
+}
+
const (
// QueryStatusScheduled is a QueryStatus enum value
QueryStatusScheduled = "Scheduled"
@@ -10067,3 +10200,14 @@ const (
// QueryStatusCancelled is a QueryStatus enum value
QueryStatusCancelled = "Cancelled"
)
+
+// QueryStatus_Values returns all elements of the QueryStatus enum
+func QueryStatus_Values() []string {
+ return []string{
+ QueryStatusScheduled,
+ QueryStatusRunning,
+ QueryStatusComplete,
+ QueryStatusFailed,
+ QueryStatusCancelled,
+ }
+}
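
These generated *_Values helpers make client-side validation of enum inputs straightforward, for example as a simple membership check (a sketch):

    func isValidQueryStatus(s string) bool {
        for _, v := range cloudwatchlogs.QueryStatus_Values() {
            if v == s {
                return true
            }
        }
        return false
    }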
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/doc.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/doc.go
index a20147e7b5..647d683016 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/doc.go
@@ -4,8 +4,8 @@
// requests to Amazon CloudWatch Logs.
//
// You can use Amazon CloudWatch Logs to monitor, store, and access your log
-// files from Amazon EC2 instances, AWS CloudTrail, or other sources. You can
-// then retrieve the associated log data from CloudWatch Logs using the CloudWatch
+// files from EC2 instances, AWS CloudTrail, or other sources. You can then
+// retrieve the associated log data from CloudWatch Logs using the CloudWatch
// console, CloudWatch Logs commands in the AWS CLI, CloudWatch Logs API, or
// CloudWatch Logs SDK.
//
@@ -15,8 +15,8 @@
// Logs to monitor applications and systems using log data. For example,
// CloudWatch Logs can track the number of errors that occur in your application
// logs and send you a notification whenever the rate of errors exceeds a
-// threshold that you specify. CloudWatch Logs uses your log data for monitoring;
-// so, no code changes are required. For example, you can monitor application
+// threshold that you specify. CloudWatch Logs uses your log data for monitoring
+// so no code changes are required. For example, you can monitor application
// logs for specific literal terms (such as "NullReferenceException") or
// count the number of occurrences of a literal term at a particular position
// in log data (such as "404" status codes in an Apache access log). When
@@ -24,8 +24,8 @@
// to a CloudWatch metric that you specify.
//
// * Monitor AWS CloudTrail logged events: You can create alarms in CloudWatch
-// and receive notifications of particular API activity as captured by CloudTrail
-// and use the notification to perform troubleshooting.
+// and receive notifications of particular API activity as captured by CloudTrail.
+// You can use the notification to perform troubleshooting.
//
// * Archive log data: You can use CloudWatch Logs to store your log data
// in highly durable storage. You can change the log retention setting so
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/errors.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/errors.go
index 39c9cd5eaf..44e3bb576f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/errors.go
@@ -43,7 +43,7 @@ const (
// "MalformedQueryException".
//
// The query string is not valid. Details about this error are displayed in
- // a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html)"/>.
+ // a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html).
//
// For more information about valid query syntax, see CloudWatch Logs Insights
// Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html).
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go
new file mode 100644
index 0000000000..4498f285e4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go
@@ -0,0 +1,1210 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+const opGetRoleCredentials = "GetRoleCredentials"
+
+// GetRoleCredentialsRequest generates a "aws/request.Request" representing the
+// client's request for the GetRoleCredentials operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRoleCredentials for more information on using the GetRoleCredentials
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetRoleCredentialsRequest method.
+// req, resp := client.GetRoleCredentialsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials
+func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) {
+ op := &request.Operation{
+ Name: opGetRoleCredentials,
+ HTTPMethod: "GET",
+ HTTPPath: "/federation/credentials",
+ }
+
+ if input == nil {
+ input = &GetRoleCredentialsInput{}
+ }
+
+ output = &GetRoleCredentialsOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// GetRoleCredentials API operation for AWS Single Sign-On.
+//
+// Returns the STS short-term credentials for a given role name that is assigned
+// to the user.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Single Sign-On's
+// API operation GetRoleCredentials for usage and error information.
+//
+// Returned Error Types:
+// * InvalidRequestException
+// Indicates that a problem occurred with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// * UnauthorizedException
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+//
+// * TooManyRequestsException
+// Indicates that the request is being made too frequently and is more than
+// what the server can handle.
+//
+// * ResourceNotFoundException
+// The specified resource doesn't exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials
+func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) {
+ req, out := c.GetRoleCredentialsRequest(input)
+ return out, req.Send()
+}
+
+// GetRoleCredentialsWithContext is the same as GetRoleCredentials with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRoleCredentials for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSO) GetRoleCredentialsWithContext(ctx aws.Context, input *GetRoleCredentialsInput, opts ...request.Option) (*GetRoleCredentialsOutput, error) {
+ req, out := c.GetRoleCredentialsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
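
Because the request is signed with AnonymousCredentials and authorized by the bearer-token header, a caller only needs a valid SSO access token. A sketch with placeholder account and role (accessToken is assumed to come from the SSO OIDC CreateToken flow):

    svc := sso.New(session.Must(session.NewSession()))
    out, err := svc.GetRoleCredentials(&sso.GetRoleCredentialsInput{
        AccessToken: aws.String(accessToken),    // assumed to be a CreateToken result
        AccountId:   aws.String("123456789012"), // placeholder account ID
        RoleName:    aws.String("ReadOnly"),     // placeholder role name
    })
    if err == nil {
        fmt.Println(aws.StringValue(out.RoleCredentials.AccessKeyId))
    }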
+
+const opListAccountRoles = "ListAccountRoles"
+
+// ListAccountRolesRequest generates a "aws/request.Request" representing the
+// client's request for the ListAccountRoles operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListAccountRoles for more information on using the ListAccountRoles
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ListAccountRolesRequest method.
+// req, resp := client.ListAccountRolesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles
+func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) {
+ op := &request.Operation{
+ Name: opListAccountRoles,
+ HTTPMethod: "GET",
+ HTTPPath: "/assignment/roles",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextToken"},
+ LimitToken: "maxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListAccountRolesInput{}
+ }
+
+ output = &ListAccountRolesOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// ListAccountRoles API operation for AWS Single Sign-On.
+//
+// Lists all roles that are assigned to the user for a given AWS account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Single Sign-On's
+// API operation ListAccountRoles for usage and error information.
+//
+// Returned Error Types:
+// * InvalidRequestException
+// Indicates that a problem occurred with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// * UnauthorizedException
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+//
+// * TooManyRequestsException
+// Indicates that the request is being made too frequently and is more than
+// what the server can handle.
+//
+// * ResourceNotFoundException
+// The specified resource doesn't exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles
+func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) {
+ req, out := c.ListAccountRolesRequest(input)
+ return out, req.Send()
+}
+
+// ListAccountRolesWithContext is the same as ListAccountRoles with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListAccountRoles for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRolesInput, opts ...request.Option) (*ListAccountRolesOutput, error) {
+ req, out := c.ListAccountRolesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListAccountRolesPages iterates over the pages of a ListAccountRoles operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListAccountRoles method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListAccountRoles operation.
+// pageNum := 0
+// err := client.ListAccountRolesPages(params,
+// func(page *sso.ListAccountRolesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error {
+ return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListAccountRolesPagesWithContext same as ListAccountRolesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSO) ListAccountRolesPagesWithContext(ctx aws.Context, input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListAccountRolesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListAccountRolesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListAccountRolesOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opListAccounts = "ListAccounts"
+
+// ListAccountsRequest generates a "aws/request.Request" representing the
+// client's request for the ListAccounts operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListAccounts for more information on using the ListAccounts
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ListAccountsRequest method.
+// req, resp := client.ListAccountsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts
+func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) {
+ op := &request.Operation{
+ Name: opListAccounts,
+ HTTPMethod: "GET",
+ HTTPPath: "/assignment/accounts",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextToken"},
+ LimitToken: "maxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListAccountsInput{}
+ }
+
+ output = &ListAccountsOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// ListAccounts API operation for AWS Single Sign-On.
+//
+// Lists all AWS accounts assigned to the user. These AWS accounts are assigned
+// by the administrator of the account. For more information, see Assign User
+// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers)
+// in the AWS SSO User Guide. This operation returns a paginated response.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Single Sign-On's
+// API operation ListAccounts for usage and error information.
+//
+// Returned Error Types:
+// * InvalidRequestException
+// Indicates that a problem occurred with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// * UnauthorizedException
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+//
+// * TooManyRequestsException
+// Indicates that the request is being made too frequently and is more than
+// what the server can handle.
+//
+// * ResourceNotFoundException
+// The specified resource doesn't exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts
+func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) {
+ req, out := c.ListAccountsRequest(input)
+ return out, req.Send()
+}
+
+// ListAccountsWithContext is the same as ListAccounts with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListAccounts for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, opts ...request.Option) (*ListAccountsOutput, error) {
+ req, out := c.ListAccountsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListAccountsPages iterates over the pages of a ListAccounts operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListAccounts method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListAccounts operation.
+// pageNum := 0
+// err := client.ListAccountsPages(params,
+// func(page *sso.ListAccountsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error {
+ return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListAccountsPagesWithContext same as ListAccountsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSO) ListAccountsPagesWithContext(ctx aws.Context, input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListAccountsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListAccountsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opLogout = "Logout"
+
+// LogoutRequest generates a "aws/request.Request" representing the
+// client's request for the Logout operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See Logout for more information on using the Logout
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the LogoutRequest method.
+// req, resp := client.LogoutRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout
+func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) {
+ op := &request.Operation{
+ Name: opLogout,
+ HTTPMethod: "POST",
+ HTTPPath: "/logout",
+ }
+
+ if input == nil {
+ input = &LogoutInput{}
+ }
+
+ output = &LogoutOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// Logout API operation for AWS Single Sign-On.
+//
+// Removes the client- and server-side session that is associated with the user.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Single Sign-On's
+// API operation Logout for usage and error information.
+//
+// Returned Error Types:
+// * InvalidRequestException
+// Indicates that a problem occurred with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// * UnauthorizedException
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+//
+// * TooManyRequestsException
+// Indicates that the request is being made too frequently and is more than
+// what the server can handle.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout
+func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) {
+ req, out := c.LogoutRequest(input)
+ return out, req.Send()
+}
+
+// LogoutWithContext is the same as Logout with the addition of
+// the ability to pass a context and additional request options.
+//
+// See Logout for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...request.Option) (*LogoutOutput, error) {
+ req, out := c.LogoutRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
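
Taken together, these operations support a simple discovery flow: list the accounts, list the roles in each, and then fetch credentials for a chosen role. A hedged sketch (token is an assumed SSO access token; error handling is elided):

    func listEverything(svc *sso.SSO, token string) error {
        return svc.ListAccountsPages(&sso.ListAccountsInput{AccessToken: aws.String(token)},
            func(page *sso.ListAccountsOutput, _ bool) bool {
                for _, acct := range page.AccountList {
                    _ = svc.ListAccountRolesPages(&sso.ListAccountRolesInput{
                        AccessToken: aws.String(token),
                        AccountId:   acct.AccountId,
                    }, func(rp *sso.ListAccountRolesOutput, _ bool) bool {
                        for _, role := range rp.RoleList {
                            fmt.Println(aws.StringValue(role.AccountId), aws.StringValue(role.RoleName))
                        }
                        return true // keep paging roles
                    })
                }
                return true // keep paging accounts
            })
    }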
+
+// Provides information about your AWS account.
+type AccountInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the AWS account that is assigned to the user.
+ AccountId *string `locationName:"accountId" type:"string"`
+
+ // The display name of the AWS account that is assigned to the user.
+ AccountName *string `locationName:"accountName" type:"string"`
+
+ // The email address of the AWS account that is assigned to the user.
+ EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s AccountInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccountInfo) GoString() string {
+ return s.String()
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *AccountInfo) SetAccountId(v string) *AccountInfo {
+ s.AccountId = &v
+ return s
+}
+
+// SetAccountName sets the AccountName field's value.
+func (s *AccountInfo) SetAccountName(v string) *AccountInfo {
+ s.AccountName = &v
+ return s
+}
+
+// SetEmailAddress sets the EmailAddress field's value.
+func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo {
+ s.EmailAddress = &v
+ return s
+}
+
+type GetRoleCredentialsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token issued by the CreateToken API call. For more information, see CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the AWS SSO OIDC API Reference Guide.
+ //
+ // AccessToken is a required field
+ AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
+
+ // The identifier for the AWS account that is assigned to the user.
+ //
+ // AccountId is a required field
+ AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"`
+
+ // The friendly name of the role that is assigned to the user.
+ //
+ // RoleName is a required field
+ RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetRoleCredentialsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRoleCredentialsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetRoleCredentialsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetRoleCredentialsInput"}
+ if s.AccessToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessToken"))
+ }
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.RoleName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *GetRoleCredentialsInput) SetAccessToken(v string) *GetRoleCredentialsInput {
+ s.AccessToken = &v
+ return s
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *GetRoleCredentialsInput) SetAccountId(v string) *GetRoleCredentialsInput {
+ s.AccountId = &v
+ return s
+}
+
+// SetRoleName sets the RoleName field's value.
+func (s *GetRoleCredentialsInput) SetRoleName(v string) *GetRoleCredentialsInput {
+ s.RoleName = &v
+ return s
+}
+
+type GetRoleCredentialsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The credentials for the role that is assigned to the user.
+ RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetRoleCredentialsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRoleCredentialsOutput) GoString() string {
+ return s.String()
+}
+
+// SetRoleCredentials sets the RoleCredentials field's value.
+func (s *GetRoleCredentialsOutput) SetRoleCredentials(v *RoleCredentials) *GetRoleCredentialsOutput {
+ s.RoleCredentials = v
+ return s
+}
+
+// Indicates that a problem occurred with the input to the request. For example,
+// a required parameter might be missing or out of range.
+type InvalidRequestException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidRequestException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidRequestException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidRequestException(v protocol.ResponseMetadata) error {
+ return &InvalidRequestException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidRequestException) Code() string {
+ return "InvalidRequestException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidRequestException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidRequestException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidRequestException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidRequestException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *InvalidRequestException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+type ListAccountRolesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token issued by the CreateToken API call. For more information, see CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the AWS SSO OIDC API Reference Guide.
+ //
+ // AccessToken is a required field
+ AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
+
+ // The identifier for the AWS account that is assigned to the user.
+ //
+ // AccountId is a required field
+ AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"`
+
+ // The number of items that clients can request per page.
+ MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"`
+
+ // The page token from the previous response output when you request subsequent
+ // pages.
+ NextToken *string `location:"querystring" locationName:"next_token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListAccountRolesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAccountRolesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListAccountRolesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListAccountRolesInput"}
+ if s.AccessToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessToken"))
+ }
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *ListAccountRolesInput) SetAccessToken(v string) *ListAccountRolesInput {
+ s.AccessToken = &v
+ return s
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *ListAccountRolesInput) SetAccountId(v string) *ListAccountRolesInput {
+ s.AccountId = &v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListAccountRolesInput) SetMaxResults(v int64) *ListAccountRolesInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAccountRolesInput) SetNextToken(v string) *ListAccountRolesInput {
+ s.NextToken = &v
+ return s
+}
+
+type ListAccountRolesOutput struct {
+ _ struct{} `type:"structure"`
+
+	// The page token that is used by the client to retrieve the next page of roles.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // A paginated response with the list of roles and the next token if more results
+ // are available.
+ RoleList []*RoleInfo `locationName:"roleList" type:"list"`
+}
+
+// String returns the string representation
+func (s ListAccountRolesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAccountRolesOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAccountRolesOutput) SetNextToken(v string) *ListAccountRolesOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetRoleList sets the RoleList field's value.
+func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOutput {
+ s.RoleList = v
+ return s
+}
+
+type ListAccountsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token issued by the CreateToken API call. For more information, see CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the AWS SSO OIDC API Reference Guide.
+ //
+ // AccessToken is a required field
+ AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
+
+	// The number of items that clients can request per page.
+ MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"`
+
+ // (Optional) When requesting subsequent pages, this is the page token from
+ // the previous response output.
+ NextToken *string `location:"querystring" locationName:"next_token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListAccountsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAccountsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListAccountsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListAccountsInput"}
+ if s.AccessToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessToken"))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *ListAccountsInput) SetAccessToken(v string) *ListAccountsInput {
+ s.AccessToken = &v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListAccountsInput) SetMaxResults(v int64) *ListAccountsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAccountsInput) SetNextToken(v string) *ListAccountsInput {
+ s.NextToken = &v
+ return s
+}
+
+type ListAccountsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A paginated response with the list of account information and the next token
+ // if more results are available.
+ AccountList []*AccountInfo `locationName:"accountList" type:"list"`
+
+	// The page token that is used by the client to retrieve the next page of accounts.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListAccountsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAccountsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccountList sets the AccountList field's value.
+func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput {
+ s.AccountList = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput {
+ s.NextToken = &v
+ return s
+}
+
+type LogoutInput struct {
+ _ struct{} `type:"structure"`
+
+ // The token issued by the CreateToken API call. For more information, see CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the AWS SSO OIDC API Reference Guide.
+ //
+ // AccessToken is a required field
+ AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
+}
+
+// String returns the string representation
+func (s LogoutInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LogoutInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LogoutInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LogoutInput"}
+ if s.AccessToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessToken"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *LogoutInput) SetAccessToken(v string) *LogoutInput {
+ s.AccessToken = &v
+ return s
+}
+
+type LogoutOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s LogoutOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LogoutOutput) GoString() string {
+ return s.String()
+}
+
+// The specified resource doesn't exist.
+type ResourceNotFoundException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s ResourceNotFoundException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceNotFoundException) GoString() string {
+ return s.String()
+}
+
+func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
+ return &ResourceNotFoundException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *ResourceNotFoundException) Code() string {
+ return "ResourceNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *ResourceNotFoundException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourceNotFoundException) OrigErr() error {
+ return nil
+}
+
+func (s *ResourceNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *ResourceNotFoundException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *ResourceNotFoundException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Provides information about the role credentials that are assigned to the
+// user.
+type RoleCredentials struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier used for the temporary security credentials. For more information,
+ // see Using Temporary Security Credentials to Request Access to AWS Resources
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
+ // in the AWS IAM User Guide.
+ AccessKeyId *string `locationName:"accessKeyId" type:"string"`
+
+ // The date on which temporary security credentials expire.
+ Expiration *int64 `locationName:"expiration" type:"long"`
+
+ // The key that is used to sign the request. For more information, see Using
+ // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
+ // in the AWS IAM User Guide.
+ SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"`
+
+ // The token used for temporary credentials. For more information, see Using
+ // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
+ // in the AWS IAM User Guide.
+ SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"`
+}
+
+// String returns the string representation
+func (s RoleCredentials) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoleCredentials) GoString() string {
+ return s.String()
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *RoleCredentials) SetAccessKeyId(v string) *RoleCredentials {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *RoleCredentials) SetExpiration(v int64) *RoleCredentials {
+ s.Expiration = &v
+ return s
+}
+
+// SetSecretAccessKey sets the SecretAccessKey field's value.
+func (s *RoleCredentials) SetSecretAccessKey(v string) *RoleCredentials {
+ s.SecretAccessKey = &v
+ return s
+}
+
+// SetSessionToken sets the SessionToken field's value.
+func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials {
+ s.SessionToken = &v
+ return s
+}
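
The returned key material can then be fed to any other SDK client through static credentials. A sketch, assuming rc is the *RoleCredentials from a GetRoleCredentials response and that Expiration is an epoch timestamp in milliseconds (an assumption, consistent with the other timestamps in this API; imports as in api.go):

    cfg := aws.NewConfig().WithCredentials(credentials.NewStaticCredentials(
        aws.StringValue(rc.AccessKeyId),
        aws.StringValue(rc.SecretAccessKey),
        aws.StringValue(rc.SessionToken),
    ))
    expires := time.Unix(0, aws.Int64Value(rc.Expiration)*int64(time.Millisecond))
    fmt.Println("credentials usable until", expires)
    // cfg can now be passed when constructing another service client.
    _ = cfg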
+
+// Provides information about the role that is assigned to the user.
+type RoleInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the AWS account assigned to the user.
+ AccountId *string `locationName:"accountId" type:"string"`
+
+ // The friendly name of the role that is assigned to the user.
+ RoleName *string `locationName:"roleName" type:"string"`
+}
+
+// String returns the string representation
+func (s RoleInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoleInfo) GoString() string {
+ return s.String()
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *RoleInfo) SetAccountId(v string) *RoleInfo {
+ s.AccountId = &v
+ return s
+}
+
+// SetRoleName sets the RoleName field's value.
+func (s *RoleInfo) SetRoleName(v string) *RoleInfo {
+ s.RoleName = &v
+ return s
+}
+
+// Indicates that the request is being made too frequently and is more than
+// what the server can handle.
+type TooManyRequestsException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s TooManyRequestsException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TooManyRequestsException) GoString() string {
+ return s.String()
+}
+
+func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error {
+ return &TooManyRequestsException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *TooManyRequestsException) Code() string {
+ return "TooManyRequestsException"
+}
+
+// Message returns the exception's message.
+func (s *TooManyRequestsException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *TooManyRequestsException) OrigErr() error {
+ return nil
+}
+
+func (s *TooManyRequestsException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *TooManyRequestsException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *TooManyRequestsException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+type UnauthorizedException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s UnauthorizedException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnauthorizedException) GoString() string {
+ return s.String()
+}
+
+func newErrorUnauthorizedException(v protocol.ResponseMetadata) error {
+ return &UnauthorizedException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *UnauthorizedException) Code() string {
+ return "UnauthorizedException"
+}
+
+// Message returns the exception's message.
+func (s *UnauthorizedException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnauthorizedException) OrigErr() error {
+ return nil
+}
+
+func (s *UnauthorizedException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *UnauthorizedException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *UnauthorizedException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
new file mode 100644
index 0000000000..92d82b2afb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
@@ -0,0 +1,44 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sso provides the client and types for making API
+// requests to AWS Single Sign-On.
+//
+// AWS Single Sign-On Portal is a web service that makes it easy for you to
+// assign user access to AWS SSO resources such as the user portal. Users can
+// get AWS account applications and roles assigned to them and get federated
+// into the application.
+//
+// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
+// in the AWS SSO User Guide.
+//
+// This API reference guide describes the AWS SSO Portal operations that you
+// can call programmatically and includes detailed information on data types
+// and errors.
+//
+// AWS provides SDKs that consist of libraries and sample code for various programming
+// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
+// provide a convenient way to create programmatic access to AWS SSO and other
+// AWS services. For more information about the AWS SDKs, including how to download
+// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
+//
+// See sso package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
+//
+// Using the Client
+//
+// To contact AWS Single Sign-On with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Single Sign-On client SSO for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New
+package sso
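+
+// The following is an illustrative sketch (not generated code): it shows one
+// way to create a client and fetch temporary role credentials. The region,
+// token, account ID, and role name values are placeholder assumptions.
+//
+//    sess := session.Must(session.NewSession())
+//    svc := sso.New(sess, aws.NewConfig().WithRegion("us-east-1"))
+//    out, err := svc.GetRoleCredentials(&sso.GetRoleCredentialsInput{
+//        AccessToken: aws.String("<sso-access-token>"),
+//        AccountId:   aws.String("123456789012"),
+//        RoleName:    aws.String("ReadOnlyRole"),
+//    })
+//    if err == nil {
+//        _ = out.RoleCredentials // AccessKeyId, SecretAccessKey, SessionToken
+//    }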
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
new file mode 100644
index 0000000000..77a6792e35
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
@@ -0,0 +1,44 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const (
+
+ // ErrCodeInvalidRequestException for service response error code
+ // "InvalidRequestException".
+ //
+ // Indicates that a problem occurred with the input to the request. For example,
+ // a required parameter might be missing or out of range.
+ ErrCodeInvalidRequestException = "InvalidRequestException"
+
+ // ErrCodeResourceNotFoundException for service response error code
+ // "ResourceNotFoundException".
+ //
+ // The specified resource doesn't exist.
+ ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+ // ErrCodeTooManyRequestsException for service response error code
+ // "TooManyRequestsException".
+ //
+	// Indicates that the request is being made too frequently and exceeds
+	// what the server can handle.
+ ErrCodeTooManyRequestsException = "TooManyRequestsException"
+
+ // ErrCodeUnauthorizedException for service response error code
+ // "UnauthorizedException".
+ //
+ // Indicates that the request is not authorized. This can happen due to an invalid
+ // access token in the request.
+ ErrCodeUnauthorizedException = "UnauthorizedException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+ "InvalidRequestException": newErrorInvalidRequestException,
+ "ResourceNotFoundException": newErrorResourceNotFoundException,
+ "TooManyRequestsException": newErrorTooManyRequestsException,
+ "UnauthorizedException": newErrorUnauthorizedException,
+}
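+
+// Illustrative sketch (not generated code): callers can match the typed
+// errors registered above, e.g. to back off on throttling. The backoff shown
+// is an assumption, not SDK behaviour.
+//
+//    var tmr *TooManyRequestsException
+//    if errors.As(err, &tmr) {
+//        time.Sleep(time.Second) // naive backoff before retrying
+//    }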
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
new file mode 100644
index 0000000000..35175331fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
@@ -0,0 +1,104 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSO provides the API operation methods for making requests to
+// AWS Single Sign-On. See this package's package overview docs
+// for details on the service.
+//
+// SSO methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties, though.
+type SSO struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "SSO" // Name of service.
+ EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
+ ServiceID = "SSO" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the SSO client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// mySession := session.Must(session.NewSession())
+//
+// // Create a SSO client from just a session.
+// svc := sso.New(mySession)
+//
+// // Create a SSO client with additional configuration
+// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ if c.SigningNameDerived || len(c.SigningName) == 0 {
+ c.SigningName = "awsssoportal"
+ }
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSO {
+ svc := &SSO{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ PartitionID: partitionID,
+ Endpoint: endpoint,
+ APIVersion: "2019-06-10",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
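+	// Unmarshal service error responses into the typed exceptions registered
+	// in exceptionFromCode (errors.go) rather than generic awserr values.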
+ svc.Handlers.UnmarshalError.PushBackNamed(
+ protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
+ )
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a SSO operation and runs any
+// custom request initialization.
+func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
new file mode 100644
index 0000000000..4cac247c18
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
@@ -0,0 +1,86 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package ssoiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/sso"
+)
+
+// SSOAPI provides an interface to enable mocking the
+// sso.SSO service client's API operations,
+// paginators, and waiters. This makes it easier to unit test code that
+// calls out to the SDK's service client.
+//
+// The best way to use this interface is to stub out the SDK's service client
+// calls when unit testing your code, without needing to inject custom request
+// handlers into the SDK's request pipeline.
+//
+// // myFunc uses an SDK service client to make a request to
+// // AWS Single Sign-On.
+// func myFunc(svc ssoiface.SSOAPI) bool {
+// // Make svc.GetRoleCredentials request
+// }
+//
+// func main() {
+// sess := session.New()
+// svc := sso.New(sess)
+//
+// myFunc(svc)
+// }
+//
+// In your _test.go file:
+//
+// // Define a mock struct to be used in your unit tests of myFunc.
+// type mockSSOClient struct {
+// ssoiface.SSOAPI
+// }
+// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
+// // mock response/functionality
+// }
+//
+// func TestMyFunc(t *testing.T) {
+// // Setup Test
+// mockSvc := &mockSSOClient{}
+//
+// myFunc(mockSvc)
+//
+// // Verify myFunc's functionality
+// }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks that satisfy the interfaces.
+type SSOAPI interface {
+ GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error)
+ GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error)
+ GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput)
+
+ ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error)
+ ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error)
+ ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput)
+
+ ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error
+ ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error
+
+ ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error)
+ ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error)
+ ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput)
+
+ ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error
+ ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error
+
+ Logout(*sso.LogoutInput) (*sso.LogoutOutput, error)
+ LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error)
+ LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput)
+}
+
+var _ SSOAPI = (*sso.SSO)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index 550b5f687f..bfc4372f9f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -207,6 +207,10 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
req, out := c.AssumeRoleRequest(input)
@@ -626,7 +630,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
//
-// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
// Walk through the process of authenticating through Login with Amazon,
// Facebook, or Google, getting temporary security credentials, and then
// using those credentials to make a request to AWS.
@@ -1788,7 +1792,7 @@ type AssumeRoleWithSAMLInput struct {
// in the IAM User Guide.
//
// SAMLAssertion is a required field
- SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"`
+ SAMLAssertion *string `min:"4" type:"string" required:"true"`
}
// String returns the string representation
@@ -2100,7 +2104,7 @@ type AssumeRoleWithWebIdentityInput struct {
// the application makes an AssumeRoleWithWebIdentity call.
//
// WebIdentityToken is a required field
- WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"`
+ WebIdentityToken *string `min:"4" type:"string" required:"true"`
}
// String returns the string representation
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
index fcb720dcac..cb1debbaa4 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -3,87 +3,11 @@
// Package sts provides the client and types for making API
// requests to AWS Security Token Service.
//
-// The AWS Security Token Service (STS) is a web service that enables you to
-// request temporary, limited-privilege credentials for AWS Identity and Access
-// Management (IAM) users or for users that you authenticate (federated users).
-// This guide provides descriptions of the STS API. For more detailed information
-// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
-//
-// For information about setting up signatures and authorization through the
-// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
-// in the AWS General Reference. For general information about the Query API,
-// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
-// in Using IAM. For information about using security tokens with other AWS
-// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
-// in the IAM User Guide.
-//
-// If you're new to AWS and need additional technical information about a specific
-// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
-// (http://aws.amazon.com/documentation/).
-//
-// Endpoints
-//
-// By default, AWS Security Token Service (STS) is available as a global service,
-// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com.
-// Global requests map to the US East (N. Virginia) region. AWS recommends using
-// Regional AWS STS endpoints instead of the global endpoint to reduce latency,
-// build in redundancy, and increase session token validity. For more information,
-// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
-//
-// Most AWS Regions are enabled for operations in all AWS services by default.
-// Those Regions are automatically activated for use with AWS STS. Some Regions,
-// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more
-// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html)
-// in the AWS General Reference. When you enable these AWS Regions, they are
-// automatically activated for use with AWS STS. You cannot activate the STS
-// endpoint for a Region that is disabled. Tokens that are valid in all AWS
-// Regions are longer than tokens that are valid in Regions that are enabled
-// by default. Changing this setting might affect existing systems where you
-// temporarily store tokens. For more information, see Managing Global Endpoint
-// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens)
-// in the IAM User Guide.
-//
-// After you activate a Region for use with AWS STS, you can direct AWS STS
-// API calls to that Region. AWS STS recommends that you provide both the Region
-// and endpoint when you make calls to a Regional endpoint. You can provide
-// the Region alone for manually enabled Regions, such as Asia Pacific (Hong
-// Kong). In this case, the calls are directed to the STS Regional endpoint.
-// However, if you provide the Region alone for Regions enabled by default,
-// the calls are directed to the global endpoint of https://sts.amazonaws.com.
-//
-// To view the list of AWS STS endpoints and whether they are active by default,
-// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code)
-// in the IAM User Guide.
-//
-// Recording API requests
-//
-// STS supports AWS CloudTrail, which is a service that records AWS calls for
-// your AWS account and delivers log files to an Amazon S3 bucket. By using
-// information collected by CloudTrail, you can determine what requests were
-// successfully made to STS, who made the request, when it was made, and so
-// on.
-//
-// If you activate AWS STS endpoints in Regions other than the default global
-// endpoint, then you must also turn on CloudTrail logging in those Regions.
-// This is necessary to record any AWS STS API calls that are made in those
-// Regions. For more information, see Turning On CloudTrail in Additional Regions
-// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html)
-// in the AWS CloudTrail User Guide.
-//
-// AWS Security Token Service (STS) is a global service with a single endpoint
-// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls
-// to a global service. However, because this endpoint is physically located
-// in the US East (N. Virginia) Region, your logs list us-east-1 as the event
-// Region. CloudTrail does not write these logs to the US East (Ohio) Region
-// unless you choose to include global service logs in that Region. CloudTrail
-// writes calls to all Regional endpoints to their respective Regions. For example,
-// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio)
-// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU
-// (Frankfurt) Region.
-//
-// To learn more about CloudTrail, including how to turn it on and find your
-// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+// AWS Security Token Service (STS) enables you to request temporary, limited-privilege
+// credentials for AWS Identity and Access Management (IAM) users or for users
+// that you authenticate (federated users). This guide provides descriptions
+// of the STS API. For more information about using this service, see Temporary
+// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
//
diff --git a/vendor/github.com/bsphere/le_go/.travis.yml b/vendor/github.com/bsphere/le_go/.travis.yml
index aa1b91fe6c..6c604df110 100644
--- a/vendor/github.com/bsphere/le_go/.travis.yml
+++ b/vendor/github.com/bsphere/le_go/.travis.yml
@@ -1,4 +1,4 @@
language: go
go:
- - 1.4
\ No newline at end of file
+ - 1.12.x
diff --git a/vendor/github.com/bsphere/le_go/le.go b/vendor/github.com/bsphere/le_go/le.go
index 553e4c7054..a536743e80 100644
--- a/vendor/github.com/bsphere/le_go/le.go
+++ b/vendor/github.com/bsphere/le_go/le.go
@@ -127,9 +127,22 @@ func (logger *Logger) Flags() int {
// Output does the actual writing to the TCP connection
func (logger *Logger) Output(calldepth int, s string) error {
- _, err := logger.Write([]byte(s))
-
- return err
+ var (
+ err error
+ waitPeriod = time.Millisecond
+ )
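+	// Retry failed writes, reopening the TCP connection and doubling the wait
+	// between attempts; give up only if reconnecting itself fails.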
+ for {
+ _, err = logger.Write([]byte(s))
+ if err != nil {
+ if connectionErr := logger.openConnection(); connectionErr != nil {
+ return connectionErr
+ }
+ waitPeriod *= 2
+ time.Sleep(waitPeriod)
+ continue
+ }
+ return err
+ }
}
// Panic is same as Print() but calls to panic
@@ -159,18 +172,18 @@ func (logger *Logger) Prefix() string {
}
// Print logs a message
-func (logger *Logger) Print(v ...interface{}) {
- logger.Output(2, fmt.Sprint(v...))
+func (logger *Logger) Print(v ...interface{}) error {
+ return logger.Output(2, fmt.Sprint(v...))
}
// Printf logs a formatted message
-func (logger *Logger) Printf(format string, v ...interface{}) {
- logger.Output(2, fmt.Sprintf(format, v...))
+func (logger *Logger) Printf(format string, v ...interface{}) error {
+ return logger.Output(2, fmt.Sprintf(format, v...))
}
// Println logs a message with a linebreak
-func (logger *Logger) Println(v ...interface{}) {
- logger.Output(2, fmt.Sprintln(v...))
+func (logger *Logger) Println(v ...interface{}) error {
+ return logger.Output(2, fmt.Sprintln(v...))
}
// SetFlags sets the logger flags
@@ -187,11 +200,10 @@ func (logger *Logger) SetPrefix(prefix string) {
// it adds the access token and prefix and also replaces
// line breaks with the unicode \u2028 character
func (logger *Logger) Write(p []byte) (n int, err error) {
+	logger.mu.Lock()
+	// Register the unlock before the early return below, so a failed
+	// ensureOpenConnection cannot leave the mutex held.
+	defer logger.mu.Unlock()
 	if err := logger.ensureOpenConnection(); err != nil {
 		return 0, err
 	}
-
-	logger.mu.Lock()
-	defer logger.mu.Unlock()
logger.makeBuf(p)
diff --git a/vendor/github.com/fernet/fernet-go/fernet.go b/vendor/github.com/fernet/fernet-go/fernet.go
index 9e4bcce35c..b35fdbbe4f 100644
--- a/vendor/github.com/fernet/fernet-go/fernet.go
+++ b/vendor/github.com/fernet/fernet-go/fernet.go
@@ -30,6 +30,7 @@ const (
payOffset = ivOffset + aes.BlockSize
overhead = 1 + 8 + aes.BlockSize + sha256.Size // ver + ts + iv + hmac
maxClockSkew = 60 * time.Second
+ uint64Bytes = 8
)
var encoding = base64.URLEncoding
@@ -63,7 +64,7 @@ func decodedLen(n int) int {
// if msg is nil, decrypts in place and returns a slice of tok.
func verify(msg, tok []byte, ttl time.Duration, now time.Time, k *Key) []byte {
- if len(tok) < 1 || tok[0] != version {
+ if len(tok) < 1+uint64Bytes || tok[0] != version {
return nil
}
ts := time.Unix(int64(binary.BigEndian.Uint64(tok[1:])), 0)
@@ -71,6 +72,9 @@ func verify(msg, tok []byte, ttl time.Duration, now time.Time, k *Key) []byte {
return nil
}
n := len(tok) - sha256.Size
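+	// Guard against tokens too short to contain an HMAC; without this check,
+	// the tok[n:] slice below would panic for truncated input.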
+ if n <= 0 {
+ return nil
+ }
var hmac [sha256.Size]byte
genhmac(hmac[:0], tok[:n], k.signBytes())
if subtle.ConstantTimeCompare(tok[n:], hmac[:]) != 1 {
diff --git a/vendor/github.com/fernet/fernet-go/invalid.json b/vendor/github.com/fernet/fernet-go/invalid.json
index d80e7b4a35..ec48ecccf7 100644
--- a/vendor/github.com/fernet/fernet-go/invalid.json
+++ b/vendor/github.com/fernet/fernet-go/invalid.json
@@ -54,5 +54,19 @@
"now": "1985-10-26T01:20:01-07:00",
"ttl_sec": 60,
"secret": "cw_0x689RpI-jtRR7oE8h_eQsKImvJapLeSbXpwF4e4="
+ },
+ {
+ "desc": "very short payload size",
+ "token": "gAAAAABdnQ1TUKh2OE_ggbyCIxfg",
+ "now": "1985-10-26T01:20:01-07:00",
+ "ttl_sec": 0,
+ "secret": "cw_0x689RpI-jtRR7oE8h_eQsKImvJapLeSbXpwF4e4="
+ },
+ {
+ "desc": "super short payload size",
+ "token": "gAAA",
+ "now": "1985-10-26T01:20:01-07:00",
+ "ttl_sec": 0,
+ "secret": "cw_0x689RpI-jtRR7oE8h_eQsKImvJapLeSbXpwF4e4="
}
]
diff --git a/vendor/github.com/google/certificate-transparency-go/.gitignore b/vendor/github.com/google/certificate-transparency-go/.gitignore
index 26073b0df9..8c13cd1c9d 100644
--- a/vendor/github.com/google/certificate-transparency-go/.gitignore
+++ b/vendor/github.com/google/certificate-transparency-go/.gitignore
@@ -15,7 +15,6 @@
/ct_hammer
/data
/dumpscts
-/etcdiscover
/findlog
/goshawk
/gosmin
diff --git a/vendor/github.com/google/certificate-transparency-go/.golangci.yaml b/vendor/github.com/google/certificate-transparency-go/.golangci.yaml
new file mode 100644
index 0000000000..34c803a8cd
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/.golangci.yaml
@@ -0,0 +1,38 @@
+run:
+ deadline: 90s
+ skip-dirs:
+ - (^|/)x509($|/)
+ - (^|/)x509util($|/)
+ - (^|/)asn1($|/)
+
+linters-settings:
+ gocyclo:
+ min-complexity: 40
+ depguard:
+ list-type: blacklist
+ packages:
+ - ^golang.org/x/net/context$
+ - github.com/gogo/protobuf/proto
+ - encoding/asn1
+ - crypto/x509
+
+linters:
+ disable-all: true
+ enable:
+ - deadcode
+ - depguard
+ - gocyclo
+ - gofmt
+ - goimports
+ - govet
+ - ineffassign
+ - megacheck
+ - misspell
+ - revive
+ - varcheck
+ # TODO(gbelvin): write license linter and commit to upstream.
+ # ./scripts/check_license.sh is run by ./scripts/presubmit.sh
+
+issues:
+ # Don't turn off any checks by default. We can do this explicitly if needed.
+ exclude-use-default: false
diff --git a/vendor/github.com/google/certificate-transparency-go/.travis.yml b/vendor/github.com/google/certificate-transparency-go/.travis.yml
deleted file mode 100644
index 23f38513bd..0000000000
--- a/vendor/github.com/google/certificate-transparency-go/.travis.yml
+++ /dev/null
@@ -1,74 +0,0 @@
-sudo: true # required for CI push into Kubernetes.
-language: go
-os: linux
-go: "1.10"
-
-go_import_path: github.com/google/certificate-transparency-go
-
-env:
- - GCE_CI=${ENABLE_GCE_CI} GOFLAGS=
- - GOFLAGS=-race
- - GOFLAGS= WITH_ETCD=true WITH_COVERAGE=true
- - GOFLAGS=-race WITH_ETCD=true
-
-matrix:
- fast_finish: true
-
-services:
- - docker
-
-install:
- - mkdir ../protoc
- - |
- (
- cd ../protoc
- wget https://github.com/google/protobuf/releases/download/v3.5.1/protoc-3.5.1-${TRAVIS_OS_NAME}-x86_64.zip
- unzip protoc-3.5.1-${TRAVIS_OS_NAME}-x86_64.zip
- )
- - export PATH=$(pwd)/../protoc/bin:$PATH
- - go get -d -t ./...
- - go get github.com/alecthomas/gometalinter
- - gometalinter --install
- - go get -u github.com/golang/protobuf/proto
- - go get -u github.com/golang/protobuf/protoc-gen-go
- - go install github.com/golang/mock/mockgen
- # install vendored etcd binary
- - go install ./vendor/github.com/coreos/etcd/cmd/etcd
- - go install ./vendor/github.com/coreos/etcd/cmd/etcdctl
- - pushd ${GOPATH}/src/github.com/google/trillian
- - go get -d -t ./...
- - popd
-
-script:
- - set -e
- - cd $HOME/gopath/src/github.com/google/certificate-transparency-go
- - ./scripts/presubmit.sh ${PRESUBMIT_OPTS} ${WITH_COVERAGE:+--coverage}
- - |
- # Check re-generation didn't change anything
- status=$(git status --porcelain | grep -v coverage) || :
- if [[ -n ${status} ]]; then
- echo "Regenerated files differ from checked-in versions: ${status}"
- git status
- git diff
- exit 1
- fi
- - |
- if [[ "${WITH_ETCD}" == "true" ]]; then
- export ETCD_DIR="${GOPATH}/bin"
- fi
- - ./trillian/integration/integration_test.sh
- - HAMMER_OPTS="--operations=1500" ./trillian/integration/ct_hammer_test.sh
- - set +e
-
-after_success:
- - cp /tmp/coverage.txt .
- - bash <(curl -s https://codecov.io/bash)
- - |
- # Push up to GCE CI instance if we're running after a merge to master
- if [[ "${GCE_CI}" == "true" ]] && [[ $TRAVIS_PULL_REQUEST == "false" ]] && [[ $TRAVIS_BRANCH == "master" ]]; then
- . scripts/install_cloud.sh
- echo ${GCLOUD_SERVICE_KEY_CI} | base64 --decode -i > ${HOME}/gcloud-service-key.json
- gcloud auth activate-service-account --key-file ${HOME}/gcloud-service-key.json
- rm ${HOME}/gcloud-service-key.json
- . scripts/deploy_gce_ci.sh
- fi
diff --git a/vendor/github.com/google/certificate-transparency-go/AUTHORS b/vendor/github.com/google/certificate-transparency-go/AUTHORS
index 649da70b02..5b048dddf5 100644
--- a/vendor/github.com/google/certificate-transparency-go/AUTHORS
+++ b/vendor/github.com/google/certificate-transparency-go/AUTHORS
@@ -11,7 +11,7 @@
Comodo CA Limited
Ed Maste <emaste@freebsd.org>
Fiaz Hossain <fiaz.hossain@salesforce.com>
-Google Inc.
+Google LLC
Internet Security Research Group
Jeff Trawick <trawick@gmail.com>
Katriel Cohn-Gordon <katriel.cohn-gordon@cybersecurity.ox.ac.uk>
diff --git a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
index cb8b7e3530..813fc22214 100644
--- a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
+++ b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
@@ -1,5 +1,398 @@
# CERTIFICATE-TRANSPARENCY-GO Changelog
+## HEAD
+
+### Integration
+
+ * Breaking change to API for `integration.HammerCTLog`:
+ * Added `ctx` as the first argument; the hammer loop now terminates if the context is cancelled
+
+### JSONClient
+
+ * PostAndParseWithRetry now does backoff-and-retry upon receiving HTTP 429.
+
+### Cleanup
+
+ * `WithBalancerName` is deprecated and its usage has been removed in favour of the recommended replacement.
+ * `ctfe.PEMCertPool` type has been moved to `x509util.PEMCertPool` to reduce
+ dependencies (#903).
+ * Remove log list v1 package and its dependencies.
+
+### Migrillian
+
+* #960: Skip consistency check when root is size zero.
+
+### Misc
+
+ * updated golangci-lint to v1.46.1 (developers should update to this version)
+ * update `google.golang.org/grpc` to v1.46.0
+ * `ctclient` tool now uses Cobra for better CLI experience (#901).
+ * #800: Remove dependency on `ratelimit`.
+ * #927: Add read-only mode to CTFE config.
+ * Update Trillian to [0a389c4](https://github.com/google/trillian/commit/0a389c4bb8d97fb3be8f55d7e5b428cf4304986f)
+ * Migrate loglist dependency from v1 to v3 in ctclient cmd.
+ * Migrate loglist dependency from v1 to v3 in ctutil/loginfo.go
+ * Migrate loglist dependency from v1 to v3 in ctutil/sctscan.go
+ * Migrate loglist dependency from v1 to v3 in trillian/integration/ct_hammer/main.go
+
+## v1.1.2
+
+### CTFE
+
+ * Removed the `-by_range` flag.
+
+### Updated dependencies
+
+ * Trillian from v1.3.11 to v1.4.0
+ * protobuf to v2
+
+## v1.1.1
+[Published 2020-10-06](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.1)
+
+### Tools
+
+#### CT Hammer
+
+Added a flag (--strict_sth_consistency_size) which, when set to true, enforces the current behaviour of only requesting consistency proofs between tree sizes for which the hammer has seen valid STHs.
+When this flag is set to false, if no two usable STHs are available the hammer will attempt to request a consistency proof between the latest STH it's seen and a random smaller (but > 0) tree size.
+
+
+### CTFE
+
+#### Caching
+
+The CTFE now includes a Cache-Control header in responses containing purely
+immutable data, e.g. those for get-entries and get-proof-by-hash. This allows
+clients and proxies to cache these responses for up to 24 hours.
+
+#### EKU Filtering
+
+> :warning: **It is not yet recommended to enable this option in a production CT Log!**
+
+CTFE now supports filtering logging submissions by leaf certificate EKU.
+This is enabled by adding an extKeyUsage list to a log's stanza in the
+config file.
+
+The format is a list of strings corresponding to the supported golang x509 EKUs:
+ |Config string | Extended Key Usage |
+ |----------------------------|----------------------------------------|
+ |`Any` | ExtKeyUsageAny |
+ |`ServerAuth` | ExtKeyUsageServerAuth |
+ |`ClientAuth` | ExtKeyUsageClientAuth |
+ |`CodeSigning` | ExtKeyUsageCodeSigning |
+ |`EmailProtection` | ExtKeyUsageEmailProtection |
+ |`IPSECEndSystem` | ExtKeyUsageIPSECEndSystem |
+ |`IPSECTunnel` | ExtKeyUsageIPSECTunnel |
+ |`IPSECUser` | ExtKeyUsageIPSECUser |
+ |`TimeStamping` | ExtKeyUsageTimeStamping |
+ |`OCSPSigning` | ExtKeyUsageOCSPSigning |
+ |`MicrosoftServerGatedCrypto`| ExtKeyUsageMicrosoftServerGatedCrypto |
+ |`NetscapeServerGatedCrypto` | ExtKeyUsageNetscapeServerGatedCrypto |
+
+When an extKeyUsage list is specified, the CT Log will reject logging
+submissions for leaf certificates that do not contain an EKU present in this
+list.
+
+When enabled, EKU filtering is only performed at the leaf level (i.e. there is
+no 'nested' EKU filtering performed).
+
+If no list is specified, or the list contains an `Any` entry, no EKU
+filtering will be performed.
+
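+As an illustrative sketch (not taken from the release notes), a log stanza
+with EKU filtering might look like the following; the `config` wrapper and
+`ext_key_usages` field name are assumptions to be checked against the current
+CTFE configuration proto:
+
+```
+config {
+  log_id: 1
+  prefix: "example"
+  ext_key_usages: "ServerAuth"
+  ext_key_usages: "ClientAuth"
+}
+```
+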
+#### GetEntries
+Calls to `get-entries` that request the maximum permitted number of entries
+(or more) and whose `start` parameter does not fall on a multiple of that
+maximum will have their responses truncated so that subsequent requests
+align with this boundary.
+This is intended to coerce all callers of `get-entries` into using the same
+`start` and `end` parameters and thereby increase the cacheability of
+these requests.
+
+e.g.:
+
+<pre>
+Old behaviour:
+ 1 2 3
+ 0 0 0
+Entries>-----|---------|---------|----...
+Client A -------|---------|----------|...
+Client B --|--------|---------|-------...
+ ^ ^ ^
+ `--------`---------`---- requests
+
+With coercion (max batch = 10 entries):
+ 1 2 3
+ 0 0 0
+Entries>-----|---------|---------|----...
+Client A ----X---------|---------|...
+Client B --|-X---------|---------|-------...
+ ^
+ `-- Requests truncated
+</pre>
+
+This behaviour can be disabled by setting the `--align_getentries`
+flag to false.
+
+#### Flags
+
+The `ct_server` binary changed the default of these flags:
+
+- `by_range` - Now defaults to `true`
+
+The `ct_server` binary added the following flags:
+- `align_getentries` - See GetEntries section above for details
+
+Added `backend` flag to `migrillian`, which now replaces the deprecated
+"backend" feature of Migrillian configs.
+
+#### FixedBackendResolver Replaced
+
+This was previously used in situations where a comma-separated list of
+backends was provided in the `rpcBackend` flag rather than a single value.
+
+It has been replaced by equivalent functionality using a newer gRPC API.
+However, this support was only intended for use in integration tests. In
+production we recommend the use of etcd or a gRPC load balancer.
+
+### LogList
+
+Log list tools updated to use the correct v2 URL (from v2_beta previously).
+
+### Libraries
+
+#### x509 fork
+
+Merged upstream Go 1.13 and Go 1.14 changes (with the exception
+of https://github.com/golang/go/commit/14521198679e, to allow
+old certs that use a malformed root to still be logged).
+
+#### asn1 fork
+
+Merged upstream Go 1.14 changes.
+
+#### ctutil
+
+Added VerifySCTWithVerifier() to verify SCTs using a given ct.SignatureVerifier.
+
+### Configuration Files
+
+Configuration files that previously had to be text-encoded Protobuf messages can
+now alternatively be binary-encoded instead.
+
+### JSONClient
+
+- `PostAndParseWithRetry` error logging now includes log URI in messages.
+
+### Minimal Gossip Example
+
+All the code for this, except for the x509ext package, has been moved over
+to the [trillian-examples](https://github.com/google/trillian-examples) repository.
+
+This keeps the code together and removes a circular dependency between the
+two repositories. The package layout and structure remains the same so
+updating should just mean changing any relevant import paths.
+
+### Dependencies
+
+A circular dependency on the [monologue](https://github.com/google/monologue) repository has been removed.
+
+A circular dependency on the [trillian-examples](https://github.com/google/trillian-examples) repository has been removed.
+
+The version of trillian in use has been updated to 1.3.11. This has required
+various other dependency updates including gRPC and protobuf. This code now
+uses the v2 proto API. The Travis tests now expect the 3.11.4 version of
+protoc.
+
+The version of etcd in use has been switched to the one from `go.etcd.io`.
+
+Most of the above changes are to align versions more closely with the ones
+used in the trillian repository.
+
+## v1.1.0
+
+Published 2019-11-14 15:00:00 +0000 UTC
+
+### CTFE
+
+The `reject_expired` and `reject_unexpired` configuration fields for the CTFE
+have been changed so that their behaviour reflects their name:
+
+- `reject_expired` only rejects expired certificates (i.e. it now allows
+ not-yet-valid certificates).
+- `reject_unexpired` only allows expired certificates (i.e. it now rejects
+ not-yet-valid certificates).
+
+A `reject_extensions` configuration field for the CTFE was added, this allows
+submissions to be rejected if they contain an extension with any of the
+specified OIDs.
+
+A `frozen_sth` configuration field for the CTFE was added. This STH will be
+served permanently. It must be signed by the log's private key.
+
+A `/healthz` URL has been added which responds with HTTP 200 OK and the string
+"ok" when the server is up.
+
+#### Flags
+
+The `ct_server` binary has these new flags:
+
+- `mask_internal_errors` - Removes error strings from HTTP 500 responses
+ (Internal Server Error)
+
+Removed default values for `--metrics_endpoint` and `--log_rpc_server` flags.
+This makes it easier to get the documented "unset" behaviour.
+
+#### Metrics
+
+The CTFE exports these new metrics:
+
+- `is_mirror` - set to 1 for mirror logs (copies of logs hosted elsewhere)
+- `frozen_sth_timestamp` - time of the frozen Signed Tree Head in milliseconds
+ since the epoch
+
+#### Kubernetes
+
+Updated prometheus-to-sd to v0.5.2.
+
+A dedicated node pool is no longer required by the Kubernetes manifests.
+
+### Log Lists
+
+A new package has been created for parsing, searching and creating JSON log
+lists compatible with the
+[v2 schema](http://www.gstatic.com/ct/log_list/v2_beta/log_list_schema.json):
+`github.com/google/certificate-transparency-go/loglist2`.
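+
+A minimal usage sketch (assuming the package exposes a `NewFromJSON` parser
+and a top-level `Operators` slice; check the package docs for the current
+API):
+
+```go
+ll, err := loglist2.NewFromJSON(jsonBytes)
+if err == nil {
+	for _, op := range ll.Operators {
+		fmt.Println(op.Name, len(op.Logs))
+	}
+}
+```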
+
+### Docker Images
+
+Our Docker images have been updated to use Go 1.11 and
+[Distroless base images](https://github.com/GoogleContainerTools/distroless).
+
+The CTFE Docker image now sets `ENTRYPOINT`.
+
+### Utilities / Libraries
+
+#### jsonclient
+
+The `jsonclient` package now copes with empty HTTP responses. The user-agent
+header it sends can now be specified.
+
+#### x509 and asn1 forks
+
+Merged upstream changes from Go 1.12 into the `asn1` and `x509` packages.
+
+Added a "lax" tag to `asn1` that applies recursively and makes some checks more
+relaxed:
+
+- parsePrintableString() copes with invalid PrintableString contents, e.g. use
+ of tagPrintableString when the string data is really ISO8859-1.
+- checkInteger() allows integers that are not minimally encoded (and so are
+ not correct DER).
+- OIDs are allowed to be empty.
+
+The following `x509` functions will now return `x509.NonFatalErrors` if ASN.1
+parsing fails in strict mode but succeeds in lax mode. Previously, they only
+attempted strict mode parsing. A usage sketch follows the list below.
+
+- `x509.ParseTBSCertificate()`
+- `x509.ParseCertificate()`
+- `x509.ParseCertificates()`
+
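+A usage sketch (assuming the fork's `x509.IsFatal` helper distinguishes fatal
+from non-fatal parse errors):
+
+```go
+cert, err := x509.ParseCertificate(der)
+if x509.IsFatal(err) {
+	return err // unusable certificate
+}
+// err may be a non-nil x509.NonFatalErrors here; cert is still populated.
+```
+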
+The `x509` package will now treat a negative RSA modulus as a non-fatal error.
+
+The `x509` package now supports RSASES-OAEP and Ed25519 keys.
+
+#### ctclient
+
+The `ctclient` tool now defaults to using
+[all_logs_list.json](https://www.gstatic.com/ct/log_list/all_logs_list.json)
+instead of [log_list.json](https://www.gstatic.com/ct/log_list/log_list.json).
+This can be overridden using the `--log_list` flag.
+
+It can now perform inclusion checks on pre-certificates.
+
+It has these new commands:
+
+- `bisect` - Finds a log entry given a timestamp.
+
+It has these new flags:
+
+- `--chain` - Displays the entire certificate chain
+- `--dns_server` - The DNS server to direct queries to (system resolver by
+ default)
+- `--skip_https_verify` - Skips verification of the HTTPS connection
+- `--timestamp` - Timestamp to use for `bisect` and `inclusion` commands (for
+ `inclusion`, only if --leaf_hash is not used)
+
+It now accepts hex or base64-encoded strings for the `--tree_hash`,
+`--prev_hash` and `--leaf_hash` flags.
+
+#### certcheck
+
+The `certcheck` tool has these new flags:
+
+- `--check_time` - Check current validity of certificate (replaces
+ `--timecheck`)
+- `--check_name` - Check validity of certificate name
+- `--check_eku` - Check validity of EKU nesting
+- `--check_path_len` - Check validity of path length constraint
+- `--check_name_constraint` - Check name constraints
+- `--check_unknown_critical_exts` - Check for unknown critical extensions
+ (replaces `--ignore_unknown_critical_exts`)
+- `--strict` - Set non-zero exit code for non-fatal errors in parsing
+
+#### sctcheck
+
+The `sctcheck` tool has these new flags:
+
+- `--check_inclusion` - Checks that the SCT was honoured (i.e. the
+ corresponding certificate was included in the issuing CT log)
+
+#### ct_hammer
+
+The `ct_hammer` tool has these new flags:
+
+- `--duplicate_chance` - Allows setting the probability of the hammer sending
+ a duplicate submission.
+
+## v1.0.21 - CTFE Logging / Path Options. Mirroring. RPKI. Non Fatal X.509 error improvements
+
+Published 2018-08-20 10:11:04 +0000 UTC
+
+### CTFE
+
+`CTFE` no longer prints certificate chains as long byte strings in messages when handler errors occur. This was obscuring the reason for the failure and wasn't particularly useful.
+
+`CTFE` now has a global log URL path prefix flag and a configuration proto for a log specific path. The latter should help for various migration strategies if existing C++ server logs are going to be converted to run on the new code.
+
+### Mirroring
+
+More progress has been made on log mirroring. We believe that it's now at the point where testing can begin.
+
+### Utilities / Libraries
+
+The `certcheck` and `ct_hammer` utilities have received more enhancements.
+
+`x509` and `x509util` now support Subject Information Access and additional extensions for [RPKI / RFC 3779](https://www.ietf.org/rfc/rfc3779.txt).
+
+`scanner` / `fixchain` and some other command line utilities now have better handling of non-fatal errors.
+
+Commit [3629d6846518309d22c16fee15d1007262a459d2](https://api.github.com/repos/google/certificate-transparency-go/commits/3629d6846518309d22c16fee15d1007262a459d2) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.21)
+
+## v1.0.20 - Minimal Gossip / Go 1.11 Fix / Utility Improvements
+
+Published 2018-07-05 09:21:34 +0000 UTC
+
+Enhancements have been made to various utilities including `scanner`, `sctcheck`, `loglist` and `x509util`.
+
+The `allow_verification_with_non_compliant_keys` flag has been removed from `signatures.go`.
+
+An implementation of Gossip has been added. See the `gossip/minimal` package for more information.
+
+An X.509 compatibility issue for Go 1.11 has been fixed. This should be backwards compatible with 1.10.
+
+Commit [37a384cd035e722ea46e55029093e26687138edf](https://api.github.com/repos/google/certificate-transparency-go/commits/37a384cd035e722ea46e55029093e26687138edf) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.20)
+
## v1.0.19 - CTFE User Quota
Published 2018-06-01 13:51:52 +0000 UTC
@@ -12,10 +405,10 @@ Commit [8736a411b4ff214ea20687e46c2b67d66ebd83fc](https://api.github.com/repos/g
Published 2018-06-01 14:28:20 +0000 UTC
-Work on a log migration tool (Migrillian) is in progress. This is not yet ready for production use but will provide features for mirroring and migrating logs.
-
-The `RequestLog` API allows for logging of SCTs when they are issued by CTFE.
-
+Work on a log migration tool (Migrillian) is in progress. This is not yet ready for production use but will provide features for mirroring and migrating logs.
+
+The `RequestLog` API allows for logging of SCTs when they are issued by CTFE.
+
The CT Go client now supports `GetEntryAndProof`. Utilities have been switched over to use the `glog` package.
Commit [77abf2dac5410a62c04ac1c662c6d0fa54afc2dc](https://api.github.com/repos/google/certificate-transparency-go/commits/77abf2dac5410a62c04ac1c662c6d0fa54afc2dc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.18)
@@ -24,12 +417,12 @@ Commit [77abf2dac5410a62c04ac1c662c6d0fa54afc2dc](https://api.github.com/repos/g
Published 2018-06-01 14:25:16 +0000 UTC
-Now uses Merkle Tree verification from Trillian.
-
-The CT server now supports CORS.
-
-Request tracing added using OpenCensus. For GCE / K8 it just requires the flag to be enabled to export traces to Stackdriver. Other environments may differ.
-
+Now uses Merkle Tree verification from Trillian.
+
+The CT server now supports CORS.
+
+Request tracing added using OpenCensus. For GCE / K8 it just requires the flag to be enabled to export traces to Stackdriver. Other environments may differ.
+
A demo script was added that goes through setting up a simple deployment suitable for development / demo purposes. This may be useful for those new to the project.
Commit [3c3d22ce946447d047a03228ebb4a41e3e4eb15b](https://api.github.com/repos/google/certificate-transparency-go/commits/3c3d22ce946447d047a03228ebb4a41e3e4eb15b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.17)
@@ -38,8 +431,8 @@ Commit [3c3d22ce946447d047a03228ebb4a41e3e4eb15b](https://api.github.com/repos/g
Published 2018-06-01 14:22:23 +0000 UTC
-An integration test was added that goes through a create / drain queue / freeze lifecycle for a log.
-
+An integration test was added that goes through a create / drain queue / freeze lifecycle for a log.
+
Changes to `x509` were merged from Go 1.10.1.
Commit [a72423d09b410b80673fd1135ba1022d04bac6cd](https://api.github.com/repos/google/certificate-transparency-go/commits/a72423d09b410b80673fd1135ba1022d04bac6cd) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.16)
@@ -48,10 +441,10 @@ Commit [a72423d09b410b80673fd1135ba1022d04bac6cd](https://api.github.com/repos/g
Published 2018-06-01 14:20:32 +0000 UTC
-Facilities were added to the `x509` package to control whether verification checks are applied.
-
-Log server requests are now balanced using `gRPClb`.
-
+Facilities were added to the `x509` package to control whether verification checks are applied.
+
+Log server requests are now balanced using `gRPClb`.
+
For Kubernetes, metrics can be published to Stackdriver monitoring.
Commit [684d6eee6092774e54d301ccad0ed61bc8d010c1](https://api.github.com/repos/google/certificate-transparency-go/commits/684d6eee6092774e54d301ccad0ed61bc8d010c1) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.15)
@@ -60,8 +453,8 @@ Commit [684d6eee6092774e54d301ccad0ed61bc8d010c1](https://api.github.com/repos/g
Published 2018-06-01 14:15:37 +0000 UTC
-Support for SQLlite was removed. This motivation was ongoing test flakiness caused by multi-user access. This database may work for an embedded scenario but is not suitable for use in a server environment.
-
+Support for SQLite was removed. The motivation was ongoing test flakiness caused by multi-user access. This database may work for an embedded scenario but is not suitable for use in a server environment.
+
A `LeafHashForLeaf` client API was added and is now used by the CT client and integration tests.
Commit [698cd6a661196db4b2e71437422178ffe8705006](https://api.github.com/repos/google/certificate-transparency-go/commits/698cd6a661196db4b2e71437422178ffe8705006) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.14)
@@ -70,10 +463,10 @@ Commit [698cd6a661196db4b2e71437422178ffe8705006](https://api.github.com/repos/g
Published 2018-06-01 14:15:21 +0000 UTC
-Some of our custom crypto package that were wrapping calls to the standard package have been removed and the base features used directly.
-
-Updates were made to GCE ingress and health checks.
-
+Some of our custom crypto package that were wrapping calls to the standard package have been removed and the base features used directly.
+
+Updates were made to GCE ingress and health checks.
+
The log list utility can verify signatures.
Commit [480c3654a70c5383b9543ec784203030aedbd3a5](https://api.github.com/repos/google/certificate-transparency-go/commits/480c3654a70c5383b9543ec784203030aedbd3a5) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.13)
@@ -82,10 +475,10 @@ Commit [480c3654a70c5383b9543ec784203030aedbd3a5](https://api.github.com/repos/g
Published 2018-06-01 14:13:42 +0000 UTC
-The CT client can now use a JSON loglist to find logs.
-
-CTFE had a fix applied for preissued precerts.
-
+The CT client can now use a JSON loglist to find logs.
+
+CTFE had a fix applied for preissued precerts.
+
A DNS client was added and CT client was extended to support DNS retrieval.
Commit [74c06c95e0b304a050a1c33764c8a01d653a16e3](https://api.github.com/repos/google/certificate-transparency-go/commits/74c06c95e0b304a050a1c33764c8a01d653a16e3) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.12)
@@ -102,8 +495,8 @@ Commit [0856acca7e0ab7f082ae83a1fbb5d21160962efc](https://api.github.com/repos/g
Published 2018-06-01 14:09:47 +0000 UTC
-The CT client was using the wrong protobuffer library package. To guard against this in future a check has been added to our lint config.
-
+The CT client was using the wrong protobuf library package. To guard against this in the future, a check has been added to our lint config.
+
The `x509` and `asn1` packages have had upstream fixes applied from Go 1.10rc1.
Commit [1bec4527572c443752ad4f2830bef88be0533236](https://api.github.com/repos/google/certificate-transparency-go/commits/1bec4527572c443752ad4f2830bef88be0533236) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.10)
@@ -112,10 +505,10 @@ Commit [1bec4527572c443752ad4f2830bef88be0533236](https://api.github.com/repos/g
Published 2018-06-01 14:11:13 +0000 UTC
-The `scanner` utility now displays throughput stats.
-
-Build instructions and README files were updated.
-
+The `scanner` utility now displays throughput stats.
+
+Build instructions and README files were updated.
+
The `certcheck` utility can be told to ignore unknown critical X.509 extensions.
Commit [c06833528d04a94eed0c775104d1107bab9ae17c](https://api.github.com/repos/google/certificate-transparency-go/commits/c06833528d04a94eed0c775104d1107bab9ae17c) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.9)
@@ -191,4 +584,3 @@ Published 2018-06-01 13:59:00 +0000 UTC
This is the point that corresponds to the 1.0 release in the trillian repo.
Commit [abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d](https://api.github.com/repos/google/certificate-transparency-go/commits/abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0)
-
diff --git a/vendor/github.com/google/certificate-transparency-go/CODEOWNERS b/vendor/github.com/google/certificate-transparency-go/CODEOWNERS
new file mode 100644
index 0000000000..0c931e87ce
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/CODEOWNERS
@@ -0,0 +1 @@
+* @google/certificate-transparency
diff --git a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS
index 4336fc52e2..e2c0451bf8 100644
--- a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS
+++ b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS
@@ -47,11 +47,14 @@ Oliver Weidner <Oliver.Weidner@gmail.com>
Pascal Leroy <phl@google.com>
Paul Hadfield <hadfieldp@google.com> <paul@phad.org.uk>
Paul Lietar <lietar@google.com>
+Pavel Kalinnikov <pkalinnikov@google.com> <pavelkalinnikov@gmail.com>
Pierre Phaneuf <pphaneuf@google.com>
Rob Percival <robpercival@google.com>
Rob Stradling <rob@comodo.com>
+Roger Ng <rogerng@google.com> <roger2hk@gmail.com>
Roland Shoemaker <roland@letsencrypt.org>
Ruslan Kovalov <ruslan.kovalyov@gmail.com>
Samuel Lidén Borell <samuel@kodafritt.se>
+Tatiana Merkulova <merkulova@google.com>
Vladimir Rutsky <vladimir@rutsky.org>
Ximin Luo <infinity0@gmx.com>
diff --git a/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000000..c3c0feb3ab
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,16 @@
+<!---
+Describe your changes in detail here.
+If this fixes an issue, please write "Fixes #123", substituting the issue number.
+-->
+
+### Checklist
+
+<!---
+Go over all the following points, and put an `x` in all the boxes that apply.
+Feel free to not tick any boxes that don't apply to this PR (e.g. refactoring may not need a CHANGELOG update).
+If you're unsure about any of these, don't hesitate to ask. We're here to help!
+-->
+
+- [ ] I have updated the [CHANGELOG](CHANGELOG.md).
+ - Adjust the draft version number according to [semantic versioning](https://semver.org/) rules.
+- [ ] I have updated [documentation](docs/) accordingly.
diff --git a/vendor/github.com/google/certificate-transparency-go/README.md b/vendor/github.com/google/certificate-transparency-go/README.md
index 6b71eaa987..7284bb86d7 100644
--- a/vendor/github.com/google/certificate-transparency-go/README.md
+++ b/vendor/github.com/google/certificate-transparency-go/README.md
@@ -6,14 +6,14 @@
This repository holds Go code related to
[Certificate Transparency](https://www.certificate-transparency.org/) (CT). The
-repository requires Go version 1.9.
+repository requires Go version 1.17.
- [Repository Structure](#repository-structure)
- [Trillian CT Personality](#trillian-ct-personality)
- [Working on the Code](#working-on-the-code)
+ - [Running Codebase Checks](#running-codebase-checks)
- [Rebuilding Generated Code](#rebuilding-generated-code)
- [Updating Vendor Code](#updating-vendor-code)
- - [Running Codebase Checks](#running-codebase-checks)
## Repository Structure
@@ -29,57 +29,44 @@ The main parts of the repository are:
[pre-certificates defined in RFC 6962](https://tools.ietf.org/html/rfc6962#section-3.1).
- `tls` holds a library for processing TLS-encoded data as described in
[RFC 5246](https://tools.ietf.org/html/rfc5246).
- - `x509util` provides additional utilities for dealing with
+ - `x509util/` provides additional utilities for dealing with
`x509.Certificate`s.
- CT client libraries:
- The top-level `ct` package (in `.`) holds types and utilities for working
with CT data structures defined in
[RFC 6962](https://tools.ietf.org/html/rfc6962).
- `client/` and `jsonclient/` hold libraries that allow access to CT Logs
- via entrypoints described in
+ via HTTP entrypoints described in
[section 4 of RFC 6962](https://tools.ietf.org/html/rfc6962#section-4).
+ - `dnsclient/` has a library that allows access to CT Logs over
+ [DNS](https://github.com/google/certificate-transparency-rfcs/blob/master/dns/draft-ct-over-dns.md).
- `scanner/` holds a library for scanning the entire contents of an existing
CT Log.
+ - CT Personality for [Trillian](https://github.com/google/trillian):
+ - `trillian/` holds code that allows a Certificate Transparency Log to be
+ run using a Trillian Log as its back-end -- see
+ [below](#trillian-ct-personality).
- Command line tools:
- - `./client/ctclient` allows interaction with a CT Log
+ - `./client/ctclient` allows interaction with a CT Log.
+ - `./ctutil/sctcheck` allows SCTs (signed certificate timestamps) from a CT
+ Log to be verified.
- `./scanner/scanlog` allows an existing CT Log to be scanned for certificates
of interest; please be polite when running this tool against a Log.
- `./x509util/certcheck` allows display and verification of certificates
- `./x509util/crlcheck` allows display and verification of certificate
revocation lists (CRLs).
- - CT Personality for [Trillian](https://github.com/google/trillian):
- - `trillian/` holds code that allows a Certificate Transparency Log to be
- run using a Trillian Log as its back-end -- see
- [below](#trillian-ct-personality).
+ - Other libraries related to CT:
+ - `ctutil/` holds utility functions for validating and verifying CT data
+ structures.
+ - `loglist3/` has a library for reading
+ [v3 JSON lists of CT Logs](https://groups.google.com/a/chromium.org/g/ct-policy/c/IdbrdAcDQto/m/i5KPyzYwBAAJ).
## Trillian CT Personality
The `trillian/` subdirectory holds code and scripts for running a CT Log based
-on the [Trillian](https://github.com/google/trillian) general transparency Log.
-
-The main code for the CT personality is held in `trillian/ctfe`; this code
-responds to HTTP requests on the
-[CT API paths](https://tools.ietf.org/html/rfc6962#section-4) and translates
-them to the equivalent gRPC API requests to the Trillian Log.
-
-This obviously relies on the gRPC API definitions at
-`github.com/google/trillian`; the code also uses common libraries from the
-Trillian project for:
- - exposing monitoring and statistics via an `interface` and corresponding
- Prometheus implementation (`github.com/google/trillian/monitoring/...`)
- - dealing with cryptographic keys (`github.com/google/trillian/crypto/...`).
-
-The `trillian/integration/` directory holds scripts and tests for running the whole
-system locally. In particular:
- - `trillian/integration/ct_integration_test.sh` brings up local processes
- running a Trillian Log server, signer and a CT personality, and exercises the
- complete set of RFC 6962 API entrypoints.
- - `trillian/integration/ct_hammer_test.sh` brings up a complete system and runs
- a continuous randomized test of the CT entrypoints.
-
-These scripts require a local database instance to be configured as described
-in the [Trillian instructions](https://github.com/google/trillian#mysql-setup).
+on the [Trillian](https://github.com/google/trillian) general transparency Log,
+and is [documented separately](trillian/README.md).
## Working on the Code
@@ -90,48 +77,15 @@ dependencies and tools, described in the following sections. The
for the required tools and scripts, as it may be more up-to-date than this
document.
-### Rebuilding Generated Code
-
-Some of the CT Go code is autogenerated from other files:
-
- - [Protocol buffer](https://developers.google.com/protocol-buffers/) message
- definitions are converted to `.pb.go` implementations.
- - A mock implementation of the Trillian gRPC API (in `trillian/mockclient`) is
- created with [GoMock](https://github.com/golang/mock).
-
-Re-generating mock or protobuffer files is only needed if you're changing
-the original files; if you do, you'll need to install the prerequisites:
-
- - `mockgen` tool from https://github.com/golang/mock
- - `protoc`, [Go support for protoc](https://github.com/golang/protobuf) (see
- documentation linked from the
- [protobuf site](https://github.com/google/protobuf))
-
-and run the following:
-
-```bash
-go generate -x ./... # hunts for //go:generate comments and runs them
-```
-
-### Updating Vendor Code
-
-The codebase includes a couple of external projects under the `vendor/`
-subdirectory, to ensure that builds use a fixed version (typically because the
-upstream repository does not guarantee back-compatibility between the tip
-`master` branch and the current stable release). See
-[instructions in the Trillian repo](https://github.com/google/trillian#updating-vendor-code)
-for how to update vendored subtrees.
-
-
### Running Codebase Checks
The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools
-and tests over the codebase.
+and tests over the codebase; please ensure this script passes before sending
+pull requests for review.
```bash
-# Install gometalinter and all linters
-go get -u github.com/alecthomas/gometalinter
-gometalinter --install
+# Install golangci-lint
+go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.46.1
# Run code generation, build, test and linters
./scripts/presubmit.sh
@@ -140,5 +94,27 @@ gometalinter --install
./scripts/presubmit.sh --no-generate
# Or just run the linters alone:
-gometalinter --config=gometalinter.json ./...
+golangci-lint run
+```
+
+### Rebuilding Generated Code
+
+Some of the CT Go code is autogenerated from other files:
+
+- [Protocol buffer](https://developers.google.com/protocol-buffers/) message
+ definitions are converted to `.pb.go` implementations.
+- A mock implementation of the Trillian gRPC API (in `trillian/mockclient`) is
+ created with [GoMock](https://github.com/golang/mock).
+
+Re-generating mock or protobuf files is only needed if you're changing
+the original files; if you do, you'll need to install the prerequisites:
+
+- tools written in `go` can be installed with a single run of `go install`
+ (courtesy of [`tools.go`](./tools/tools.go) and `go.mod`).
+- `protoc` tool: you'll need [version 3.12.4](https://github.com/protocolbuffers/protobuf/releases/tag/v3.12.4) installed, and `PATH` updated to include its `bin/` directory.
+
+With tools installed, run the following:
+
+```bash
+go generate -x ./... # hunts for //go:generate comments and runs them
```
diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/README.md b/vendor/github.com/google/certificate-transparency-go/asn1/README.md
new file mode 100644
index 0000000000..a42ac4ebe3
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/asn1/README.md
@@ -0,0 +1,7 @@
+# Important Notice
+
+This is a fork of the `encoding/asn1` Go package. The original source can be found on
+[GitHub](https://github.com/golang/go).
+
+Be careful about making local modifications to this code as it will
+make maintenance harder in future.
diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go b/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go
index 3af7c48760..aaca5fd260 100644
--- a/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go
+++ b/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go
@@ -5,13 +5,24 @@
// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
// as defined in ITU-T Rec X.690.
//
-// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,''
+// See also “A Layman's Guide to a Subset of ASN.1, BER, and DER,”
// http://luca.ntop.org/Teaching/Appunti/asn1.html.
//
// This is a fork of the Go standard library ASN.1 implementation
-// (encoding/asn1). The main difference is that this version tries to correct
-// for errors (e.g. use of tagPrintableString when the string data is really
-// ISO8859-1 - a common error present in many x509 certificates in the wild.)
+// (encoding/asn1), with the aim of relaxing checks for various things
+// that are common errors present in many X.509 certificates in the
+// wild.
+//
+// Main differences:
+// - Extra "lax" tag that recursively applies and relaxes some strict
+// checks:
+// - parsePrintableString() copes with invalid PrintableString contents,
+// e.g. use of tagPrintableString when the string data is really
+// ISO8859-1.
+// - checkInteger() allows integers that are not minimally encoded (and
+// so are not correct DER).
+// - parseObjectIdentifier() allows zero-length OIDs.
+// - Better diagnostics on which particular field causes errors.
package asn1
// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc
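The practical effect of the new `lax` flag is easiest to see with a small program. An illustrative sketch, using hand-built DER bytes and the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/google/certificate-transparency-go/asn1"
)

// laxInt opts in to the relaxed checks via the new "lax" field tag,
// which also propagates to any nested fields.
type laxInt struct {
	N int `asn1:"lax"`
}

func main() {
	// SEQUENCE { INTEGER 0x007f } -- the redundant leading 0x00 makes
	// the INTEGER non-minimal, i.e. not valid DER.
	der := []byte{0x30, 0x04, 0x02, 0x02, 0x00, 0x7f}

	var strict struct{ N int }
	_, err := asn1.Unmarshal(der, &strict)
	fmt.Println(err) // rejected: integer not minimally-encoded

	var relaxed laxInt
	_, err = asn1.Unmarshal(der, &relaxed)
	fmt.Println(relaxed.N, err) // 127 <nil>
}
```

The strict rejection comes from the `checkInteger` change in the next hunk; with `lax` set, the minimal-encoding check is skipped.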
@@ -31,8 +42,8 @@ import (
"math/big"
"reflect"
"strconv"
- "strings"
"time"
+ "unicode/utf16"
"unicode/utf8"
)
@@ -94,13 +105,16 @@ func parseBool(bytes []byte, fieldName string) (ret bool, err error) {
// checkInteger returns nil if the given bytes are a valid DER-encoded
// INTEGER and an error otherwise.
-func checkInteger(bytes []byte, fieldName string) error {
+func checkInteger(bytes []byte, lax bool, fieldName string) error {
if len(bytes) == 0 {
return StructuralError{"empty integer", fieldName}
}
if len(bytes) == 1 {
return nil
}
+ if lax {
+ return nil
+ }
if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) {
return StructuralError{"integer not minimally-encoded", fieldName}
}
@@ -109,8 +123,8 @@ func checkInteger(bytes []byte, fieldName string) error {
// parseInt64 treats the given bytes as a big-endian, signed integer and
// returns the result.
-func parseInt64(bytes []byte, fieldName string) (ret int64, err error) {
- err = checkInteger(bytes, fieldName)
+func parseInt64(bytes []byte, lax bool, fieldName string) (ret int64, err error) {
+ err = checkInteger(bytes, lax, fieldName)
if err != nil {
return
}
@@ -132,11 +146,11 @@ func parseInt64(bytes []byte, fieldName string) (ret int64, err error) {
// parseInt treats the given bytes as a big-endian, signed integer and returns
// the result.
-func parseInt32(bytes []byte, fieldName string) (int32, error) {
- if err := checkInteger(bytes, fieldName); err != nil {
+func parseInt32(bytes []byte, lax bool, fieldName string) (int32, error) {
+ if err := checkInteger(bytes, lax, fieldName); err != nil {
return 0, err
}
- ret64, err := parseInt64(bytes, fieldName)
+ ret64, err := parseInt64(bytes, lax, fieldName)
if err != nil {
return 0, err
}
@@ -150,8 +164,8 @@ var bigOne = big.NewInt(1)
// parseBigInt treats the given bytes as a big-endian, signed integer and returns
// the result.
-func parseBigInt(bytes []byte, fieldName string) (*big.Int, error) {
- if err := checkInteger(bytes, fieldName); err != nil {
+func parseBigInt(bytes []byte, lax bool, fieldName string) (*big.Int, error) {
+ if err := checkInteger(bytes, lax, fieldName); err != nil {
return nil, err
}
ret := new(big.Int)
@@ -270,8 +284,11 @@ func (oi ObjectIdentifier) String() string {
// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
// returns it. An object identifier is a sequence of variable length integers
// that are assigned in a hierarchy.
-func parseObjectIdentifier(bytes []byte, fieldName string) (s []int, err error) {
+func parseObjectIdentifier(bytes []byte, lax bool, fieldName string) (s ObjectIdentifier, err error) {
if len(bytes) == 0 {
+ if lax {
+ return ObjectIdentifier{}, nil
+ }
err = SyntaxError{"zero length OBJECT IDENTIFIER", fieldName}
return
}
@@ -415,10 +432,25 @@ func isNumeric(b byte) bool {
// parsePrintableString parses an ASN.1 PrintableString from the given byte
// array and returns it.
-func parsePrintableString(bytes []byte, fieldName string) (ret string, err error) {
+func parsePrintableString(bytes []byte, lax bool, fieldName string) (ret string, err error) {
for _, b := range bytes {
if !isPrintable(b, allowAsterisk, allowAmpersand) {
- err = SyntaxError{"PrintableString contains invalid character", fieldName}
+ if !lax {
+ err = SyntaxError{"PrintableString contains invalid character", fieldName}
+ } else {
+ // This might be an ISO8859-1 string stuffed in; if it would
+ // be valid as ISO8859-1, assume that is what happened.
+ // Otherwise try T.61; failing that, give up and report an
+ // error.

+ switch {
+ case couldBeISO8859_1(bytes):
+ ret, err = iso8859_1ToUTF8(bytes), nil
+ case couldBeT61(bytes):
+ ret, err = parseT61String(bytes)
+ default:
+ err = SyntaxError{"PrintableString contains invalid character, couldn't determine correct String type", fieldName}
+ }
+ }
return
}
}
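To illustrate the fallback path above, a sketch with a hand-built PrintableString carrying an ISO8859-1 byte; the struct and field names are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/google/certificate-transparency-go/asn1"
)

// subject is illustrative; the lax tag enables the ISO8859-1/T.61
// fallback shown in the hunk above.
type subject struct {
	CN string `asn1:"lax"`
}

func main() {
	// SEQUENCE { PrintableString "caf\xe9" } -- 0xE9 ('é' in ISO8859-1)
	// is not a legal PrintableString character.
	der := []byte{0x30, 0x06, 0x13, 0x04, 'c', 'a', 'f', 0xE9}

	var s subject
	if _, err := asn1.Unmarshal(der, &s); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(s.CN) // café, recovered via the ISO8859-1 fallback
}
```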
@@ -495,6 +527,29 @@ func parseUTF8String(bytes []byte) (ret string, err error) {
return string(bytes), nil
}
+// BMPString
+
+// parseBMPString parses an ASN.1 BMPString (Basic Multilingual Plane of
+// ISO/IEC/ITU 10646-1) from the given byte slice and returns it.
+func parseBMPString(bmpString []byte) (string, error) {
+ if len(bmpString)%2 != 0 {
+ return "", errors.New("pkcs12: odd-length BMP string")
+ }
+
+ // Strip terminator if present.
+ if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
+ bmpString = bmpString[:l-2]
+ }
+
+ s := make([]uint16, 0, len(bmpString)/2)
+ for len(bmpString) > 0 {
+ s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
+ bmpString = bmpString[2:]
+ }
+
+ return string(utf16.Decode(s)), nil
+}
+
// A RawValue represents an undecoded ASN.1 object.
type RawValue struct {
Class, Tag int
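`parseBMPString` is unexported, but its behaviour -- big-endian UTF-16 with an optional two-byte NUL terminator -- can be restated as a standalone sketch:

```go
package main

import (
	"fmt"
	"unicode/utf16"
)

// decodeBMP restates the logic of parseBMPString: pairs of bytes form
// big-endian UTF-16 code units, with an optional NUL terminator.
func decodeBMP(b []byte) (string, error) {
	if len(b)%2 != 0 {
		return "", fmt.Errorf("odd-length BMP string")
	}
	if l := len(b); l >= 2 && b[l-1] == 0 && b[l-2] == 0 {
		b = b[:l-2] // strip terminator
	}
	u := make([]uint16, 0, len(b)/2)
	for len(b) > 0 {
		u = append(u, uint16(b[0])<<8|uint16(b[1]))
		b = b[2:]
	}
	return string(utf16.Decode(u)), nil
}

func main() {
	s, err := decodeBMP([]byte{0x00, 'H', 0x00, 'i', 0x26, 0x03})
	fmt.Println(s, err) // Hi☃ <nil>
}
```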
@@ -592,7 +647,7 @@ func parseTagAndLength(bytes []byte, initOffset int, fieldName string) (ret tagA
// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
// a number of ASN.1 values from the given byte slice and returns them as a
// slice of Go values of the given type.
-func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type, fieldName string) (ret reflect.Value, err error) {
+func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type, lax bool, fieldName string) (ret reflect.Value, err error) {
matchAny, expectedTag, compoundType, ok := getUniversalType(elemType)
if !ok {
err = StructuralError{"unknown Go type for slice", fieldName}
@@ -609,7 +664,7 @@ func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type
return
}
switch t.tag {
- case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString:
+ case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString:
// We pretend that various other string types are
// PRINTABLE STRINGs so that a sequence of them can be
// parsed into a []string.
@@ -631,7 +686,7 @@ func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type
numElements++
}
ret = reflect.MakeSlice(sliceType, numElements, numElements)
- params := fieldParameters{}
+ params := fieldParameters{lax: lax}
offset := 0
for i := 0; i < numElements; i++ {
offset, err = parseField(ret.Index(i), bytes, offset, params)
@@ -653,7 +708,7 @@ var (
bigIntType = reflect.TypeOf(new(big.Int))
)
-// invalidLength returns true iff offset + length > sliceLength, or if the
+// invalidLength reports whether offset + length > sliceLength, or if the
// addition would overflow.
func invalidLength(offset, length, sliceLength int) bool {
return offset+length < offset || offset+length > sliceLength
@@ -735,22 +790,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
innerBytes := bytes[offset : offset+t.length]
switch t.tag {
case TagPrintableString:
- result, err = parsePrintableString(innerBytes, params.name)
- if err != nil && strings.Contains(err.Error(), "PrintableString contains invalid character") {
- // Probably an ISO8859-1 string stuffed in, check if it
- // would be valid and assume that's what's happened if so,
- // otherwise try T.61, failing that give up and just assign
- // the bytes
- switch {
- case couldBeISO8859_1(innerBytes):
- result, err = iso8859_1ToUTF8(innerBytes), nil
- case couldBeT61(innerBytes):
- result, err = parseT61String(innerBytes)
- default:
- result = nil
- err = errors.New("PrintableString contains invalid character, but couldn't determine correct String type.")
- }
- }
+ result, err = parsePrintableString(innerBytes, params.lax, params.name)
case TagNumericString:
result, err = parseNumericString(innerBytes, params.name)
case TagIA5String:
@@ -760,17 +800,19 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
case TagUTF8String:
result, err = parseUTF8String(innerBytes)
case TagInteger:
- result, err = parseInt64(innerBytes, params.name)
+ result, err = parseInt64(innerBytes, params.lax, params.name)
case TagBitString:
result, err = parseBitString(innerBytes, params.name)
case TagOID:
- result, err = parseObjectIdentifier(innerBytes, params.name)
+ result, err = parseObjectIdentifier(innerBytes, params.lax, params.name)
case TagUTCTime:
result, err = parseUTCTime(innerBytes)
case TagGeneralizedTime:
result, err = parseGeneralizedTime(innerBytes)
case TagOctetString:
result = innerBytes
+ case TagBMPString:
+ result, err = parseBMPString(innerBytes)
default:
// If we don't know how to handle the type, we just leave Value as nil.
}
@@ -839,7 +881,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
if universalTag == TagPrintableString {
if t.class == ClassUniversal {
switch t.tag {
- case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString:
+ case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString:
universalTag = t.tag
}
} else if params.stringType != 0 {
@@ -873,6 +915,12 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
matchAnyClassAndTag = false
}
+ if !params.explicit && params.private && params.tag != nil {
+ expectedClass = ClassPrivate
+ expectedTag = *params.tag
+ matchAnyClassAndTag = false
+ }
+
// We have unwrapped any explicit tagging at this point.
if !matchAnyClassAndTag && (t.class != expectedClass || t.tag != expectedTag) ||
(!matchAny && t.isCompound != compoundType) {
@@ -899,7 +947,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
v.Set(reflect.ValueOf(result))
return
case objectIdentifierType:
- newSlice, err1 := parseObjectIdentifier(innerBytes, params.name)
+ newSlice, err1 := parseObjectIdentifier(innerBytes, params.lax, params.name)
v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice)))
if err1 == nil {
reflect.Copy(v, reflect.ValueOf(newSlice))
@@ -927,7 +975,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
err = err1
return
case enumeratedType:
- parsedInt, err1 := parseInt32(innerBytes, params.name)
+ parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name)
if err1 == nil {
v.SetInt(int64(parsedInt))
}
@@ -937,7 +985,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
v.SetBool(true)
return
case bigIntType:
- parsedInt, err1 := parseBigInt(innerBytes, params.name)
+ parsedInt, err1 := parseBigInt(innerBytes, params.lax, params.name)
if err1 == nil {
v.Set(reflect.ValueOf(parsedInt))
}
@@ -954,13 +1002,13 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
return
case reflect.Int, reflect.Int32, reflect.Int64:
if val.Type().Size() == 4 {
- parsedInt, err1 := parseInt32(innerBytes, params.name)
+ parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name)
if err1 == nil {
val.SetInt(int64(parsedInt))
}
err = err1
} else {
- parsedInt, err1 := parseInt64(innerBytes, params.name)
+ parsedInt, err1 := parseInt64(innerBytes, params.lax, params.name)
if err1 == nil {
val.SetInt(parsedInt)
}
@@ -992,6 +1040,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
}
innerParams := parseFieldParameters(field.Tag.Get("asn1"))
innerParams.name = field.Name
+ innerParams.lax = params.lax
innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, innerParams)
if err != nil {
return
@@ -1008,7 +1057,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
reflect.Copy(val, reflect.ValueOf(innerBytes))
return
}
- newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem(), params.name)
+ newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem(), params.lax, params.name)
if err1 == nil {
val.Set(newSlice)
}
@@ -1018,7 +1067,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
var v string
switch universalTag {
case TagPrintableString:
- v, err = parsePrintableString(innerBytes, params.name)
+ v, err = parsePrintableString(innerBytes, params.lax, params.name)
case TagNumericString:
v, err = parseNumericString(innerBytes, params.name)
case TagIA5String:
@@ -1033,6 +1082,9 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
// that allow the encoding to change midstring and
// such. We give up and pass it as an 8-bit string.
v, err = parseT61String(innerBytes)
+ case TagBMPString:
+ v, err = parseBMPString(innerBytes)
+
default:
err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag), params.name}
}
@@ -1110,11 +1162,13 @@ func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
// The following tags on struct fields have special meaning to Unmarshal:
//
// application specifies that an APPLICATION tag is used
+// private specifies that a PRIVATE tag is used
// default:x sets the default value for optional integer fields (only used if optional is also present)
// explicit specifies that an additional, explicit tag wraps the implicit one
// optional marks the field as ASN.1 OPTIONAL
// set causes a SET, rather than a SEQUENCE type to be expected
// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
+// lax relaxes strict encoding checks for this field, and for any fields within it
//
// If the type of the first field of a structure is RawContent then the raw
// ASN1 contents of the struct will be stored in it.
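A hedged sketch of how several of the tags documented above combine on one struct; the type and field names are illustrative:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/google/certificate-transparency-go/asn1"
)

// cert is illustrative, combining the lax tag with optional/default.
type cert struct {
	Serial  *big.Int `asn1:"lax"`                // tolerate non-DER INTEGERs
	Version int      `asn1:"optional,default:1"` // OPTIONAL DEFAULT 1
}

func main() {
	// Version equals its default, so it is omitted from the encoding.
	der, err := asn1.Marshal(cert{Serial: big.NewInt(7), Version: 1})
	if err != nil {
		panic(err)
	}
	var c cert
	if _, err := asn1.Unmarshal(der, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Serial, c.Version) // 7 1 (Version restored from default)
}
```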
diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/common.go b/vendor/github.com/google/certificate-transparency-go/asn1/common.go
index 3c40856bec..982d06c09e 100644
--- a/vendor/github.com/google/certificate-transparency-go/asn1/common.go
+++ b/vendor/github.com/google/certificate-transparency-go/asn1/common.go
@@ -37,6 +37,7 @@ const (
TagUTCTime = 23
TagGeneralizedTime = 24
TagGeneralString = 27
+ TagBMPString = 30
)
// ASN.1 class types represent the namespace of the tag.
@@ -75,12 +76,14 @@ type fieldParameters struct {
optional bool // true iff the field is OPTIONAL
explicit bool // true iff an EXPLICIT tag is in use.
application bool // true iff an APPLICATION tag is in use.
+ private bool // true iff a PRIVATE tag is in use.
defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
tag *int // the EXPLICIT or IMPLICIT tag (maybe nil).
stringType int // the string tag to use when marshaling.
timeType int // the time tag to use when marshaling.
set bool // true iff this should be encoded as a SET
omitEmpty bool // true iff this should be omitted if empty when marshaling.
+ lax bool // true iff unmarshalling should skip some error checks
name string // name of field for better diagnostics
// Invariants:
@@ -131,8 +134,15 @@ func parseFieldParameters(str string) (ret fieldParameters) {
if ret.tag == nil {
ret.tag = new(int)
}
+ case part == "private":
+ ret.private = true
+ if ret.tag == nil {
+ ret.tag = new(int)
+ }
case part == "omitempty":
ret.omitEmpty = true
+ case part == "lax":
+ ret.lax = true
}
}
return
diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go b/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go
index 22591282f6..9801b065a1 100644
--- a/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go
+++ b/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go
@@ -631,6 +631,8 @@ func makeField(v reflect.Value, params fieldParameters) (e encoder, err error) {
if params.tag != nil {
if params.application {
class = ClassApplication
+ } else if params.private {
+ class = ClassPrivate
} else {
class = ClassContextSpecific
}
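A small round-trip sketch of the new `private` flag on both the marshal and unmarshal paths; the struct is illustrative:

```go
package main

import (
	"fmt"

	"github.com/google/certificate-transparency-go/asn1"
)

// payload tags its field PRIVATE-class instead of context-specific.
type payload struct {
	ID int `asn1:"private,tag:5"`
}

func main() {
	der, err := asn1.Marshal(payload{ID: 42})
	if err != nil {
		panic(err)
	}
	// The field's identifier octet should be 0xc5: class PRIVATE
	// (bits 11) combined with tag number 5.
	fmt.Printf("% x\n", der)

	var p payload
	if _, err := asn1.Unmarshal(der, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.ID) // 42
}
```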
diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go
index 2e55408452..826b7253ea 100644
--- a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go
+++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go
@@ -1,60 +1,85 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: multilog.proto
+// versions:
+// protoc-gen-go v1.28.0
+// protoc v3.20.1
+// source: client/configpb/multilog.proto
package configpb
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import timestamp "github.com/golang/protobuf/ptypes/timestamp"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
// TemporalLogConfig is a set of LogShardConfig messages, whose
// time limits should be contiguous.
type TemporalLogConfig struct {
- Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *TemporalLogConfig) Reset() { *m = TemporalLogConfig{} }
-func (m *TemporalLogConfig) String() string { return proto.CompactTextString(m) }
-func (*TemporalLogConfig) ProtoMessage() {}
-func (*TemporalLogConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_multilog_3c9b797b88da6f07, []int{0}
-}
-func (m *TemporalLogConfig) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TemporalLogConfig.Unmarshal(m, b)
+ Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"`
}
-func (m *TemporalLogConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TemporalLogConfig.Marshal(b, m, deterministic)
-}
-func (dst *TemporalLogConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TemporalLogConfig.Merge(dst, src)
+
+func (x *TemporalLogConfig) Reset() {
+ *x = TemporalLogConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_client_configpb_multilog_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *TemporalLogConfig) XXX_Size() int {
- return xxx_messageInfo_TemporalLogConfig.Size(m)
+
+func (x *TemporalLogConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *TemporalLogConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_TemporalLogConfig.DiscardUnknown(m)
+
+func (*TemporalLogConfig) ProtoMessage() {}
+
+func (x *TemporalLogConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_client_configpb_multilog_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_TemporalLogConfig proto.InternalMessageInfo
+// Deprecated: Use TemporalLogConfig.ProtoReflect.Descriptor instead.
+func (*TemporalLogConfig) Descriptor() ([]byte, []int) {
+ return file_client_configpb_multilog_proto_rawDescGZIP(), []int{0}
+}
-func (m *TemporalLogConfig) GetShard() []*LogShardConfig {
- if m != nil {
- return m.Shard
+func (x *TemporalLogConfig) GetShard() []*LogShardConfig {
+ if x != nil {
+ return x.Shard
}
return nil
}
@@ -62,97 +87,192 @@ func (m *TemporalLogConfig) GetShard() []*LogShardConfig {
// LogShardConfig describes the acceptable date range for a single shard of a temporal
// log.
type LogShardConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"`
// The log's public key in DER-encoded PKIX form.
PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"`
// not_after_start defines the start of the range of acceptable NotAfter
// values, inclusive.
// Leaving this unset implies no lower bound to the range.
- NotAfterStart *timestamp.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"`
+ NotAfterStart *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"`
// not_after_limit defines the end of the range of acceptable NotAfter values,
// exclusive.
// Leaving this unset implies no upper bound to the range.
- NotAfterLimit *timestamp.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ NotAfterLimit *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"`
}
-func (m *LogShardConfig) Reset() { *m = LogShardConfig{} }
-func (m *LogShardConfig) String() string { return proto.CompactTextString(m) }
-func (*LogShardConfig) ProtoMessage() {}
-func (*LogShardConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_multilog_3c9b797b88da6f07, []int{1}
-}
-func (m *LogShardConfig) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogShardConfig.Unmarshal(m, b)
-}
-func (m *LogShardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogShardConfig.Marshal(b, m, deterministic)
-}
-func (dst *LogShardConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogShardConfig.Merge(dst, src)
+func (x *LogShardConfig) Reset() {
+ *x = LogShardConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_client_configpb_multilog_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *LogShardConfig) XXX_Size() int {
- return xxx_messageInfo_LogShardConfig.Size(m)
+
+func (x *LogShardConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *LogShardConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_LogShardConfig.DiscardUnknown(m)
+
+func (*LogShardConfig) ProtoMessage() {}
+
+func (x *LogShardConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_client_configpb_multilog_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_LogShardConfig proto.InternalMessageInfo
+// Deprecated: Use LogShardConfig.ProtoReflect.Descriptor instead.
+func (*LogShardConfig) Descriptor() ([]byte, []int) {
+ return file_client_configpb_multilog_proto_rawDescGZIP(), []int{1}
+}
-func (m *LogShardConfig) GetUri() string {
- if m != nil {
- return m.Uri
+func (x *LogShardConfig) GetUri() string {
+ if x != nil {
+ return x.Uri
}
return ""
}
-func (m *LogShardConfig) GetPublicKeyDer() []byte {
- if m != nil {
- return m.PublicKeyDer
+func (x *LogShardConfig) GetPublicKeyDer() []byte {
+ if x != nil {
+ return x.PublicKeyDer
}
return nil
}
-func (m *LogShardConfig) GetNotAfterStart() *timestamp.Timestamp {
- if m != nil {
- return m.NotAfterStart
+func (x *LogShardConfig) GetNotAfterStart() *timestamppb.Timestamp {
+ if x != nil {
+ return x.NotAfterStart
}
return nil
}
-func (m *LogShardConfig) GetNotAfterLimit() *timestamp.Timestamp {
- if m != nil {
- return m.NotAfterLimit
+func (x *LogShardConfig) GetNotAfterLimit() *timestamppb.Timestamp {
+ if x != nil {
+ return x.NotAfterLimit
}
return nil
}
-func init() {
- proto.RegisterType((*TemporalLogConfig)(nil), "configpb.TemporalLogConfig")
- proto.RegisterType((*LogShardConfig)(nil), "configpb.LogShardConfig")
-}
-
-func init() { proto.RegisterFile("multilog.proto", fileDescriptor_multilog_3c9b797b88da6f07) }
-
-var fileDescriptor_multilog_3c9b797b88da6f07 = []byte{
- // 241 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x8f, 0xb1, 0x4e, 0xc3, 0x30,
- 0x14, 0x45, 0x65, 0x02, 0x08, 0xdc, 0x12, 0xc0, 0x93, 0xd5, 0x85, 0xa8, 0x62, 0xc8, 0xe4, 0x4a,
- 0xe5, 0x0b, 0xa0, 0x6c, 0x64, 0x4a, 0xbb, 0x47, 0x4e, 0xeb, 0x18, 0x0b, 0x3b, 0xcf, 0x72, 0x5e,
- 0x86, 0xfe, 0x25, 0x9f, 0x84, 0x1c, 0x2b, 0x43, 0x37, 0xb6, 0xa7, 0x77, 0xcf, 0xb9, 0xd2, 0xa5,
- 0xb9, 0x1b, 0x2d, 0x1a, 0x0b, 0x5a, 0xf8, 0x00, 0x08, 0xec, 0xee, 0x08, 0x7d, 0x67, 0xb4, 0x6f,
- 0x57, 0x2f, 0x1a, 0x40, 0x5b, 0xb5, 0x99, 0xfe, 0xed, 0xd8, 0x6d, 0xd0, 0x38, 0x35, 0xa0, 0x74,
- 0x3e, 0xa1, 0xeb, 0x1d, 0x7d, 0x3e, 0x28, 0xe7, 0x21, 0x48, 0x5b, 0x81, 0xde, 0x4d, 0x1e, 0x13,
- 0xf4, 0x66, 0xf8, 0x96, 0xe1, 0xc4, 0x49, 0x91, 0x95, 0x8b, 0x2d, 0x17, 0x73, 0x9f, 0xa8, 0x40,
- 0xef, 0x63, 0x92, 0xc0, 0x3a, 0x61, 0xeb, 0x5f, 0x42, 0xf3, 0xcb, 0x84, 0x3d, 0xd1, 0x6c, 0x0c,
- 0x86, 0x93, 0x82, 0x94, 0xf7, 0x75, 0x3c, 0xd9, 0x2b, 0xcd, 0xfd, 0xd8, 0x5a, 0x73, 0x6c, 0x7e,
- 0xd4, 0xb9, 0x39, 0xa9, 0xc0, 0xaf, 0x0a, 0x52, 0x2e, 0xeb, 0x65, 0xfa, 0x7e, 0xa9, 0xf3, 0xa7,
- 0x0a, 0xec, 0x83, 0x3e, 0xf6, 0x80, 0x8d, 0xec, 0x50, 0x85, 0x66, 0x40, 0x19, 0x90, 0x67, 0x05,
- 0x29, 0x17, 0xdb, 0x95, 0x48, 0x53, 0xc4, 0x3c, 0x45, 0x1c, 0xe6, 0x29, 0xf5, 0x43, 0x0f, 0xf8,
- 0x1e, 0x8d, 0x7d, 0x14, 0x2e, 0x3b, 0xac, 0x71, 0x06, 0xf9, 0xf5, 0xff, 0x3b, 0xaa, 0x28, 0xb4,
- 0xb7, 0x13, 0xf2, 0xf6, 0x17, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xd9, 0x50, 0x5b, 0x5b, 0x01, 0x00,
- 0x00,
+var File_client_configpb_multilog_proto protoreflect.FileDescriptor
+
+var file_client_configpb_multilog_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70,
+ 0x62, 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x43, 0x0a, 0x11, 0x54,
+ 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x12, 0x2e, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x22, 0xd0, 0x01, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f,
+ 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70,
+ 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x6e,
+ 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12,
+ 0x42, 0x0a, 0x0f, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d,
+ 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x4c, 0x69,
+ 0x6d, 0x69, 0x74, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x2d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63,
+ 0x79, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x75, 0x6c, 0x74,
+ 0x69, 0x6c, 0x6f, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_client_configpb_multilog_proto_rawDescOnce sync.Once
+ file_client_configpb_multilog_proto_rawDescData = file_client_configpb_multilog_proto_rawDesc
+)
+
+func file_client_configpb_multilog_proto_rawDescGZIP() []byte {
+ file_client_configpb_multilog_proto_rawDescOnce.Do(func() {
+ file_client_configpb_multilog_proto_rawDescData = protoimpl.X.CompressGZIP(file_client_configpb_multilog_proto_rawDescData)
+ })
+ return file_client_configpb_multilog_proto_rawDescData
+}
+
+var file_client_configpb_multilog_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_client_configpb_multilog_proto_goTypes = []interface{}{
+ (*TemporalLogConfig)(nil), // 0: configpb.TemporalLogConfig
+ (*LogShardConfig)(nil), // 1: configpb.LogShardConfig
+ (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp
+}
+var file_client_configpb_multilog_proto_depIdxs = []int32{
+ 1, // 0: configpb.TemporalLogConfig.shard:type_name -> configpb.LogShardConfig
+ 2, // 1: configpb.LogShardConfig.not_after_start:type_name -> google.protobuf.Timestamp
+ 2, // 2: configpb.LogShardConfig.not_after_limit:type_name -> google.protobuf.Timestamp
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_client_configpb_multilog_proto_init() }
+func file_client_configpb_multilog_proto_init() {
+ if File_client_configpb_multilog_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_client_configpb_multilog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TemporalLogConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_client_configpb_multilog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LogShardConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_client_configpb_multilog_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_client_configpb_multilog_proto_goTypes,
+ DependencyIndexes: file_client_configpb_multilog_proto_depIdxs,
+ MessageInfos: file_client_configpb_multilog_proto_msgTypes,
+ }.Build()
+ File_client_configpb_multilog_proto = out.File
+ file_client_configpb_multilog_proto_rawDesc = nil
+ file_client_configpb_multilog_proto_goTypes = nil
+ file_client_configpb_multilog_proto_depIdxs = nil
}
diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto
index b396a90a9c..0774c35e21 100644
--- a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto
+++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,6 +16,8 @@ syntax = "proto3";
package configpb;
+option go_package = "github.com/google/certificate-transparency-go/client/multilog/configpb";
+
import "google/protobuf/timestamp.proto";
// TemporalLogConfig is a set of LogShardConfig messages, whose
diff --git a/vendor/github.com/google/certificate-transparency-go/client/getentries.go b/vendor/github.com/google/certificate-transparency-go/client/getentries.go
index e2cde55c22..103dc81580 100644
--- a/vendor/github.com/google/certificate-transparency-go/client/getentries.go
+++ b/vendor/github.com/google/certificate-transparency-go/client/getentries.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -36,16 +36,9 @@ func (c *LogClient) GetRawEntries(ctx context.Context, start, end int64) (*ct.Ge
"start": strconv.FormatInt(start, 10),
"end": strconv.FormatInt(end, 10),
}
- if ctx == nil {
- ctx = context.TODO()
- }
var resp ct.GetEntriesResponse
- httpRsp, body, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp)
- if err != nil {
- if httpRsp != nil {
- return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
- }
+ if _, _, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp); err != nil {
return nil, err
}
@@ -66,7 +59,7 @@ func (c *LogClient) GetEntries(ctx context.Context, start, end int64) ([]ct.LogE
for i, entry := range resp.Entries {
index := start + int64(i)
logEntry, err := ct.LogEntryFromLeaf(index, &entry)
- if _, ok := err.(x509.NonFatalErrors); !ok && err != nil {
+ if x509.IsFatal(err) {
return nil, err
}
entries[i] = *logEntry
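A minimal caller-side sketch of the simplified error handling; the log URL is a placeholder, and a real run needs a reachable RFC 6962 log:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	// Placeholder URL for illustration only.
	lc, err := client.New("https://ct.example.com", http.DefaultClient,
		jsonclient.Options{})
	if err != nil {
		panic(err)
	}
	entries, err := lc.GetEntries(context.Background(), 0, 31)
	if err != nil {
		// Non-fatal x509 parse quirks no longer surface here; only
		// transport errors and fatal parse errors do.
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.Index)
	}
}
```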
diff --git a/vendor/github.com/google/certificate-transparency-go/client/logclient.go b/vendor/github.com/google/certificate-transparency-go/client/logclient.go
index a79ef3083c..7842c8e288 100644
--- a/vendor/github.com/google/certificate-transparency-go/client/logclient.go
+++ b/vendor/github.com/google/certificate-transparency-go/client/logclient.go
@@ -1,4 +1,4 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
+// Copyright 2014 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -56,18 +56,8 @@ func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, erro
return &LogClient{*logClient}, err
}
-// RspError represents an error that occurred when processing a response from a server,
-// and also includes key details from the http.Response that triggered the error.
-type RspError struct {
- Err error
- StatusCode int
- Body []byte
-}
-
-// Error formats the RspError instance, focusing on the error.
-func (e RspError) Error() string {
- return e.Err.Error()
-}
+// RspError represents a server error including HTTP information.
+type RspError = jsonclient.RspError
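Since `RspError` is now an alias for `jsonclient.RspError`, a single type assertion covers HTTP-level failures from every entrypoint. An illustrative sketch (placeholder URL):

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	lc, err := client.New("https://ct.example.com", http.DefaultClient,
		jsonclient.Options{})
	if err != nil {
		panic(err)
	}
	if _, err := lc.GetSTH(context.Background()); err != nil {
		// client.RspError and jsonclient.RspError are the same type,
		// so one assertion recovers the HTTP details when present.
		if rspErr, ok := err.(client.RspError); ok {
			fmt.Println("HTTP", rspErr.StatusCode, "body:", len(rspErr.Body), "bytes")
		} else {
			fmt.Println("transport error:", err)
		}
	}
}
```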
// Attempts to add |chain| to the log, using the api end-point specified by
// |path|. If provided context expires before submission is complete an
@@ -81,9 +71,6 @@ func (c *LogClient) addChainWithRetry(ctx context.Context, ctype ct.LogEntryType
httpRsp, body, err := c.PostAndParseWithRetry(ctx, path, &req, &resp)
if err != nil {
- if httpRsp != nil {
- return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
- }
return nil, err
}
@@ -132,38 +119,6 @@ func (c *LogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.S
return c.addChainWithRetry(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain)
}
-// AddJSON submits arbitrary data to to XJSON server.
-func (c *LogClient) AddJSON(ctx context.Context, data interface{}) (*ct.SignedCertificateTimestamp, error) {
- req := ct.AddJSONRequest{Data: data}
- var resp ct.AddChainResponse
- httpRsp, body, err := c.PostAndParse(ctx, ct.AddJSONPath, &req, &resp)
- if err != nil {
- if httpRsp != nil {
- return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
- }
- return nil, err
- }
- var ds ct.DigitallySigned
- if rest, err := tls.Unmarshal(resp.Signature, &ds); err != nil {
- return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
- } else if len(rest) > 0 {
- return nil, RspError{
- Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)),
- StatusCode: httpRsp.StatusCode,
- Body: body,
- }
- }
- var logID ct.LogID
- copy(logID.KeyID[:], resp.ID)
- return &ct.SignedCertificateTimestamp{
- SCTVersion: resp.SCTVersion,
- LogID: logID,
- Timestamp: resp.Timestamp,
- Extensions: ct.CTExtensions(resp.Extensions),
- Signature: ds,
- }, nil
-}
-
// GetSTH retrieves the current STH from the log.
// Returns a populated SignedTreeHead, or a non-nil error (which may be of type
// RspError if a raw http.Response is available).
@@ -171,9 +126,6 @@ func (c *LogClient) GetSTH(ctx context.Context) (*ct.SignedTreeHead, error) {
var resp ct.GetSTHResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp)
if err != nil {
- if httpRsp != nil {
- return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
- }
return nil, err
}
@@ -220,11 +172,7 @@ func (c *LogClient) GetSTHConsistency(ctx context.Context, first, second uint64)
"second": strconv.FormatUint(second, base10),
}
var resp ct.GetSTHConsistencyResponse
- httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp)
- if err != nil {
- if httpRsp != nil {
- return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
- }
+ if _, _, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp); err != nil {
return nil, err
}
return resp.Consistency, nil
@@ -239,11 +187,7 @@ func (c *LogClient) GetProofByHash(ctx context.Context, hash []byte, treeSize ui
"hash": b64Hash,
}
var resp ct.GetProofByHashResponse
- httpRsp, body, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp)
- if err != nil {
- if httpRsp != nil {
- return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
- }
+ if _, _, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp); err != nil {
return nil, err
}
return &resp, nil
@@ -254,9 +198,6 @@ func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error)
var resp ct.GetRootsResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetRootsPath, nil, &resp)
if err != nil {
- if httpRsp != nil {
- return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
- }
return nil, err
}
var roots []ct.ASN1Cert
@@ -278,11 +219,7 @@ func (c *LogClient) GetEntryAndProof(ctx context.Context, index, treeSize uint64
"tree_size": strconv.FormatUint(treeSize, base10),
}
var resp ct.GetEntryAndProofResponse
- httpRsp, body, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp)
- if err != nil {
- if httpRsp != nil {
- return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
- }
+ if _, _, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp); err != nil {
return nil, err
}
return &resp, nil
diff --git a/vendor/github.com/google/certificate-transparency-go/client/multilog.go b/vendor/github.com/google/certificate-transparency-go/client/multilog.go
index a4860b6d20..afd75a6db4 100644
--- a/vendor/github.com/google/certificate-transparency-go/client/multilog.go
+++ b/vendor/github.com/google/certificate-transparency-go/client/multilog.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -19,16 +19,16 @@ import (
"crypto/sha256"
"errors"
"fmt"
- "io/ioutil"
"net/http"
+ "os"
"time"
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/client/configpb"
"github.com/google/certificate-transparency-go/jsonclient"
"github.com/google/certificate-transparency-go/x509"
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/proto"
)
type interval struct {
@@ -43,14 +43,16 @@ func TemporalLogConfigFromFile(filename string) (*configpb.TemporalLogConfig, er
return nil, errors.New("log config filename empty")
}
- cfgText, err := ioutil.ReadFile(filename)
+ cfgBytes, err := os.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("failed to read log config: %v", err)
}
var cfg configpb.TemporalLogConfig
- if err := proto.UnmarshalText(string(cfgText), &cfg); err != nil {
- return nil, fmt.Errorf("failed to parse log config: %v", err)
+ if txtErr := prototext.Unmarshal(cfgBytes, &cfg); txtErr != nil {
+ if binErr := proto.Unmarshal(cfgBytes, &cfg); binErr != nil {
+ return nil, fmt.Errorf("failed to parse TemporalLogConfig from %q as text protobuf (%v) or binary protobuf (%v)", filename, txtErr, binErr)
+ }
}
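An end-to-end sketch of the new loader: it accepts either text or binary protobuf, so a hand-written textproto config can be loaded directly. The URIs are placeholders, and the shards carry no public keys, so signature verification is disabled:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
	"time"

	"github.com/google/certificate-transparency-go/client"
)

func main() {
	// Two contiguous shards splitting at 2024-01-01T00:00:00Z.
	cfg := `
shard {
  uri: "https://ct.example.com/2023"
  not_after_limit { seconds: 1704067200 }
}
shard {
  uri: "https://ct.example.com/2024"
  not_after_start { seconds: 1704067200 }
}
`
	f, err := os.CreateTemp("", "multilog-*.textproto")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString(cfg); err != nil {
		panic(err)
	}
	f.Close()

	tlc, err := client.TemporalLogConfigFromFile(f.Name())
	if err != nil {
		panic(err)
	}
	c, err := client.NewTemporalLogClient(tlc, http.DefaultClient)
	if err != nil {
		panic(err)
	}
	idx, err := c.IndexByDate(time.Date(2024, 6, 1, 0, 0, 0, 0, time.UTC))
	fmt.Println(idx, err) // 1 <nil>: a mid-2024 expiry maps to the second shard
}
```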
if len(cfg.Shard) == 0 {
@@ -76,8 +78,8 @@ type TemporalLogClient struct {
// NewTemporalLogClient builds a new client for interacting with a temporal log.
// The provided config should be contiguous and chronological.
-func NewTemporalLogClient(cfg configpb.TemporalLogConfig, hc *http.Client) (*TemporalLogClient, error) {
- if len(cfg.Shard) == 0 {
+func NewTemporalLogClient(cfg *configpb.TemporalLogConfig, hc *http.Client) (*TemporalLogClient, error) {
+ if len(cfg.GetShard()) == 0 {
return nil, errors.New("empty config")
}
@@ -106,7 +108,7 @@ func NewTemporalLogClient(cfg configpb.TemporalLogConfig, hc *http.Client) (*Tem
}
clients := make([]*LogClient, 0, len(cfg.Shard))
for i, shard := range cfg.Shard {
- opts := jsonclient.Options{}
+ opts := jsonclient.Options{UserAgent: "ct-go-multilog/1.0"}
opts.PublicKeyDER = shard.GetPublicKeyDer()
c, err := New(shard.Uri, hc, opts)
if err != nil {
@@ -200,17 +202,17 @@ func (tlc *TemporalLogClient) IndexByDate(when time.Time) (int, error) {
func shardInterval(cfg *configpb.LogShardConfig) (interval, error) {
var interval interval
if cfg.NotAfterStart != nil {
- t, err := ptypes.Timestamp(cfg.NotAfterStart)
- if err != nil {
+ if err := cfg.NotAfterStart.CheckValid(); err != nil {
return interval, fmt.Errorf("failed to parse NotAfterStart: %v", err)
}
+ t := cfg.NotAfterStart.AsTime()
interval.lower = &t
}
if cfg.NotAfterLimit != nil {
- t, err := ptypes.Timestamp(cfg.NotAfterLimit)
- if err != nil {
+ if err := cfg.NotAfterLimit.CheckValid(); err != nil {
return interval, fmt.Errorf("failed to parse NotAfterLimit: %v", err)
}
+ t := cfg.NotAfterLimit.AsTime()
interval.upper = &t
}
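The `ptypes` migration above follows a general pattern: the deprecated conversion call becomes a validity check plus a direct method call on the `timestamppb` type. A self-contained sketch:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := timestamppb.New(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC))
	// Old: t, err := ptypes.Timestamp(ts)
	// New: validate first, then convert.
	if err := ts.CheckValid(); err != nil {
		panic(err)
	}
	fmt.Println(ts.AsTime()) // 2024-01-01 00:00:00 +0000 UTC
}
```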
diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml
new file mode 100644
index 0000000000..37610aae2c
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml
@@ -0,0 +1,201 @@
+#############################################################################
+## The top section of this file is identical in the 3 cloudbuild.*yaml files.
+## Make sure any edits you make here are copied over to the other files too
+## if appropriate.
+##
+## TODO(al): consider if it's possible to merge these 3 files and control via
+## substitutions.
+#############################################################################
+
+timeout: 1200s
+options:
+ machineType: N1_HIGHCPU_32
+ volumes:
+ - name: go-modules
+ path: /go
+ env:
+ - GO111MODULE=on
+ - GOPROXY=https://proxy.golang.org
+ - PROJECT_ROOT=github.com/google/certificate-transparency-go
+ - GOPATH=/go
+
+substitutions:
+ _CLUSTER_NAME: trillian-opensource-ci
+ _MASTER_ZONE: us-central1-a
+
+steps:
+# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps:
+- name: 'gcr.io/cloud-builders/docker'
+ entrypoint: 'bash'
+ args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0']
+- name: 'gcr.io/cloud-builders/docker'
+ args: [
+ 'build',
+ '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '-f', './integration/Dockerfile',
+ '.'
+ ]
+
+# prepare spins up an ephemeral trillian instance for testing use.
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ entrypoint: 'bash'
+ id: 'prepare'
+ args:
+ - '-exc'
+ - |
+ # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders.
+ docker pull gcr.io/$PROJECT_ID/log_server:latest
+ docker tag gcr.io/$PROJECT_ID/log_server:latest deployment_trillian-log-server
+ docker pull gcr.io/$PROJECT_ID/log_signer:latest
+ docker tag gcr.io/$PROJECT_ID/log_signer:latest deployment_trillian-log-signer
+
+ # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo:
+ export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)"
+
+ # We need to fix up Trillian's docker-compose to connect to the CloudBuild network so that tests can use it:
+ echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml
+
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml pull mysql trillian-log-server trillian-log-signer
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml up -d mysql trillian-log-server trillian-log-signer
+
+# Install proto related bits and block on Trillian being ready
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci-ready'
+ entrypoint: 'bash'
+ args:
+ - '-ec'
+ - |
+ go install \
+ github.com/golang/protobuf/proto \
+ github.com/golang/protobuf/protoc-gen-go \
+ github.com/golang/mock/mockgen \
+ go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \
+ github.com/fullstorydev/grpcurl/cmd/grpcurl
+
+
+ # Cache all the modules we'll need too
+ go mod download
+ go test -i ./...
+
+ # Wait for trillian logserver to be up
+ until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done
+ waitFor: ['prepare']
+
+# Run the presubmit tests
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'default_test'
+ env:
+ - 'GOFLAGS='
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'race_detection'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_coverage'
+ env:
+ - 'GOFLAGS='
+ - 'PRESUBMIT_OPTS=--no-linters --coverage'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_race'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'with_pkcs11_and_race'
+ env:
+ - 'GOFLAGS=-race --tags=pkcs11'
+ - 'PRESUBMIT_OPTS=--no-linters'
+ - 'WITH_PKCS11=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+# Collect and submit codecoverage reports
+- name: 'gcr.io/cloud-builders/curl'
+ id: 'codecov.io'
+ entrypoint: bash
+ args: ['-c', 'bash <(curl -s https://codecov.io/bash)']
+ env:
+ - 'VCS_COMMIT_ID=$COMMIT_SHA'
+ - 'VCS_BRANCH_NAME=$BRANCH_NAME'
+ - 'VCS_PULL_REQUEST=$_PR_NUMBER'
+ - 'CI_BUILD_ID=$BUILD_ID'
+ - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger
+ waitFor: ['etcd_with_coverage']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci_complete'
+ entrypoint: /bin/true
+ waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race']
+
+############################################################################
+## End of replicated section.
+## Below are deployment specific steps for the CD env.
+############################################################################
+
+- id: build_ctfe
+ name: gcr.io/cloud-builders/docker
+ args:
+ - build
+ - --file=trillian/examples/deployment/docker/ctfe/Dockerfile
+ - --tag=gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
+ - --cache-from=gcr.io/${PROJECT_ID}/ctfe
+ - .
+ waitFor: [-]
+- id: build_envsubst
+ name: gcr.io/cloud-builders/docker
+ args:
+ - build
+ - trillian/examples/deployment/docker/envsubst
+ - -t
+ - envsubst
+ waitFor: ['ci_complete']
+- id: envsubst_kubernetes_configs
+ name: envsubst
+ args:
+ - trillian/examples/deployment/kubernetes/ctfe-deployment.yaml
+ - trillian/examples/deployment/kubernetes/ctfe-service.yaml
+ - trillian/examples/deployment/kubernetes/ctfe-ingress.yaml
+ env:
+ - PROJECT_ID=${PROJECT_ID}
+ - IMAGE_TAG=${COMMIT_SHA}
+ waitFor:
+ - build_envsubst
+- id: update_kubernetes_configs_dryrun
+ name: gcr.io/cloud-builders/kubectl
+ args:
+ - apply
+ - --dry-run=server
+ - -f=trillian/examples/deployment/kubernetes/ctfe-deployment.yaml
+ - -f=trillian/examples/deployment/kubernetes/ctfe-service.yaml
+ - -f=trillian/examples/deployment/kubernetes/ctfe-ingress.yaml
+ env:
+ - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
+ - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
+ waitFor:
+ - envsubst_kubernetes_configs
+ - build_ctfe
+
+images:
+- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
+- gcr.io/${PROJECT_ID}/ct_testbase:latest
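A note on the replicated "prepare" step above: the echo -e line appends a Compose-level override so the ephemeral Trillian containers join Cloud Build's own "cloudbuild" network, which is where the build steps themselves execute. With indentation normalized for readability, the appended fragment is intended to parse as the following YAML:

    networks:
      default:
        external:
          name: cloudbuild

That override is what lets later steps reach the log server by container name (deployment_trillian-log-server_1:8090 in the env blocks above).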
diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml
new file mode 100644
index 0000000000..6b902c351d
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml
@@ -0,0 +1,217 @@
+#############################################################################
+## The top section of this file is identical in the 3 cloudbuild.*yaml files.
+## Make sure any edits you make here are copied over to the other files too
+## if appropriate.
+##
+## TODO(al): consider if it's possible to merge these 3 files and control via
+## substitutions.
+#############################################################################
+
+timeout: 1200s
+options:
+ machineType: N1_HIGHCPU_32
+ volumes:
+ - name: go-modules
+ path: /go
+ env:
+ - GO111MODULE=on
+ - GOPROXY=https://proxy.golang.org
+ - PROJECT_ROOT=github.com/google/certificate-transparency-go
+ - GOPATH=/go
+
+substitutions:
+ _CLUSTER_NAME: trillian-opensource-ci
+ _MASTER_ZONE: us-central1-a
+
+steps:
+# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps:
+- name: 'gcr.io/cloud-builders/docker'
+ entrypoint: 'bash'
+ args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0']
+- name: 'gcr.io/cloud-builders/docker'
+ args: [
+ 'build',
+ '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '-f', './integration/Dockerfile',
+ '.'
+ ]
+
+# prepare spins up an ephemeral trillian instance for testing use.
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ entrypoint: 'bash'
+ id: 'prepare'
+ args:
+ - '-exc'
+ - |
+ # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders.
+ docker pull gcr.io/$PROJECT_ID/log_server:latest
+ docker tag gcr.io/$PROJECT_ID/log_server:latest deployment_trillian-log-server
+ docker pull gcr.io/$PROJECT_ID/log_signer:latest
+ docker tag gcr.io/$PROJECT_ID/log_signer:latest deployment_trillian-log-signer
+
+ # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo:
+ export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)"
+
+ # We need to fix up Trillian's docker-compose to connect to the CloudBuild network so that tests can use it:
+ echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml
+
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml pull mysql trillian-log-server trillian-log-signer
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml up -d mysql trillian-log-server trillian-log-signer
+
+# Install proto-related bits and block on Trillian being ready
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci-ready'
+ entrypoint: 'bash'
+ args:
+ - '-ec'
+ - |
+ go install \
+ github.com/golang/protobuf/proto \
+ github.com/golang/protobuf/protoc-gen-go \
+ github.com/golang/mock/mockgen \
+ go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \
+ github.com/fullstorydev/grpcurl/cmd/grpcurl
+
+
+ # Cache all the modules we'll need too
+ go mod download
+ go test -i ./...
+
+ # Wait for trillian logserver to be up
+ until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done
+ waitFor: ['prepare']
+
+# Run the presubmit tests
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'default_test'
+ env:
+ - 'GOFLAGS='
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'race_detection'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_coverage'
+ env:
+ - 'GOFLAGS='
+ - 'PRESUBMIT_OPTS=--no-linters --coverage'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_race'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'with_pkcs11_and_race'
+ env:
+ - 'GOFLAGS=-race --tags=pkcs11'
+ - 'PRESUBMIT_OPTS=--no-linters'
+ - 'WITH_PKCS11=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+# Collect and submit code coverage reports
+- name: 'gcr.io/cloud-builders/curl'
+ id: 'codecov.io'
+ entrypoint: bash
+ args: ['-c', 'bash <(curl -s https://codecov.io/bash)']
+ env:
+ - 'VCS_COMMIT_ID=$COMMIT_SHA'
+ - 'VCS_BRANCH_NAME=$BRANCH_NAME'
+ - 'VCS_PULL_REQUEST=$_PR_NUMBER'
+ - 'CI_BUILD_ID=$BUILD_ID'
+ - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger
+ waitFor: ['etcd_with_coverage']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci_complete'
+ entrypoint: /bin/true
+ waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race']
+
+############################################################################
+## End of replicated section.
+## Below are deployment specific steps for the CD env.
+############################################################################
+
+- id: build_ctfe
+ name: gcr.io/cloud-builders/docker
+ args:
+ - build
+ - --file=trillian/examples/deployment/docker/ctfe/Dockerfile
+ - --tag=gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
+ - --cache-from=gcr.io/${PROJECT_ID}/ctfe
+ - .
+ waitFor: ["-"]
+- id: push_ctfe
+ name: gcr.io/cloud-builders/docker
+ args:
+ - push
+ - gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
+ waitFor:
+ - build_ctfe
+- id: tag_latest_ctfe
+ name: gcr.io/cloud-builders/gcloud
+ args:
+ - container
+ - images
+ - add-tag
+ - gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
+ - gcr.io/${PROJECT_ID}/ctfe:latest
+ waitFor:
+ - push_ctfe
+- id: build_envsubst
+ name: gcr.io/cloud-builders/docker
+ args:
+ - build
+ - trillian/examples/deployment/docker/envsubst
+ - -t
+ - envsubst
+ waitFor: ["-"]
+- id: envsubst_kubernetes_configs
+ name: envsubst
+ args:
+ - trillian/examples/deployment/kubernetes/ctfe-deployment.yaml
+ - trillian/examples/deployment/kubernetes/ctfe-service.yaml
+ - trillian/examples/deployment/kubernetes/ctfe-ingress.yaml
+ env:
+ - PROJECT_ID=${PROJECT_ID}
+ - IMAGE_TAG=${COMMIT_SHA}
+ waitFor:
+ - build_envsubst
+- id: update_kubernetes_configs
+ name: gcr.io/cloud-builders/kubectl
+ args:
+ - apply
+ - -f=trillian/examples/deployment/kubernetes/ctfe-deployment.yaml
+ - -f=trillian/examples/deployment/kubernetes/ctfe-service.yaml
+ - -f=trillian/examples/deployment/kubernetes/ctfe-ingress.yaml
+ env:
+ - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
+ - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
+ waitFor:
+ - envsubst_kubernetes_configs
+ - push_ctfe
+
+images:
+- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
+- gcr.io/${PROJECT_ID}/ct_testbase:latest
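A note on the $$ escapes used throughout these scripts: Cloud Build expands $PROJECT_ID-style substitutions before a step runs, so a dollar sign that must reach bash itself has to be doubled. A minimal sketch of the two behaviours inside an args script:

    # Expanded by Cloud Build before the step executes:
    echo "building in $PROJECT_ID"
    # $$ passes a literal $ through, so bash expands this at run time:
    export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)"

That is why the prepare and ci-ready steps mix both forms in a single script.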
diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml
index 8c8c5ab6f8..33585255f2 100644
--- a/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml
+++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml
@@ -1,10 +1,167 @@
+#############################################################################
+## The top section of this file is identical in the 3 cloudbuild.*yaml files.
+## Make sure any edits you make here are copied over to the other files too
+## if appropriate.
+##
+## TODO(al): consider if it's possible to merge these 3 files and control via
+## substitutions.
+#############################################################################
+
+timeout: 1200s
+options:
+ machineType: N1_HIGHCPU_32
+ volumes:
+ - name: go-modules
+ path: /go
+ env:
+ - GO111MODULE=on
+ - GOPROXY=https://proxy.golang.org
+ - PROJECT_ROOT=github.com/google/certificate-transparency-go
+ - GOPATH=/go
+
+substitutions:
+ _CLUSTER_NAME: trillian-opensource-ci
+ _MASTER_ZONE: us-central1-a
+
steps:
+# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps:
+- name: 'gcr.io/cloud-builders/docker'
+ entrypoint: 'bash'
+ args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0']
+- name: 'gcr.io/cloud-builders/docker'
+ args: [
+ 'build',
+ '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '-f', './integration/Dockerfile',
+ '.'
+ ]
+
+# prepare spins up an ephemeral trillian instance for testing use.
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ entrypoint: 'bash'
+ id: 'prepare'
+ args:
+ - '-exc'
+ - |
+ # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders.
+ docker pull gcr.io/$PROJECT_ID/log_server:latest
+ docker tag gcr.io/$PROJECT_ID/log_server:latest deployment_trillian-log-server
+ docker pull gcr.io/$PROJECT_ID/log_signer:latest
+ docker tag gcr.io/$PROJECT_ID/log_signer:latest deployment_trillian-log-signer
+
+ # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo:
+ export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)"
+
+ # We need to fix up Trillian's docker-compose to connect to the CloudBuild network so that tests can use it:
+ echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml
+
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml pull mysql trillian-log-server trillian-log-signer
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml up -d mysql trillian-log-server trillian-log-signer
+
+# Install proto-related bits and block on Trillian being ready
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci-ready'
+ entrypoint: 'bash'
+ args:
+ - '-ec'
+ - |
+ go install \
+ github.com/golang/protobuf/proto \
+ github.com/golang/protobuf/protoc-gen-go \
+ github.com/golang/mock/mockgen \
+ go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \
+ github.com/fullstorydev/grpcurl/cmd/grpcurl
+
+
+ # Cache all the modules we'll need too
+ go mod download
+ go test -i ./...
+
+ # Wait for trillian logserver to be up
+ until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done
+ waitFor: ['prepare']
+
+# Run the presubmit tests
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'default_test'
+ env:
+ - 'GOFLAGS='
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'race_detection'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_coverage'
+ env:
+ - 'GOFLAGS='
+ - 'PRESUBMIT_OPTS=--no-linters --coverage'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_race'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'with_pkcs11_and_race'
+ env:
+ - 'GOFLAGS=-race --tags=pkcs11'
+ - 'PRESUBMIT_OPTS=--no-linters'
+ - 'WITH_PKCS11=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+# Collect and submit code coverage reports
+- name: 'gcr.io/cloud-builders/curl'
+ id: 'codecov.io'
+ entrypoint: bash
+ args: ['-c', 'bash <(curl -s https://codecov.io/bash)']
+ env:
+ - 'VCS_COMMIT_ID=$COMMIT_SHA'
+ - 'VCS_BRANCH_NAME=$BRANCH_NAME'
+ - 'VCS_PULL_REQUEST=$_PR_NUMBER'
+ - 'CI_BUILD_ID=$BUILD_ID'
+ - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger
+ waitFor: ['etcd_with_coverage']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci_complete'
+ entrypoint: /bin/true
+ waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race']
+
+############################################################################
+## End of replicated section.
+## Below are deployment specific steps for the CD env.
+############################################################################
+
- id: build_ctfe
name: gcr.io/cloud-builders/docker
args:
- build
- --file=trillian/examples/deployment/docker/ctfe/Dockerfile
- --tag=gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}
+ - --cache-from=gcr.io/${PROJECT_ID}/ctfe
- .
+
images:
- gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}
+- gcr.io/${PROJECT_ID}/ct_testbase:latest
diff --git a/vendor/github.com/google/certificate-transparency-go/codecov.yml b/vendor/github.com/google/certificate-transparency-go/codecov.yml
new file mode 100644
index 0000000000..7269ff2715
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/codecov.yml
@@ -0,0 +1,19 @@
+# Customizations to codecov for c-t-go repo. This will be merged into
+# the team / default codecov yaml file.
+#
+# Validate changes with:
+# curl --data-binary @codecov.yml https://codecov.io/validate
+
+# Exclude code that's for testing, demos or utilities that aren't really
+# part of production releases.
+ignore:
+ - "**/mock_*.go"
+ - "**/testonly"
+ - "trillian/integration"
+
+coverage:
+ status:
+ project:
+ default:
+ # Allow 1% coverage drop without complaining, to avoid being too noisy.
+ threshold: 1%
diff --git a/vendor/github.com/google/certificate-transparency-go/gometalinter.json b/vendor/github.com/google/certificate-transparency-go/gometalinter.json
deleted file mode 100644
index 4eba1b63c8..0000000000
--- a/vendor/github.com/google/certificate-transparency-go/gometalinter.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
- "Deadline": "60s",
- "Linters": {
- "license": "./scripts/check_license.sh:PATH:LINE:MESSAGE",
- "forked": "./scripts/check_forked.sh:PATH:LINE:MESSAGE",
- "unforked": "./scripts/check_unforked.sh:PATH:LINE:MESSAGE"
- },
- "Enable": [
- "forked",
- "gocyclo",
- "gofmt",
- "goimports",
- "golint",
- "license",
- "misspell",
- "unforked",
- "vet"
- ],
- "Exclude": [
- "x509/",
- "asn1/",
- ".+\\.pb\\.go",
- ".+\\.pb\\.gw\\.go",
- "mock_.+\\.go"
- ],
- "Cyclo": 40,
- "Vendor": true
-}
diff --git a/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go b/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go
index 0c969d094e..30932f30d1 100644
--- a/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go
+++ b/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go b/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go
index c34fa833d5..c3cf8515d0 100644
--- a/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go
+++ b/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import (
"encoding/json"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"log"
"math/rand"
"net/http"
@@ -33,6 +33,7 @@ import (
ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/x509"
"golang.org/x/net/context/ctxhttp"
+ "k8s.io/klog/v2"
)
const maxJitter = 250 * time.Millisecond
@@ -58,6 +59,7 @@ type JSONClient struct {
Verifier *ct.SignatureVerifier // nil for no verification (e.g. no public key available)
logger Logger // interface to use for logging warnings and errors
backoff backoffer // object used to store and calculate backoff information
+ userAgent string // If set, this is sent as the UserAgent header.
}
// Logger is a simple logging interface used to log internal errors and warnings
@@ -75,6 +77,8 @@ type Options struct {
PublicKey string
// DER format public key to use for signature verification.
PublicKeyDER []byte
+ // UserAgent, if set, will be sent as the User-Agent header with each request.
+ UserAgent string
}
// ParsePublicKey parses and returns the public key contained in opts.
@@ -105,6 +109,19 @@ func (bl *basicLogger) Printf(msg string, args ...interface{}) {
log.Printf(msg, args...)
}
+// RspError represents an error that occurred when processing a response from a server,
+// and also includes key details from the http.Response that triggered the error.
+type RspError struct {
+ Err error
+ StatusCode int
+ Body []byte
+}
+
+// Error formats the RspError instance, focusing on the error.
+func (e RspError) Error() string {
+ return e.Err.Error()
+}
+
// New constructs a new JSONClient instance, for the given base URI, using the
// given http.Client object (if provided) and the Options object.
// If opts does not specify a public key, signatures will not be verified.
@@ -136,6 +153,7 @@ func New(uri string, hc *http.Client, opts Options) (*JSONClient, error) {
Verifier: verifier,
logger: logger,
backoff: &backoff{},
+ userAgent: opts.UserAgent,
}, nil
}
@@ -144,11 +162,10 @@ func (c *JSONClient) BaseURI() string {
return c.uri
}
-// GetAndParse makes a HTTP GET call to the given path, and attempta to parse
+// GetAndParse makes a HTTP GET call to the given path, and attempts to parse
// the response as a JSON representation of the rsp structure. Returns the
-// http.Response, the body of the response, and an error. Note that the
-// returned http.Response can be non-nil even when an error is returned,
-// in particular when the HTTP status is not OK or when the JSON parsing fails.
+// http.Response, the body of the response, and an error (which may be of
+// type RspError if the HTTP response was available).
func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[string]string, rsp interface{}) (*http.Response, []byte, error) {
if ctx == nil {
return nil, nil, errors.New("context.Context required")
@@ -159,10 +176,14 @@ func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[st
vals.Add(k, v)
}
fullURI := fmt.Sprintf("%s%s?%s", c.uri, path, vals.Encode())
+ klog.V(2).Infof("GET %s", fullURI)
httpReq, err := http.NewRequest(http.MethodGet, fullURI, nil)
if err != nil {
return nil, nil, err
}
+ if len(c.userAgent) != 0 {
+ httpReq.Header.Set("User-Agent", c.userAgent)
+ }
httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq)
if err != nil {
@@ -170,18 +191,18 @@ func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[st
}
// Read everything now so http.Client can reuse the connection.
- body, err := ioutil.ReadAll(httpRsp.Body)
+ body, err := io.ReadAll(httpRsp.Body)
httpRsp.Body.Close()
if err != nil {
- return httpRsp, body, fmt.Errorf("failed to read response body: %v", err)
+ return nil, nil, RspError{Err: fmt.Errorf("failed to read response body: %v", err), StatusCode: httpRsp.StatusCode, Body: body}
}
if httpRsp.StatusCode != http.StatusOK {
- return httpRsp, body, fmt.Errorf("got HTTP Status %q", httpRsp.Status)
+ return nil, nil, RspError{Err: fmt.Errorf("got HTTP Status %q", httpRsp.Status), StatusCode: httpRsp.StatusCode, Body: body}
}
if err := json.NewDecoder(bytes.NewReader(body)).Decode(rsp); err != nil {
- return httpRsp, body, err
+ return nil, nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return httpRsp, body, nil
@@ -190,9 +211,7 @@ func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[st
// PostAndParse makes a HTTP POST call to the given path, including the request
// parameters, and attempts to parse the response as a JSON representation of
// the rsp structure. Returns the http.Response, the body of the response, and
-// an error. Note that the returned http.Response can be non-nil even when an
-// error is returned, in particular when the HTTP status is not OK or when the
-// JSON parsing fails.
+// an error (which may be of type RspError if the HTTP response was available).
func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) {
if ctx == nil {
return nil, nil, errors.New("context.Context required")
@@ -203,10 +222,14 @@ func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp int
return nil, nil, err
}
fullURI := fmt.Sprintf("%s%s", c.uri, path)
+ klog.V(2).Infof("POST %s", fullURI)
httpReq, err := http.NewRequest(http.MethodPost, fullURI, bytes.NewReader(postBody))
if err != nil {
return nil, nil, err
}
+ if len(c.userAgent) != 0 {
+ httpReq.Header.Set("User-Agent", c.userAgent)
+ }
httpReq.Header.Set("Content-Type", "application/json")
httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq)
@@ -214,16 +237,19 @@ func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp int
// Read all of the body, if there is one, so that the http.Client can do Keep-Alive.
var body []byte
if httpRsp != nil {
- body, err = ioutil.ReadAll(httpRsp.Body)
+ body, err = io.ReadAll(httpRsp.Body)
httpRsp.Body.Close()
}
if err != nil {
- return httpRsp, body, err
+ if httpRsp != nil {
+ return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err}
+ }
+ return nil, nil, err
}
if httpRsp.StatusCode == http.StatusOK {
if err = json.Unmarshal(body, &rsp); err != nil {
- return httpRsp, body, err
+ return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err}
}
}
return httpRsp, body, nil
@@ -260,15 +286,17 @@ func (c *JSONClient) PostAndParseWithRetry(ctx context.Context, path string, req
return nil, nil, err
}
wait := c.backoff.set(nil)
- c.logger.Printf("Request failed, backing-off for %s: %s", wait, err)
+ c.logger.Printf("Request to %s failed, backing-off %s: %s", c.uri, wait, err)
} else {
switch {
case httpRsp.StatusCode == http.StatusOK:
return httpRsp, body, nil
case httpRsp.StatusCode == http.StatusRequestTimeout:
// Request timeout, retry immediately
- c.logger.Printf("Request timed out, retrying immediately")
+ c.logger.Printf("Request to %s timed out, retrying immediately", c.uri)
case httpRsp.StatusCode == http.StatusServiceUnavailable:
+ fallthrough
+ case httpRsp.StatusCode == http.StatusTooManyRequests:
var backoff *time.Duration
// Retry-After may be either a number of seconds as an int or an RFC 1123
// date string (RFC 7231 Section 7.1.3)
@@ -277,14 +305,17 @@ func (c *JSONClient) PostAndParseWithRetry(ctx context.Context, path string, req
b := time.Duration(seconds) * time.Second
backoff = &b
} else if date, err := time.Parse(time.RFC1123, retryAfter); err == nil {
- b := date.Sub(time.Now())
+ b := time.Until(date)
backoff = &b
}
}
wait := c.backoff.set(backoff)
- c.logger.Printf("Request failed, backing-off for %s: got HTTP status %s", wait, httpRsp.Status)
+ c.logger.Printf("Request to %s failed, backing-off for %s: got HTTP status %s", c.uri, wait, httpRsp.Status)
default:
- return httpRsp, body, fmt.Errorf("got HTTP Status %q", httpRsp.Status)
+ return nil, nil, RspError{
+ StatusCode: httpRsp.StatusCode,
+ Body: body,
+ Err: fmt.Errorf("got HTTP status %q", httpRsp.Status)}
}
}
if err := c.waitForBackoff(ctx); err != nil {
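With these changes the jsonclient methods return a nil http.Response and body whenever they return an error, and surface the HTTP status and raw body through RspError instead. A minimal caller-side sketch (fetchSTH and its logging are illustrative, not part of the library):

    package main

    import (
        "context"
        "log"

        ct "github.com/google/certificate-transparency-go"
        "github.com/google/certificate-transparency-go/jsonclient"
    )

    func fetchSTH(ctx context.Context, client *jsonclient.JSONClient) (*ct.GetSTHResponse, error) {
        var sth ct.GetSTHResponse
        if _, _, err := client.GetAndParse(ctx, "/ct/v1/get-sth", nil, &sth); err != nil {
            // RspError is returned by value, so a plain type assertion
            // recovers the status code and body when a response existed.
            if rspErr, ok := err.(jsonclient.RspError); ok {
                log.Printf("HTTP %d: %v (body: %d bytes)", rspErr.StatusCode, rspErr.Err, len(rspErr.Body))
            }
            return nil, err
        }
        return &sth, nil
    }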
diff --git a/vendor/github.com/google/certificate-transparency-go/proto_gen.go b/vendor/github.com/google/certificate-transparency-go/proto_gen.go
new file mode 100644
index 0000000000..565c6bbbc8
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/proto_gen.go
@@ -0,0 +1,25 @@
+// Copyright 2021 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ct
+
+// We do the protoc generation here (rather than in the individual directories)
+// in order to work around the newly-enforced rule that all protobuf file "names"
+// must be unique.
+// See https://developers.google.com/protocol-buffers/docs/proto#packages and
+// https://github.com/golang/protobuf/issues/1122
+
+//go:generate sh -c "protoc -I=. -I$(go list -f '{{ .Dir }}' github.com/google/trillian) -I$(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go) --go_out=paths=source_relative:. trillian/ctfe/configpb/config.proto"
+//go:generate sh -c "protoc -I=. -I$(go list -f '{{ .Dir }}' github.com/google/trillian) -I$(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go) --go_out=paths=source_relative:. trillian/migrillian/configpb/config.proto"
+//go:generate sh -c "protoc -I=. -I$(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go) --go_out=paths=source_relative:. client/configpb/multilog.proto"
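Since all three directives live in the repository root package, regenerating the config protos is a single command, assuming protoc and protoc-gen-go are on PATH (the testbase image and the ci-ready step above arrange this):

    go generate github.com/google/certificate-transparency-go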
diff --git a/vendor/github.com/google/certificate-transparency-go/serialization.go b/vendor/github.com/google/certificate-transparency-go/serialization.go
index 39053ecd30..2a6c21ed4c 100644
--- a/vendor/github.com/google/certificate-transparency-go/serialization.go
+++ b/vendor/github.com/google/certificate-transparency-go/serialization.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,9 +17,7 @@ package ct
import (
"crypto"
"crypto/sha256"
- "encoding/json"
"fmt"
- "strings"
"time"
"github.com/google/certificate-transparency-go/tls"
@@ -46,8 +44,6 @@ func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry)
IssuerKeyHash: entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
TBSCertificate: entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate,
}
- case XJSONLogEntryType:
- input.JSONEntry = entry.Leaf.TimestampedEntry.JSONEntry
default:
return nil, fmt.Errorf("unsupported entry type %s", entry.Leaf.TimestampedEntry.EntryType)
}
@@ -92,32 +88,6 @@ func CreateX509MerkleTreeLeaf(cert ASN1Cert, timestamp uint64) *MerkleTreeLeaf {
}
}
-// CreateJSONMerkleTreeLeaf creates the merkle tree leaf for json data.
-func CreateJSONMerkleTreeLeaf(data interface{}, timestamp uint64) *MerkleTreeLeaf {
- jsonData, err := json.Marshal(AddJSONRequest{Data: data})
- if err != nil {
- return nil
- }
- // Match the JSON serialization implemented by json-c
- jsonStr := strings.Replace(string(jsonData), ":", ": ", -1)
- jsonStr = strings.Replace(jsonStr, ",", ", ", -1)
- jsonStr = strings.Replace(jsonStr, "{", "{ ", -1)
- jsonStr = strings.Replace(jsonStr, "}", " }", -1)
- jsonStr = strings.Replace(jsonStr, "/", `\/`, -1)
- // TODO: Pending google/certificate-transparency#1243, replace with
- // ObjectHash once supported by CT server.
-
- return &MerkleTreeLeaf{
- Version: V1,
- LeafType: TimestampedEntryLeafType,
- TimestampedEntry: &TimestampedEntry{
- Timestamp: timestamp,
- EntryType: XJSONLogEntryType,
- JSONEntry: &JSONDataEntry{Data: []byte(jsonStr)},
- },
- }
-}
-
// MerkleTreeLeafFromRawChain generates a MerkleTreeLeaf from a chain (in DER-encoded form) and timestamp.
func MerkleTreeLeafFromRawChain(rawChain []ASN1Cert, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) {
// Need at most 3 of the chain
@@ -128,7 +98,7 @@ func MerkleTreeLeafFromRawChain(rawChain []ASN1Cert, etype LogEntryType, timesta
chain := make([]*x509.Certificate, count)
for i := range chain {
cert, err := x509.ParseCertificate(rawChain[i].Data)
- if err != nil {
+ if x509.IsFatal(err) {
return nil, fmt.Errorf("failed to parse chain[%d] cert: %v", i, err)
}
chain[i] = cert
@@ -248,60 +218,96 @@ func IsPreIssuer(issuer *x509.Certificate) bool {
return false
}
-// LogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data after JSON parsing)
-// into a LogEntry object (which includes x509.Certificate objects, after TLS and ASN.1 parsing).
-// Note that this function may return a valid LogEntry object and a non-nil error value, when
-// the error indicates a non-fatal parsing error (of type x509.NonFatalErrors).
-func LogEntryFromLeaf(index int64, leafEntry *LeafEntry) (*LogEntry, error) {
- var leaf MerkleTreeLeaf
- if rest, err := tls.Unmarshal(leafEntry.LeafInput, &leaf); err != nil {
- return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf for index %d: %v", index, err)
+// RawLogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
+// after JSON parsing) into a RawLogEntry object (i.e. a TLS-parsed structure).
+func RawLogEntryFromLeaf(index int64, entry *LeafEntry) (*RawLogEntry, error) {
+ ret := RawLogEntry{Index: index}
+ if rest, err := tls.Unmarshal(entry.LeafInput, &ret.Leaf); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf: %v", err)
} else if len(rest) > 0 {
- return nil, fmt.Errorf("trailing data (%d bytes) after MerkleTreeLeaf for index %d", len(rest), index)
+ return nil, fmt.Errorf("MerkleTreeLeaf: trailing data %d bytes", len(rest))
}
- var err error
- entry := LogEntry{Index: index, Leaf: leaf}
- switch leaf.TimestampedEntry.EntryType {
+ switch eType := ret.Leaf.TimestampedEntry.EntryType; eType {
case X509LogEntryType:
var certChain CertificateChain
- if rest, err := tls.Unmarshal(leafEntry.ExtraData, &certChain); err != nil {
- return nil, fmt.Errorf("failed to unmarshal ExtraData for index %d: %v", index, err)
+ if rest, err := tls.Unmarshal(entry.ExtraData, &certChain); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal CertificateChain: %v", err)
} else if len(rest) > 0 {
- return nil, fmt.Errorf("trailing data (%d bytes) after CertificateChain for index %d", len(rest), index)
- }
- entry.Chain = certChain.Entries
- entry.X509Cert, err = leaf.X509Certificate()
- if _, ok := err.(x509.NonFatalErrors); !ok && err != nil {
- return nil, fmt.Errorf("failed to parse certificate in MerkleTreeLeaf for index %d: %v", index, err)
+ return nil, fmt.Errorf("CertificateChain: trailing data %d bytes", len(rest))
}
+ ret.Cert = *ret.Leaf.TimestampedEntry.X509Entry
+ ret.Chain = certChain.Entries
case PrecertLogEntryType:
var precertChain PrecertChainEntry
- if rest, err := tls.Unmarshal(leafEntry.ExtraData, &precertChain); err != nil {
- return nil, fmt.Errorf("failed to unmarshal PrecertChainEntry for index %d: %v", index, err)
+ if rest, err := tls.Unmarshal(entry.ExtraData, &precertChain); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal PrecertChainEntry: %v", err)
} else if len(rest) > 0 {
- return nil, fmt.Errorf("trailing data (%d bytes) after PrecertChainEntry for index %d", len(rest), index)
+ return nil, fmt.Errorf("PrecertChainEntry: trailing data %d bytes", len(rest))
}
- entry.Chain = precertChain.CertificateChain
+ ret.Cert = precertChain.PreCertificate
+ ret.Chain = precertChain.CertificateChain
+
+ default:
+ // TODO(pavelkalinnikov): Section 4.6 of RFC6962 implies that unknown types
+ // are not errors. We should revisit how we process this case.
+ return nil, fmt.Errorf("unknown entry type: %v", eType)
+ }
+
+ return &ret, nil
+}
+
+// ToLogEntry converts RawLogEntry to a LogEntry, which includes an x509-parsed
+// (pre-)certificate.
+//
+// Note that this function may return a valid LogEntry object and a non-nil
+// error value, when the error indicates a non-fatal parsing error.
+func (rle *RawLogEntry) ToLogEntry() (*LogEntry, error) {
+ var err error
+ entry := LogEntry{Index: rle.Index, Leaf: rle.Leaf, Chain: rle.Chain}
+
+ switch eType := rle.Leaf.TimestampedEntry.EntryType; eType {
+ case X509LogEntryType:
+ entry.X509Cert, err = rle.Leaf.X509Certificate()
+ if x509.IsFatal(err) {
+ return nil, fmt.Errorf("failed to parse certificate: %v", err)
+ }
+
+ case PrecertLogEntryType:
var tbsCert *x509.Certificate
- tbsCert, err = leaf.Precertificate()
- if _, ok := err.(x509.NonFatalErrors); !ok && err != nil {
- return nil, fmt.Errorf("failed to parse precertificate in MerkleTreeLeaf for index %d: %v", index, err)
+ tbsCert, err = rle.Leaf.Precertificate()
+ if x509.IsFatal(err) {
+ return nil, fmt.Errorf("failed to parse precertificate: %v", err)
}
entry.Precert = &Precertificate{
- Submitted: precertChain.PreCertificate,
- IssuerKeyHash: leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
+ Submitted: rle.Cert,
+ IssuerKeyHash: rle.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
TBSCertificate: tbsCert,
}
default:
- return nil, fmt.Errorf("saw unknown entry type at index %d: %v", index, leaf.TimestampedEntry.EntryType)
+ return nil, fmt.Errorf("unknown entry type: %v", eType)
}
- // err may hold a x509.NonFatalErrors object.
+
+ // err may be non-nil for a non-fatal error.
return &entry, err
}
+// LogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
+// after JSON parsing) into a LogEntry object (which includes x509.Certificate
+// objects, after TLS and ASN.1 parsing).
+//
+// Note that this function may return a valid LogEntry object and a non-nil
+// error value, when the error indicates a non-fatal parsing error.
+func LogEntryFromLeaf(index int64, leaf *LeafEntry) (*LogEntry, error) {
+ rle, err := RawLogEntryFromLeaf(index, leaf)
+ if err != nil {
+ return nil, err
+ }
+ return rle.ToLogEntry()
+}
+
// TimestampToTime converts a timestamp in the style of RFC 6962 (milliseconds
// since UNIX epoch) to a Go Time.
func TimestampToTime(ts uint64) time.Time {
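The refactoring splits entry handling into a cheap TLS parse (RawLogEntryFromLeaf) and the heavier x509 parse (ToLogEntry), with the latter allowed to succeed despite non-fatal certificate quirks. A sketch of consuming a fetched entry under the new split (parseEntry is an illustrative wrapper, not library code):

    package main

    import (
        "log"

        ct "github.com/google/certificate-transparency-go"
        "github.com/google/certificate-transparency-go/x509"
    )

    func parseEntry(index int64, leaf *ct.LeafEntry) (*ct.LogEntry, error) {
        rle, err := ct.RawLogEntryFromLeaf(index, leaf)
        if err != nil {
            return nil, err // fatal: the TLS structure itself didn't parse
        }
        entry, err := rle.ToLogEntry()
        if x509.IsFatal(err) {
            return nil, err
        }
        if err != nil {
            // Non-fatal x509 errors: the entry is still usable.
            log.Printf("entry %d parsed with non-fatal errors: %v", index, err)
        }
        return entry, nil
    }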
diff --git a/vendor/github.com/google/certificate-transparency-go/signatures.go b/vendor/github.com/google/certificate-transparency-go/signatures.go
index b1000ba464..b009008c6f 100644
--- a/vendor/github.com/google/certificate-transparency-go/signatures.go
+++ b/vendor/github.com/google/certificate-transparency-go/signatures.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -55,7 +55,7 @@ func PublicKeyFromB64(b64PubKey string) (crypto.PublicKey, error) {
// SignatureVerifier can verify signatures on SCTs and STHs
type SignatureVerifier struct {
- pubKey crypto.PublicKey
+ PubKey crypto.PublicKey
}
// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey.
@@ -80,17 +80,15 @@ func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
}
default:
- return nil, fmt.Errorf("Unsupported public key type %v", pkType)
+ return nil, fmt.Errorf("unsupported public key type %v", pkType)
}
- return &SignatureVerifier{
- pubKey: pk,
- }, nil
+ return &SignatureVerifier{PubKey: pk}, nil
}
// VerifySignature verifies the given signature sig matches the data.
func (s SignatureVerifier) VerifySignature(data []byte, sig tls.DigitallySigned) error {
- return tls.VerifySignature(s.pubKey, data, sig)
+ return tls.VerifySignature(s.PubKey, data, sig)
}
// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry.
diff --git a/vendor/github.com/google/certificate-transparency-go/tls/signature.go b/vendor/github.com/google/certificate-transparency-go/tls/signature.go
index bfdb016d2f..c02b29827b 100644
--- a/vendor/github.com/google/certificate-transparency-go/tls/signature.go
+++ b/vendor/github.com/google/certificate-transparency-go/tls/signature.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@ package tls
import (
"crypto"
- "crypto/dsa"
+ "crypto/dsa" //nolint:staticcheck
"crypto/ecdsa"
_ "crypto/md5" // For registration side-effect
"crypto/rand"
diff --git a/vendor/github.com/google/certificate-transparency-go/tls/tls.go b/vendor/github.com/google/certificate-transparency-go/tls/tls.go
index 1bcd3a3796..030074c19a 100644
--- a/vendor/github.com/google/certificate-transparency-go/tls/tls.go
+++ b/vendor/github.com/google/certificate-transparency-go/tls/tls.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -106,41 +106,41 @@ var (
//
// For example, a TLS structure:
//
-// enum { e1(1), e2(2) } EnumType;
-// struct {
-// EnumType sel;
-// select(sel) {
-// case e1: uint16
-// case e2: uint32
-// } data;
-// } VariantItem;
+// enum { e1(1), e2(2) } EnumType;
+// struct {
+// EnumType sel;
+// select(sel) {
+// case e1: uint16
+// case e2: uint32
+// } data;
+// } VariantItem;
//
// would have a corresponding Go type:
//
-// type VariantItem struct {
-// Sel tls.Enum `tls:"maxval:2"`
-// Data16 *uint16 `tls:"selector:Sel,val:1"`
-// Data32 *uint32 `tls:"selector:Sel,val:2"`
-// }
+// type VariantItem struct {
+// Sel tls.Enum `tls:"maxval:2"`
+// Data16 *uint16 `tls:"selector:Sel,val:1"`
+// Data32 *uint32 `tls:"selector:Sel,val:2"`
+// }
//
// TLS fixed-length vectors of types other than opaque or uint8 are not supported.
//
// For TLS variable-length vectors that are themselves used in other vectors,
// create a single-field structure to represent the inner type. For example, for:
//
-// opaque InnerType<1..65535>;
-// struct {
-// InnerType inners<1,65535>;
-// } Something;
+// opaque InnerType<1..65535>;
+// struct {
+// InnerType inners<1,65535>;
+// } Something;
//
// convert to:
//
-// type InnerType struct {
-// Val []byte `tls:"minlen:1,maxlen:65535"`
-// }
-// type Something struct {
-// Inners []InnerType `tls:"minlen:1,maxlen:65535"`
-// }
+// type InnerType struct {
+// Val []byte `tls:"minlen:1,maxlen:65535"`
+// }
+// type Something struct {
+// Inners []InnerType `tls:"minlen:1,maxlen:65535"`
+// }
//
// If the encoded value does not fit in the Go type, Unmarshal returns a parse error.
func Unmarshal(b []byte, val interface{}) ([]byte, error) {
diff --git a/vendor/github.com/google/certificate-transparency-go/tls/types.go b/vendor/github.com/google/certificate-transparency-go/tls/types.go
index 14471ad264..b8eaf24bdd 100644
--- a/vendor/github.com/google/certificate-transparency-go/tls/types.go
+++ b/vendor/github.com/google/certificate-transparency-go/tls/types.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@ package tls
import (
"crypto"
- "crypto/dsa"
+ "crypto/dsa" //nolint:staticcheck
"crypto/ecdsa"
"crypto/rsa"
"fmt"
diff --git a/vendor/github.com/google/certificate-transparency-go/types.go b/vendor/github.com/google/certificate-transparency-go/types.go
index bcdd7e9222..c797d9ceb6 100644
--- a/vendor/github.com/google/certificate-transparency-go/types.go
+++ b/vendor/github.com/google/certificate-transparency-go/types.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -31,14 +31,14 @@ import (
///////////////////////////////////////////////////////////////////////////////
// LogEntryType represents the LogEntryType enum from section 3.1:
-// enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType;
+//
+// enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType;
type LogEntryType tls.Enum // tls:"maxval:65535"
// LogEntryType constants from section 3.1.
const (
X509LogEntryType LogEntryType = 0
PrecertLogEntryType LogEntryType = 1
- XJSONLogEntryType LogEntryType = 0x8000 // Experimental. Don't rely on this!
)
func (e LogEntryType) String() string {
@@ -47,8 +47,6 @@ func (e LogEntryType) String() string {
return "X509LogEntryType"
case PrecertLogEntryType:
return "PrecertLogEntryType"
- case XJSONLogEntryType:
- return "XJSONLogEntryType"
default:
return fmt.Sprintf("UnknownEntryType(%d)", e)
}
@@ -61,7 +59,8 @@ const (
)
// MerkleLeafType represents the MerkleLeafType enum from section 3.4:
-// enum { timestamped_entry(0), (255) } MerkleLeafType;
+//
+// enum { timestamped_entry(0), (255) } MerkleLeafType;
type MerkleLeafType tls.Enum // tls:"maxval:255"
// TimestampedEntryLeafType is the only defined MerkleLeafType constant from section 3.4.
@@ -77,7 +76,8 @@ func (m MerkleLeafType) String() string {
}
// Version represents the Version enum from section 3.2:
-// enum { v1(0), (255) } Version;
+//
+// enum { v1(0), (255) } Version;
type Version tls.Enum // tls:"maxval:255"
// CT Version constants from section 3.2.
@@ -95,7 +95,8 @@ func (v Version) String() string {
}
// SignatureType differentiates STH signatures from SCT signatures, see section 3.2.
-// enum { certificate_timestamp(0), tree_hash(1), (255) } SignatureType;
+//
+// enum { certificate_timestamp(0), tree_hash(1), (255) } SignatureType;
type SignatureType tls.Enum // tls:"maxval:255"
// SignatureType constants from section 3.2.
@@ -135,7 +136,7 @@ type PreCert struct {
// CTExtensions is a representation of the raw bytes of any CtExtension
// structure (see section 3.2).
-// nolint: golint
+// nolint: revive
type CTExtensions []byte // tls:"minlen:0,maxlen:65535"`
// MerkleTreeNode represents an internal node in the CT tree.
@@ -199,6 +200,25 @@ func (d *DigitallySigned) UnmarshalJSON(b []byte) error {
return d.FromBase64String(content)
}
+// RawLogEntry represents the (TLS-parsed) contents of an entry in a CT log.
+type RawLogEntry struct {
+ // Index is a position of the entry in the log.
+ Index int64
+ // Leaf is a parsed Merkle leaf hash input.
+ Leaf MerkleTreeLeaf
+ // Cert is:
+ // - A certificate if Leaf.TimestampedEntry.EntryType is X509LogEntryType.
+ // - A precertificate if Leaf.TimestampedEntry.EntryType is
+ // PrecertLogEntryType, in the form of a DER-encoded Certificate as
+ // originally added (which includes the poison extension and a signature
+ // generated over the pre-cert by the pre-cert issuer).
+ // - Empty otherwise.
+ Cert ASN1Cert
+ // Chain is the issuing certificate chain starting with the issuer of Cert,
+ // or an empty slice if Cert is empty.
+ Chain []ASN1Cert
+}
+
// LogEntry represents the (parsed) contents of an entry in a CT log. This is described
// in section 3.1, but note that this structure does *not* match the TLS structure
// defined there (the TLS structure is never used directly in RFC6962).
@@ -279,6 +299,23 @@ type SignedTreeHead struct {
LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key
}
+func (s SignedTreeHead) String() string {
+ sigStr, err := s.TreeHeadSignature.Base64String()
+ if err != nil {
+ sigStr = tls.DigitallySigned(s.TreeHeadSignature).String()
+ }
+
+ // If the LogID field in the SignedTreeHead is empty, don't include it in
+ // the string.
+ var logIDStr string
+ if id, empty := s.LogID, (SHA256Hash{}); id != empty {
+ logIDStr = fmt.Sprintf("LogID:%s, ", id.Base64String())
+ }
+
+ return fmt.Sprintf("{%sTreeSize:%d, Timestamp:%d, SHA256RootHash:%q, TreeHeadSignature:%q}",
+ logIDStr, s.TreeSize, s.Timestamp, s.SHA256RootHash.Base64String(), sigStr)
+}
+
// TreeHeadSignature holds the data over which the signature in an STH is
// generated; see section 3.5
type TreeHeadSignature struct {
@@ -426,6 +463,36 @@ type AddChainResponse struct {
Signature []byte `json:"signature"` // Log signature for this SCT
}
+// ToSignedCertificateTimestamp creates a SignedCertificateTimestamp from the
+// AddChainResponse.
+func (r *AddChainResponse) ToSignedCertificateTimestamp() (*SignedCertificateTimestamp, error) {
+ sct := SignedCertificateTimestamp{
+ SCTVersion: r.SCTVersion,
+ Timestamp: r.Timestamp,
+ }
+
+ if len(r.ID) != sha256.Size {
+ return nil, fmt.Errorf("id is invalid length, expected %d got %d", sha256.Size, len(r.ID))
+ }
+ copy(sct.LogID.KeyID[:], r.ID)
+
+ exts, err := base64.StdEncoding.DecodeString(r.Extensions)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 data in Extensions (%q): %v", r.Extensions, err)
+ }
+ sct.Extensions = CTExtensions(exts)
+
+ var ds DigitallySigned
+ if rest, err := tls.Unmarshal(r.Signature, &ds); err != nil {
+ return nil, fmt.Errorf("tls.Unmarshal(): %s", err)
+ } else if len(rest) > 0 {
+ return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest))
+ }
+ sct.Signature = ds
+
+ return &sct, nil
+}
+
// AddJSONRequest represents the JSON request body sent to the add-json POST method.
// The corresponding response re-uses AddChainResponse.
// This is an experimental addition not covered by RFC6962.
@@ -433,7 +500,7 @@ type AddJSONRequest struct {
Data interface{} `json:"data"`
}
-// GetSTHResponse respresents the JSON response to the get-sth GET method from section 4.3.
+// GetSTHResponse represents the JSON response to the get-sth GET method from section 4.3.
type GetSTHResponse struct {
TreeSize uint64 `json:"tree_size"` // Number of certs in the current tree
Timestamp uint64 `json:"timestamp"` // Time that the tree was created
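The new ToSignedCertificateTimestamp helper folds the key-ID length check, base64 extension decoding, and TLS signature unmarshalling into one place. A small sketch of applying it to an add-chain response (describeSCT is illustrative):

    package main

    import (
        "fmt"

        ct "github.com/google/certificate-transparency-go"
    )

    func describeSCT(rsp *ct.AddChainResponse) (string, error) {
        sct, err := rsp.ToSignedCertificateTimestamp()
        if err != nil {
            return "", fmt.Errorf("malformed add-chain response: %v", err)
        }
        return fmt.Sprintf("SCT v%d from log %x at %v",
            sct.SCTVersion, sct.LogID.KeyID, ct.TimestampToTime(sct.Timestamp)), nil
    }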
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/README.md b/vendor/github.com/google/certificate-transparency-go/x509/README.md
new file mode 100644
index 0000000000..6f22f5f834
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/README.md
@@ -0,0 +1,7 @@
+# Important Notice
+
+This is a fork of the `crypto/x509` Go package. The original source can be found on
+[GitHub](https://github.com/golang/go).
+
+Be careful about making local modifications to this code, as it will
+make maintenance harder in the future.
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go b/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go
index 71ffbdf0e0..4823d59463 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go
@@ -25,45 +25,61 @@ func NewCertPool() *CertPool {
}
}
+func (s *CertPool) copy() *CertPool {
+ p := &CertPool{
+ bySubjectKeyId: make(map[string][]int, len(s.bySubjectKeyId)),
+ byName: make(map[string][]int, len(s.byName)),
+ certs: make([]*Certificate, len(s.certs)),
+ }
+ for k, v := range s.bySubjectKeyId {
+ indexes := make([]int, len(v))
+ copy(indexes, v)
+ p.bySubjectKeyId[k] = indexes
+ }
+ for k, v := range s.byName {
+ indexes := make([]int, len(v))
+ copy(indexes, v)
+ p.byName[k] = indexes
+ }
+ copy(p.certs, s.certs)
+ return p
+}
+
// SystemCertPool returns a copy of the system cert pool.
//
// Any mutations to the returned pool are not written to disk and do
-// not affect any other pool.
+// not affect any other pool returned by SystemCertPool.
+//
+// New changes in the system cert pool might not be reflected
+// in subsequent calls.
func SystemCertPool() (*CertPool, error) {
if runtime.GOOS == "windows" {
// Issue 16736, 18609:
return nil, errors.New("crypto/x509: system root pool is not available on Windows")
}
+ if sysRoots := systemRootsPool(); sysRoots != nil {
+ return sysRoots.copy(), nil
+ }
+
return loadSystemRoots()
}
-// findVerifiedParents attempts to find certificates in s which have signed the
-// given certificate. If any candidates were rejected then errCert will be set
-// to one of them, arbitrarily, and err will contain the reason that it was
-// rejected.
-func (s *CertPool) findVerifiedParents(cert *Certificate) (parents []int, errCert *Certificate, err error) {
+// findPotentialParents returns the indexes of certificates in s which might
+// have signed cert. The caller must not modify the returned slice.
+func (s *CertPool) findPotentialParents(cert *Certificate) []int {
if s == nil {
- return
+ return nil
}
- var candidates []int
+ var candidates []int
if len(cert.AuthorityKeyId) > 0 {
candidates = s.bySubjectKeyId[string(cert.AuthorityKeyId)]
}
if len(candidates) == 0 {
candidates = s.byName[string(cert.RawIssuer)]
}
-
- for _, c := range candidates {
- if err = cert.CheckSignatureFrom(s.certs[c]); err == nil {
- parents = append(parents, c)
- } else {
- errCert = s.certs[c]
- }
- }
-
- return
+ return candidates
}
func (s *CertPool) contains(cert *Certificate) bool {
@@ -121,7 +137,7 @@ func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
}
cert, err := ParseCertificate(block.Bytes)
- if err != nil {
+ if IsFatal(err) {
continue
}
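With the cached pool now copied on the way out, this fork's SystemCertPool matches upstream crypto/x509 semantics: every call returns an independent pool, so local additions cannot leak into later callers. An illustrative sketch (localCA stands in for any PEM bundle):

    package main

    import (
        "log"

        "github.com/google/certificate-transparency-go/x509"
    )

    func poolsAreIndependent(localCA []byte) {
        pool1, err := x509.SystemCertPool()
        if err != nil {
            log.Fatal(err) // e.g. not available on Windows
        }
        pool1.AppendCertsFromPEM(localCA)

        pool2, _ := x509.SystemCertPool()
        _ = pool2 // a fresh copy: it does not contain localCA
    }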
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/curves.go b/vendor/github.com/google/certificate-transparency-go/x509/curves.go
new file mode 100644
index 0000000000..0e2778cb35
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/curves.go
@@ -0,0 +1,37 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "crypto/elliptic"
+ "math/big"
+ "sync"
+)
+
+// This file holds ECC curves that are not supported by the main Go crypto/elliptic
+// library, but which have been observed in certificates in the wild.
+
+var initonce sync.Once
+var p192r1 *elliptic.CurveParams
+
+func initAllCurves() {
+ initSECP192R1()
+}
+
+func initSECP192R1() {
+ // See SEC-2, section 2.2.2
+ p192r1 = &elliptic.CurveParams{Name: "P-192"}
+ p192r1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", 16)
+ p192r1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831", 16)
+ p192r1.B, _ = new(big.Int).SetString("64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1", 16)
+ p192r1.Gx, _ = new(big.Int).SetString("188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012", 16)
+ p192r1.Gy, _ = new(big.Int).SetString("07192B95FFC8DA78631011ED6B24CDD573F977A11E794811", 16)
+ p192r1.BitSize = 192
+}
+
+func secp192r1() elliptic.Curve {
+ initonce.Do(initAllCurves)
+ return p192r1
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/error.go b/vendor/github.com/google/certificate-transparency-go/x509/error.go
index 63360ec8e2..40b7ef7d9f 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/error.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/error.go
@@ -163,12 +163,18 @@ func (e *Errors) Fatal() bool {
// Empty indicates whether e has no errors.
func (e *Errors) Empty() bool {
+ if e == nil {
+ return true
+ }
return len(e.Errs) == 0
}
// FirstFatal returns the first fatal error in e, or nil
// if there is no fatal error.
func (e *Errors) FirstFatal() error {
+ if e == nil {
+ return nil
+ }
for _, err := range e.Errs {
if err.Fatal {
return err
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/names.go b/vendor/github.com/google/certificate-transparency-go/x509/names.go
index 3ff0b7d428..4829edeb04 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/names.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/names.go
@@ -27,9 +27,10 @@ const (
// OtherName describes a name related to a certificate which is not in one
// of the standard name formats. RFC 5280, 4.2.1.6:
-// OtherName ::= SEQUENCE {
-// type-id OBJECT IDENTIFIER,
-// value [0] EXPLICIT ANY DEFINED BY type-id }
+//
+// OtherName ::= SEQUENCE {
+// type-id OBJECT IDENTIFIER,
+// value [0] EXPLICIT ANY DEFINED BY type-id }
type OtherName struct {
TypeID asn1.ObjectIdentifier
Value asn1.RawValue
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/nilref_nil_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/nilref_nil_darwin.go
deleted file mode 100644
index d3e8af7729..0000000000
--- a/vendor/github.com/google/certificate-transparency-go/x509/nilref_nil_darwin.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build cgo,!arm,!arm64,!ios,!go1.10
-
-package x509
-
-/*
-#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080
-#cgo LDFLAGS: -framework CoreFoundation -framework Security
-
-#include <CoreFoundation/CoreFoundation.h>
-*/
-import "C"
-
-// For Go versions before 1.10, nil values for Apple's CoreFoundation
-// CF*Ref types were represented by nil. See:
-// https://github.com/golang/go/commit/b868616b63a8
-func setNilCFRef(v *C.CFDataRef) {
- *v = nil
-}
-
-func isNilCFRef(v C.CFDataRef) bool {
- return v == nil
-}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/nilref_zero_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/nilref_zero_darwin.go
deleted file mode 100644
index 6d8ad49866..0000000000
--- a/vendor/github.com/google/certificate-transparency-go/x509/nilref_zero_darwin.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build cgo,!arm,!arm64,!ios,go1.10
-
-package x509
-
-/*
-#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080
-#cgo LDFLAGS: -framework CoreFoundation -framework Security
-
-#include <CoreFoundation/CoreFoundation.h>
-*/
-import "C"
-
-// For Go versions >= 1.10, nil values for Apple's CoreFoundation
-// CF*Ref types are represented by zero. See:
-// https://github.com/golang/go/commit/b868616b63a8
-func setNilCFRef(v *C.CFDataRef) {
- *v = 0
-}
-
-func isNilCFRef(v C.CFDataRef) bool {
- return v == 0
-}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go b/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go
index 0388d63e14..93d1e4a922 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go
@@ -203,7 +203,7 @@ func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, al
// the data separately, but it doesn't seem worth the additional
// code.
copy(encrypted, data)
- // See RFC 1423, section 1.1
+ // See RFC 1423, Section 1.1.
for i := 0; i < pad; i++ {
encrypted = append(encrypted, byte(pad))
}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go b/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go
index e50e1a8517..bea05b57fd 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go
@@ -42,7 +42,9 @@ type pkcs1PublicKey struct {
E int
}
-// ParsePKCS1PrivateKey returns an RSA private key from its ASN.1 PKCS#1 DER encoded form.
+// ParsePKCS1PrivateKey parses an RSA private key in PKCS#1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "RSA PRIVATE KEY".
func ParsePKCS1PrivateKey(der []byte) (*rsa.PrivateKey, error) {
var priv pkcs1PrivateKey
rest, err := asn1.Unmarshal(der, &priv)
@@ -50,6 +52,12 @@ func ParsePKCS1PrivateKey(der []byte) (*rsa.PrivateKey, error) {
return nil, asn1.SyntaxError{Msg: "trailing data"}
}
if err != nil {
+ if _, err := asn1.Unmarshal(der, &ecPrivateKey{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParseECPrivateKey instead for this key format)")
+ }
+ if _, err := asn1.Unmarshal(der, &pkcs8{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParsePKCS8PrivateKey instead for this key format)")
+ }
return nil, err
}
@@ -89,7 +97,11 @@ func ParsePKCS1PrivateKey(der []byte) (*rsa.PrivateKey, error) {
return key, nil
}
-// MarshalPKCS1PrivateKey converts a private key to ASN.1 DER encoded form.
+// MarshalPKCS1PrivateKey converts an RSA private key to PKCS#1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "RSA PRIVATE KEY".
+// For a more flexible key format which is not RSA specific, use
+// MarshalPKCS8PrivateKey.
func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte {
key.Precompute()
@@ -121,11 +133,16 @@ func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte {
return b
}
-// ParsePKCS1PublicKey parses a PKCS#1 public key in ASN.1 DER form.
+// ParsePKCS1PublicKey parses an RSA public key in PKCS#1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "RSA PUBLIC KEY".
func ParsePKCS1PublicKey(der []byte) (*rsa.PublicKey, error) {
var pub pkcs1PublicKey
rest, err := asn1.Unmarshal(der, &pub)
if err != nil {
+ if _, err := asn1.Unmarshal(der, &publicKeyInfo{}); err == nil {
+ return nil, errors.New("x509: failed to parse public key (use ParsePKIXPublicKey instead for this key format)")
+ }
return nil, err
}
if len(rest) > 0 {
@@ -146,6 +163,8 @@ func ParsePKCS1PublicKey(der []byte) (*rsa.PublicKey, error) {
}
// MarshalPKCS1PublicKey converts an RSA public key to PKCS#1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "RSA PUBLIC KEY".
func MarshalPKCS1PublicKey(key *rsa.PublicKey) []byte {
derBytes, _ := asn1.Marshal(pkcs1PublicKey{
N: key.N,
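
The added probes turn a bare ASN.1 error into a hint naming the right parser when a caller mixes up key formats. A minimal sketch of the caller-visible effect, assuming this vendored package is imported directly; it serializes an EC key in SEC 1 form and deliberately feeds it to the PKCS#1 (RSA) parser:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	// Build an EC key and serialize it in SEC 1 form...
	ecKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	der, err := x509.MarshalECPrivateKey(ecKey)
	if err != nil {
		panic(err)
	}

	// ...then parse it with the wrong function. With this patch the error
	// names the right parser instead of a bare ASN.1 structural error.
	_, err = x509.ParsePKCS1PrivateKey(der)
	fmt.Println(err)
	// x509: failed to parse private key (use ParseECPrivateKey instead for this key format)
}
```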
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go b/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go
index b22338ccdf..a144eb6a5d 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go
@@ -12,6 +12,9 @@ import (
"github.com/google/certificate-transparency-go/asn1"
"github.com/google/certificate-transparency-go/x509/pkix"
+
+ // TODO(robpercival): change this to crypto/ed25519 when Go 1.13 is min version
+ "golang.org/x/crypto/ed25519"
)
// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See
@@ -24,11 +27,21 @@ type pkcs8 struct {
// optional attributes omitted.
}
-// ParsePKCS8PrivateKey parses an unencrypted, PKCS#8 private key.
-// See RFC 5208.
+// ParsePKCS8PrivateKey parses an unencrypted private key in PKCS#8, ASN.1 DER form.
+//
+// It returns a *rsa.PrivateKey, an *ecdsa.PrivateKey, or an ed25519.PrivateKey.
+// More types might be supported in the future.
+//
+// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY".
func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) {
var privKey pkcs8
if _, err := asn1.Unmarshal(der, &privKey); err != nil {
+ if _, err := asn1.Unmarshal(der, &ecPrivateKey{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParseECPrivateKey instead for this key format)")
+ }
+ if _, err := asn1.Unmarshal(der, &pkcs1PrivateKey{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format)")
+ }
return nil, err
}
switch {
@@ -51,16 +64,30 @@ func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) {
}
return key, nil
+ case privKey.Algo.Algorithm.Equal(OIDPublicKeyEd25519):
+ if l := len(privKey.Algo.Parameters.FullBytes); l != 0 {
+ return nil, errors.New("x509: invalid Ed25519 private key parameters")
+ }
+ var curvePrivateKey []byte
+ if _, err := asn1.Unmarshal(privKey.PrivateKey, &curvePrivateKey); err != nil {
+ return nil, fmt.Errorf("x509: invalid Ed25519 private key: %v", err)
+ }
+ if l := len(curvePrivateKey); l != ed25519.SeedSize {
+ return nil, fmt.Errorf("x509: invalid Ed25519 private key length: %d", l)
+ }
+ return ed25519.NewKeyFromSeed(curvePrivateKey), nil
+
default:
return nil, fmt.Errorf("x509: PKCS#8 wrapping contained private key with unknown algorithm: %v", privKey.Algo.Algorithm)
}
}
-// MarshalPKCS8PrivateKey converts a private key to PKCS#8 encoded form.
-// The following key types are supported: *rsa.PrivateKey, *ecdsa.PublicKey.
-// Unsupported key types result in an error.
+// MarshalPKCS8PrivateKey converts a private key to PKCS#8, ASN.1 DER form.
+//
+// The following key types are currently supported: *rsa.PrivateKey, *ecdsa.PrivateKey
+// and ed25519.PrivateKey. Unsupported key types result in an error.
//
-// See RFC 5208.
+// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY".
func MarshalPKCS8PrivateKey(key interface{}) ([]byte, error) {
var privKey pkcs8
@@ -75,7 +102,7 @@ func MarshalPKCS8PrivateKey(key interface{}) ([]byte, error) {
case *ecdsa.PrivateKey:
oid, ok := OIDFromNamedCurve(k.Curve)
if !ok {
- return nil, errors.New("x509: unknown curve while marshalling to PKCS#8")
+ return nil, errors.New("x509: unknown curve while marshaling to PKCS#8")
}
oidBytes, err := asn1.Marshal(oid)
@@ -94,8 +121,18 @@ func MarshalPKCS8PrivateKey(key interface{}) ([]byte, error) {
return nil, errors.New("x509: failed to marshal EC private key while building PKCS#8: " + err.Error())
}
+ case ed25519.PrivateKey:
+ privKey.Algo = pkix.AlgorithmIdentifier{
+ Algorithm: OIDPublicKeyEd25519,
+ }
+ curvePrivateKey, err := asn1.Marshal(k.Seed())
+ if err != nil {
+ return nil, fmt.Errorf("x509: failed to marshal private key: %v", err)
+ }
+ privKey.PrivateKey = curvePrivateKey
+
default:
- return nil, fmt.Errorf("x509: unknown key type while marshalling PKCS#8: %T", key)
+ return nil, fmt.Errorf("x509: unknown key type while marshaling PKCS#8: %T", key)
}
return asn1.Marshal(privKey)
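
With this change, a PKCS#8 blob whose AlgorithmIdentifier is OIDPublicKeyEd25519 round-trips through an ed25519.PrivateKey, rebuilt from the 32-byte seed nested inside the PrivateKey OCTET STRING. A minimal round-trip sketch, using golang.org/x/crypto/ed25519 to match the vendored import (per the TODO above, crypto/ed25519 once Go 1.13 is the minimum):

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/google/certificate-transparency-go/x509"
	"golang.org/x/crypto/ed25519"
)

func main() {
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Marshal wraps the 32-byte seed in a PKCS#8 structure carrying the
	// Ed25519 AlgorithmIdentifier with no parameters.
	der, err := x509.MarshalPKCS8PrivateKey(priv)
	if err != nil {
		panic(err)
	}

	// Parse reverses it; the concrete type is ed25519.PrivateKey (a byte
	// slice, not a pointer), so a type assertion recovers the key.
	key, err := x509.ParsePKCS8PrivateKey(der)
	if err != nil {
		panic(err)
	}
	fmt.Printf("type %T, equal %v\n", key, bytes.Equal(priv, key.(ed25519.PrivateKey)))
}
```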
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go b/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go
index ccba8761f2..843fa1f2cd 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go
@@ -7,14 +7,12 @@
package pkix
import (
- // START CT CHANGES
"encoding/hex"
"fmt"
-
- "github.com/google/certificate-transparency-go/asn1"
- // END CT CHANGES
"math/big"
"time"
+
+ "github.com/google/certificate-transparency-go/asn1"
)
// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
@@ -98,7 +96,7 @@ func (r RDNSequence) String() string {
type RelativeDistinguishedNameSET []AttributeTypeAndValue
// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
-// http://tools.ietf.org/html/rfc5280#section-4.1.2.4
+// RFC 5280, Section 4.1.2.4.
type AttributeTypeAndValue struct {
Type asn1.ObjectIdentifier
Value interface{}
@@ -240,7 +238,7 @@ func (n Name) String() string {
return n.ToRDNSequence().String()
}
-// oidInAttributeTypeAndValue returns whether a type with the given OID exists
+// oidInAttributeTypeAndValue reports whether a type with the given OID exists
// in atv.
func oidInAttributeTypeAndValue(oid asn1.ObjectIdentifier, atv []AttributeTypeAndValue) bool {
for _, a := range atv {
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go b/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go
index 3543e3042c..06fd439c1f 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build go1.11
// +build go1.11
package x509
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go b/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go
index 3908833a89..f13a47adfb 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build !go1.11
// +build !go1.11
package x509
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/revoked.go b/vendor/github.com/google/certificate-transparency-go/x509/revoked.go
index e704441639..e5fa6dd15f 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/revoked.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/revoked.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -14,12 +14,15 @@ import (
"github.com/google/certificate-transparency-go/x509/pkix"
)
+// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2.
var (
- // OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2.
OIDExtensionCRLNumber = asn1.ObjectIdentifier{2, 5, 29, 20}
OIDExtensionDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27}
OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28}
- // OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3
+)
+
+// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3
+var (
OIDExtensionCRLReasons = asn1.ObjectIdentifier{2, 5, 29, 21}
OIDExtensionInvalidityDate = asn1.ObjectIdentifier{2, 5, 29, 24}
OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29}
@@ -238,7 +241,7 @@ func ParseCertificateListDER(derBytes []byte) (*CertificateList, error) {
}
case e.Id.Equal(OIDExtensionAuthorityInfoAccess):
// RFC 5280 s5.2.7
- var aia []authorityInfoAccess
+ var aia []accessDescription
if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
errs.AddID(ErrInvalidCertListAuthInfoAccess, err)
} else if len(rest) != 0 {
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root.go b/vendor/github.com/google/certificate-transparency-go/x509/root.go
index 787d955be4..240296247d 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/root.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root.go
@@ -19,4 +19,7 @@ func systemRootsPool() *CertPool {
func initSystemRoots() {
systemRoots, systemRootsErr = loadSystemRoots()
+ if systemRootsErr != nil {
+ systemRoots = nil
+ }
}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go b/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go
index 1371933891..8c04bdcdfa 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build dragonfly || freebsd || netbsd || openbsd
// +build dragonfly freebsd netbsd openbsd
package x509
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go
index 6c2f21d903..dba99bb8dc 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build cgo && !arm && !arm64 && !ios
// +build cgo,!arm,!arm64,!ios
package x509
/*
-#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080
+#cgo CFLAGS: -mmacosx-version-min=10.10 -D__MAC_OS_X_VERSION_MAX_ALLOWED=101300
#cgo LDFLAGS: -framework CoreFoundation -framework Security
#include <errno.h>
@@ -16,60 +17,142 @@ package x509
#include <CoreFoundation/CoreFoundation.h>
#include <Security/Security.h>
-// FetchPEMRootsCTX509_MountainLion is the version of FetchPEMRoots from Go 1.6
-// which still works on OS X 10.8 (Mountain Lion).
-// It lacks support for admin & user cert domains.
-// See golang.org/issue/16473
-int FetchPEMRootsCTX509_MountainLion(CFDataRef *pemRoots) {
- if (pemRoots == NULL) {
- return -1;
+static Boolean isSSLPolicy(SecPolicyRef policyRef) {
+ if (!policyRef) {
+ return false;
}
- CFArrayRef certs = NULL;
- OSStatus err = SecTrustCopyAnchorCertificates(&certs);
- if (err != noErr) {
- return -1;
+ CFDictionaryRef properties = SecPolicyCopyProperties(policyRef);
+ if (properties == NULL) {
+ return false;
}
- CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
- int i, ncerts = CFArrayGetCount(certs);
- for (i = 0; i < ncerts; i++) {
- CFDataRef data = NULL;
- SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, i);
- if (cert == NULL) {
- continue;
+ Boolean isSSL = false;
+ CFTypeRef value = NULL;
+ if (CFDictionaryGetValueIfPresent(properties, kSecPolicyOid, (const void **)&value)) {
+ isSSL = CFEqual(value, kSecPolicyAppleSSL);
+ }
+ CFRelease(properties);
+ return isSSL;
+}
+
+// sslTrustSettingsResult obtains the final kSecTrustSettingsResult value
+// for a certificate in the user or admin domain, combining usage constraints
+// for the SSL SecTrustSettingsPolicy, ignoring SecTrustSettingsKeyUsage and
+// kSecTrustSettingsAllowedError.
+// https://developer.apple.com/documentation/security/1400261-sectrustsettingscopytrustsetting
+static SInt32 sslTrustSettingsResult(SecCertificateRef cert) {
+ CFArrayRef trustSettings = NULL;
+ OSStatus err = SecTrustSettingsCopyTrustSettings(cert, kSecTrustSettingsDomainUser, &trustSettings);
+
+ // According to Apple's SecTrustServer.c, "user trust settings overrule admin trust settings",
+ // but the rules of the override are unclear. Let's assume admin trust settings are applicable
+ // if and only if user trust settings fail to load or are NULL.
+ if (err != errSecSuccess || trustSettings == NULL) {
+ if (trustSettings != NULL) CFRelease(trustSettings);
+ err = SecTrustSettingsCopyTrustSettings(cert, kSecTrustSettingsDomainAdmin, &trustSettings);
+ }
+
+ // > no trust settings [...] means "this certificate must be verified to a known trusted certificate"
+ // (Should this cause a fallback from user to admin domain? It's unclear.)
+ if (err != errSecSuccess || trustSettings == NULL) {
+ if (trustSettings != NULL) CFRelease(trustSettings);
+ return kSecTrustSettingsResultUnspecified;
+ }
+
+ // > An empty trust settings array means "always trust this certificate" with an
+ // > overall trust setting for the certificate of kSecTrustSettingsResultTrustRoot.
+ if (CFArrayGetCount(trustSettings) == 0) {
+ CFRelease(trustSettings);
+ return kSecTrustSettingsResultTrustRoot;
+ }
+
+ // kSecTrustSettingsResult is defined as CFSTR("kSecTrustSettingsResult"),
+ // but the Go linker's internal linking mode can't handle CFSTR relocations.
+ // Create our own dynamic string instead and release it below.
+ CFStringRef _kSecTrustSettingsResult = CFStringCreateWithCString(
+ NULL, "kSecTrustSettingsResult", kCFStringEncodingUTF8);
+ CFStringRef _kSecTrustSettingsPolicy = CFStringCreateWithCString(
+ NULL, "kSecTrustSettingsPolicy", kCFStringEncodingUTF8);
+ CFStringRef _kSecTrustSettingsPolicyString = CFStringCreateWithCString(
+ NULL, "kSecTrustSettingsPolicyString", kCFStringEncodingUTF8);
+
+ CFIndex m; SInt32 result = 0;
+ for (m = 0; m < CFArrayGetCount(trustSettings); m++) {
+ CFDictionaryRef tSetting = (CFDictionaryRef)CFArrayGetValueAtIndex(trustSettings, m);
+
+ // First, check if this trust setting is constrained to a non-SSL policy.
+ SecPolicyRef policyRef;
+ if (CFDictionaryGetValueIfPresent(tSetting, _kSecTrustSettingsPolicy, (const void**)&policyRef)) {
+ if (!isSSLPolicy(policyRef)) {
+ continue;
+ }
}
- // Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport.
- // Once we support weak imports via cgo we should prefer that, and fall back to this
- // for older systems.
- err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
- if (err != noErr) {
+
+ if (CFDictionaryContainsKey(tSetting, _kSecTrustSettingsPolicyString)) {
+ // Restricted to a hostname, not a root.
continue;
}
- if (data != NULL) {
- CFDataAppendBytes(combinedData, CFDataGetBytePtr(data), CFDataGetLength(data));
- CFRelease(data);
+
+ CFNumberRef cfNum;
+ if (CFDictionaryGetValueIfPresent(tSetting, _kSecTrustSettingsResult, (const void**)&cfNum)) {
+ CFNumberGetValue(cfNum, kCFNumberSInt32Type, &result);
+ } else {
+ // > If this key is not present, a default value of
+ // > kSecTrustSettingsResultTrustRoot is assumed.
+ result = kSecTrustSettingsResultTrustRoot;
+ }
+
+ // If multiple dictionaries match, we are supposed to "OR" them,
+ // the semantics of which are not clear. Since TrustRoot and TrustAsRoot
+ // are mutually exclusive, Deny should probably override, and Invalid and
+ // Unspecified be overridden; approximate this by stopping at the first
+ // TrustRoot, TrustAsRoot, or Deny.
+ if (result == kSecTrustSettingsResultTrustRoot) {
+ break;
+ } else if (result == kSecTrustSettingsResultTrustAsRoot) {
+ break;
+ } else if (result == kSecTrustSettingsResultDeny) {
+ break;
}
}
- CFRelease(certs);
- *pemRoots = combinedData;
- return 0;
+
+ // If trust settings are present, but none of them match the policy...
+ // the docs don't tell us what to do.
+ //
+ // "Trust settings for a given use apply if any of the dictionaries in the
+ // certificate’s trust settings array satisfies the specified use." suggests
+ // that it's as if there were no trust settings at all, so we should probably
+ // fall back to the admin trust settings. TODO.
+ if (result == 0) {
+ result = kSecTrustSettingsResultUnspecified;
+ }
+
+ CFRelease(_kSecTrustSettingsPolicy);
+ CFRelease(_kSecTrustSettingsPolicyString);
+ CFRelease(_kSecTrustSettingsResult);
+ CFRelease(trustSettings);
+
+ return result;
}
-// useOldCodeCTX509 reports whether the running machine is OS X 10.8 Mountain Lion
-// or older. We only support Mountain Lion and higher, but we'll at least try our
-// best on older machines and continue to use the old code path.
-//
-// See golang.org/issue/16473
-int useOldCodeCTX509() {
- char str[256];
- size_t size = sizeof(str);
- memset(str, 0, size);
- sysctlbyname("kern.osrelease", str, &size, NULL, 0);
- // OS X 10.8 is osrelease "12.*", 10.7 is 11.*, 10.6 is 10.*.
- // We never supported things before that.
- return memcmp(str, "12.", 3) == 0 || memcmp(str, "11.", 3) == 0 || memcmp(str, "10.", 3) == 0;
+// isRootCertificate reports whether Subject and Issuer match.
+static Boolean isRootCertificate(SecCertificateRef cert, CFErrorRef *errRef) {
+ CFDataRef subjectName = SecCertificateCopyNormalizedSubjectContent(cert, errRef);
+ if (*errRef != NULL) {
+ return false;
+ }
+ CFDataRef issuerName = SecCertificateCopyNormalizedIssuerContent(cert, errRef);
+ if (*errRef != NULL) {
+ CFRelease(subjectName);
+ return false;
+ }
+ Boolean equal = CFEqual(subjectName, issuerName);
+ CFRelease(subjectName);
+ CFRelease(issuerName);
+ return equal;
}
-// FetchPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates.
+// CopyPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates
+// for the kSecTrustSettingsPolicy SSL.
//
// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root
// certificates of the system. On failure, the function returns -1.
@@ -77,31 +160,32 @@ int useOldCodeCTX509() {
//
// Note: The CFDataRef returned in pemRoots and untrustedPemRoots must
// be released (using CFRelease) after we've consumed its content.
-int FetchPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots) {
- if (useOldCodeCTX509()) {
- return FetchPEMRootsCTX509_MountainLion(pemRoots);
+static int CopyPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots, bool debugDarwinRoots) {
+ int i;
+
+ if (debugDarwinRoots) {
+ fprintf(stderr, "crypto/x509: kSecTrustSettingsResultInvalid = %d\n", kSecTrustSettingsResultInvalid);
+ fprintf(stderr, "crypto/x509: kSecTrustSettingsResultTrustRoot = %d\n", kSecTrustSettingsResultTrustRoot);
+ fprintf(stderr, "crypto/x509: kSecTrustSettingsResultTrustAsRoot = %d\n", kSecTrustSettingsResultTrustAsRoot);
+ fprintf(stderr, "crypto/x509: kSecTrustSettingsResultDeny = %d\n", kSecTrustSettingsResultDeny);
+ fprintf(stderr, "crypto/x509: kSecTrustSettingsResultUnspecified = %d\n", kSecTrustSettingsResultUnspecified);
}
// Get certificates from all domains, not just System, this lets
// the user add CAs to their "login" keychain, and Admins to add
// to the "System" keychain
SecTrustSettingsDomain domains[] = { kSecTrustSettingsDomainSystem,
- kSecTrustSettingsDomainAdmin,
- kSecTrustSettingsDomainUser };
+ kSecTrustSettingsDomainAdmin, kSecTrustSettingsDomainUser };
int numDomains = sizeof(domains)/sizeof(SecTrustSettingsDomain);
- if (pemRoots == NULL) {
+ if (pemRoots == NULL || untrustedPemRoots == NULL) {
return -1;
}
- // kSecTrustSettingsResult is defined as CFSTR("kSecTrustSettingsResult"),
- // but the Go linker's internal linking mode can't handle CFSTR relocations.
- // Create our own dynamic string instead and release it below.
- CFStringRef policy = CFStringCreateWithCString(NULL, "kSecTrustSettingsResult", kCFStringEncodingUTF8);
-
CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
CFMutableDataRef combinedUntrustedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
- for (int i = 0; i < numDomains; i++) {
+ for (i = 0; i < numDomains; i++) {
+ int j;
CFArrayRef certs = NULL;
OSStatus err = SecTrustSettingsCopyCertificates(domains[i], &certs);
if (err != noErr) {
@@ -109,104 +193,86 @@ int FetchPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots) {
}
CFIndex numCerts = CFArrayGetCount(certs);
- for (int j = 0; j < numCerts; j++) {
- CFDataRef data = NULL;
- CFErrorRef errRef = NULL;
- CFArrayRef trustSettings = NULL;
+ for (j = 0; j < numCerts; j++) {
SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, j);
if (cert == NULL) {
continue;
}
- // We only want trusted certs.
- int untrusted = 0;
- int trustAsRoot = 0;
- int trustRoot = 0;
- if (i == 0) {
- trustAsRoot = 1;
- } else {
+
+ SInt32 result;
+ if (domains[i] == kSecTrustSettingsDomainSystem) {
// Certs found in the system domain are always trusted. If the user
// configures "Never Trust" on such a cert, it will also be found in the
// admin or user domain, causing it to be added to untrustedPemRoots. The
// Go code will then clean this up.
-
- // Trust may be stored in any of the domains. According to Apple's
- // SecTrustServer.c, "user trust settings overrule admin trust settings",
- // so take the last trust settings array we find.
- // Skip the system domain since it is always trusted.
- for (int k = i; k < numDomains; k++) {
- CFArrayRef domainTrustSettings = NULL;
- err = SecTrustSettingsCopyTrustSettings(cert, domains[k], &domainTrustSettings);
- if (err == errSecSuccess && domainTrustSettings != NULL) {
- if (trustSettings) {
- CFRelease(trustSettings);
- }
- trustSettings = domainTrustSettings;
+ result = kSecTrustSettingsResultTrustRoot;
+ } else {
+ result = sslTrustSettingsResult(cert);
+ if (debugDarwinRoots) {
+ CFErrorRef errRef = NULL;
+ CFStringRef summary = SecCertificateCopyShortDescription(NULL, cert, &errRef);
+ if (errRef != NULL) {
+ fprintf(stderr, "crypto/x509: SecCertificateCopyShortDescription failed\n");
+ CFRelease(errRef);
+ continue;
}
- }
- if (trustSettings == NULL) {
- // "this certificate must be verified to a known trusted certificate"; aka not a root.
- continue;
- }
- for (CFIndex k = 0; k < CFArrayGetCount(trustSettings); k++) {
- CFNumberRef cfNum;
- CFDictionaryRef tSetting = (CFDictionaryRef)CFArrayGetValueAtIndex(trustSettings, k);
- if (CFDictionaryGetValueIfPresent(tSetting, policy, (const void**)&cfNum)){
- SInt32 result = 0;
- CFNumberGetValue(cfNum, kCFNumberSInt32Type, &result);
- // TODO: The rest of the dictionary specifies conditions for evaluation.
- if (result == kSecTrustSettingsResultDeny) {
- untrusted = 1;
- } else if (result == kSecTrustSettingsResultTrustAsRoot) {
- trustAsRoot = 1;
- } else if (result == kSecTrustSettingsResultTrustRoot) {
- trustRoot = 1;
- }
+
+ CFIndex length = CFStringGetLength(summary);
+ CFIndex maxSize = CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1;
+ char *buffer = malloc(maxSize);
+ if (CFStringGetCString(summary, buffer, maxSize, kCFStringEncodingUTF8)) {
+ fprintf(stderr, "crypto/x509: %s returned %d\n", buffer, (int)result);
}
+ free(buffer);
+ CFRelease(summary);
}
- CFRelease(trustSettings);
}
- if (trustRoot) {
- // We only want to add Root CAs, so make sure Subject and Issuer Name match
- CFDataRef subjectName = SecCertificateCopyNormalizedSubjectContent(cert, &errRef);
- if (errRef != NULL) {
- CFRelease(errRef);
- continue;
- }
- CFDataRef issuerName = SecCertificateCopyNormalizedIssuerContent(cert, &errRef);
- if (errRef != NULL) {
- CFRelease(subjectName);
- CFRelease(errRef);
+ CFMutableDataRef appendTo;
+ // > Note the distinction between the results kSecTrustSettingsResultTrustRoot
+ // > and kSecTrustSettingsResultTrustAsRoot: The former can only be applied to
+ // > root (self-signed) certificates; the latter can only be applied to
+ // > non-root certificates.
+ if (result == kSecTrustSettingsResultTrustRoot) {
+ CFErrorRef errRef = NULL;
+ if (!isRootCertificate(cert, &errRef) || errRef != NULL) {
+ if (errRef != NULL) CFRelease(errRef);
continue;
}
- Boolean equal = CFEqual(subjectName, issuerName);
- CFRelease(subjectName);
- CFRelease(issuerName);
- if (!equal) {
+
+ appendTo = combinedData;
+ } else if (result == kSecTrustSettingsResultTrustAsRoot) {
+ CFErrorRef errRef = NULL;
+ if (isRootCertificate(cert, &errRef) || errRef != NULL) {
+ if (errRef != NULL) CFRelease(errRef);
continue;
}
+
+ appendTo = combinedData;
+ } else if (result == kSecTrustSettingsResultDeny) {
+ appendTo = combinedUntrustedData;
+ } else if (result == kSecTrustSettingsResultUnspecified) {
+ // Certificates with unspecified trust should probably be added to a pool of
+ // intermediates for chain building, or checked for transitive trust and
+ // added to the root pool (which is an imprecise approximation because it
+ // cuts chains short) but we don't support either at the moment. TODO.
+ continue;
+ } else {
+ continue;
}
- // Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport.
- // Once we support weak imports via cgo we should prefer that, and fall back to this
- // for older systems.
- err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
+ CFDataRef data = NULL;
+ err = SecItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
if (err != noErr) {
continue;
}
-
if (data != NULL) {
- if (!trustRoot && !trustAsRoot) {
- untrusted = 1;
- }
- CFMutableDataRef appendTo = untrusted ? combinedUntrustedData : combinedData;
CFDataAppendBytes(appendTo, CFDataGetBytePtr(data), CFDataGetLength(data));
CFRelease(data);
}
}
CFRelease(certs);
}
- CFRelease(policy);
*pemRoots = combinedData;
*untrustedPemRoots = combinedUntrustedData;
return 0;
@@ -219,25 +285,22 @@ import (
)
func loadSystemRoots() (*CertPool, error) {
- roots := NewCertPool()
-
- var data C.CFDataRef
- setNilCFRef(&data)
- var untrustedData C.CFDataRef
- setNilCFRef(&untrustedData)
- err := C.FetchPEMRootsCTX509(&data, &untrustedData)
+ var data, untrustedData C.CFDataRef
+ err := C.CopyPEMRootsCTX509(&data, &untrustedData, C.bool(debugDarwinRoots))
if err == -1 {
- // TODO: better error message
return nil, errors.New("crypto/x509: failed to load darwin system roots with cgo")
}
-
defer C.CFRelease(C.CFTypeRef(data))
+ defer C.CFRelease(C.CFTypeRef(untrustedData))
+
buf := C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(data)), C.int(C.CFDataGetLength(data)))
+ roots := NewCertPool()
roots.AppendCertsFromPEM(buf)
- if isNilCFRef(untrustedData) {
+
+ if C.CFDataGetLength(untrustedData) == 0 {
return roots, nil
}
- defer C.CFRelease(C.CFTypeRef(untrustedData))
+
buf = C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(untrustedData)), C.int(C.CFDataGetLength(untrustedData)))
untrustedRoots := NewCertPool()
untrustedRoots.AppendCertsFromPEM(buf)
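
The rewritten cgo path classifies each keychain certificate by its SSL trust-settings result and appends its PEM bytes to either the trusted or the untrusted buffer. A Go-language sketch of just that classification, using hypothetical stand-in constants for Security.framework's kSecTrustSettingsResult* values (the real decision happens in the C code above):

```go
package main

import "fmt"

// trustResult and its constants are illustrative stand-ins for the
// Security.framework kSecTrustSettingsResult* values used in the C code.
type trustResult int

const (
	trustRoot   trustResult = iota + 1 // kSecTrustSettingsResultTrustRoot
	trustAsRoot                        // kSecTrustSettingsResultTrustAsRoot
	deny                               // kSecTrustSettingsResultDeny
	unspecified                        // kSecTrustSettingsResultUnspecified
)

// classify mirrors the branch structure of CopyPEMRootsCTX509: TrustRoot must
// be self-signed, TrustAsRoot must not be, Deny goes to the untrusted pool,
// and Unspecified certificates are skipped entirely.
func classify(result trustResult, selfSigned bool) (trusted, untrusted, skip bool) {
	switch {
	case result == trustRoot && selfSigned:
		return true, false, false
	case result == trustAsRoot && !selfSigned:
		return true, false, false
	case result == deny:
		return false, true, false
	default: // unspecified, or a result/self-signed mismatch
		return false, false, true
	}
}

func main() {
	trusted, _, _ := classify(trustRoot, true)
	fmt.Println("self-signed TrustRoot cert kept:", trusted)
}
```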
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go
index bc35a1cf21..4330ae97a4 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go
@@ -13,7 +13,6 @@ import (
"encoding/pem"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"os/user"
@@ -22,7 +21,7 @@ import (
"sync"
)
-var debugExecDarwinRoots = strings.Contains(os.Getenv("GODEBUG"), "x509roots=1")
+var debugDarwinRoots = strings.Contains(os.Getenv("GODEBUG"), "x509roots=1")
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
return nil, nil
@@ -38,42 +37,41 @@ func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate
//
// The strategy is as follows:
//
-// 1. Run "security trust-settings-export" and "security
-// trust-settings-export -d" to discover the set of certs with some
-// user-tweaked trust policy. We're too lazy to parse the XML (at
-// least at this stage of Go 1.8) to understand what the trust
-// policy actually is. We just learn that there is _some_ policy.
+// 1. Run "security trust-settings-export" and "security
+// trust-settings-export -d" to discover the set of certs with some
+// user-tweaked trust policy. We're too lazy to parse the XML
+// (Issue 26830) to understand what the trust
+// policy actually is. We just learn that there is _some_ policy.
//
-// 2. Run "security find-certificate" to dump the list of system root
-// CAs in PEM format.
+// 2. Run "security find-certificate" to dump the list of system root
+// CAs in PEM format.
//
-// 3. For each dumped cert, conditionally verify it with "security
-// verify-cert" if that cert was in the set discovered in Step 1.
-// Without the Step 1 optimization, running "security verify-cert"
-// 150-200 times takes 3.5 seconds. With the optimization, the
-// whole process takes about 180 milliseconds with 1 untrusted root
-// CA. (Compared to 110ms in the cgo path)
+// 3. For each dumped cert, conditionally verify it with "security
+// verify-cert" if that cert was in the set discovered in Step 1.
+// Without the Step 1 optimization, running "security verify-cert"
+// 150-200 times takes 3.5 seconds. With the optimization, the
+// whole process takes about 180 milliseconds with 1 untrusted root
+// CA. (Compared to 110ms in the cgo path)
func execSecurityRoots() (*CertPool, error) {
hasPolicy, err := getCertsWithTrustPolicy()
if err != nil {
return nil, err
}
- if debugExecDarwinRoots {
- println(fmt.Sprintf("crypto/x509: %d certs have a trust policy", len(hasPolicy)))
+ if debugDarwinRoots {
+ fmt.Fprintf(os.Stderr, "crypto/x509: %d certs have a trust policy\n", len(hasPolicy))
}
- args := []string{"find-certificate", "-a", "-p",
- "/System/Library/Keychains/SystemRootCertificates.keychain",
- "/Library/Keychains/System.keychain",
- }
+ keychains := []string{"/Library/Keychains/System.keychain"}
+ // Note that this results in trusting roots from $HOME/... (the environment
+ // variable), which might not be expected.
u, err := user.Current()
if err != nil {
- if debugExecDarwinRoots {
- println(fmt.Sprintf("crypto/x509: get current user: %v", err))
+ if debugDarwinRoots {
+ fmt.Fprintf(os.Stderr, "crypto/x509: can't get user home directory: %v\n", err)
}
} else {
- args = append(args,
+ keychains = append(keychains,
filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain"),
// Fresh installs of Sierra use a slightly different path for the login keychain
@@ -81,21 +79,19 @@ func execSecurityRoots() (*CertPool, error) {
)
}
- cmd := exec.Command("/usr/bin/security", args...)
- data, err := cmd.Output()
- if err != nil {
- return nil, err
+ type rootCandidate struct {
+ c *Certificate
+ system bool
}
var (
mu sync.Mutex
roots = NewCertPool()
numVerified int // number of execs of 'security verify-cert', for debug stats
+ wg sync.WaitGroup
+ verifyCh = make(chan rootCandidate)
)
- blockCh := make(chan *pem.Block)
- var wg sync.WaitGroup
-
// Using 4 goroutines to pipe into verify-cert seems to be
// about the best we can do. The verify-cert binary seems to
// just RPC to another server with coarse locking anyway, so
@@ -109,31 +105,62 @@ func execSecurityRoots() (*CertPool, error) {
wg.Add(1)
go func() {
defer wg.Done()
- for block := range blockCh {
- cert, err := ParseCertificate(block.Bytes)
- if err != nil {
- continue
- }
- sha1CapHex := fmt.Sprintf("%X", sha1.Sum(block.Bytes))
+ for cert := range verifyCh {
+ sha1CapHex := fmt.Sprintf("%X", sha1.Sum(cert.c.Raw))
- valid := true
+ var valid bool
verifyChecks := 0
if hasPolicy[sha1CapHex] {
verifyChecks++
- if !verifyCertWithSystem(block, cert) {
- valid = false
- }
+ valid = verifyCertWithSystem(cert.c)
+ } else {
+ // Certificates not in SystemRootCertificates without user
+ // or admin trust settings are not trusted.
+ valid = cert.system
}
mu.Lock()
numVerified += verifyChecks
if valid {
- roots.AddCert(cert)
+ roots.AddCert(cert.c)
}
mu.Unlock()
}
}()
}
+ err = forEachCertInKeychains(keychains, func(cert *Certificate) {
+ verifyCh <- rootCandidate{c: cert, system: false}
+ })
+ if err != nil {
+ close(verifyCh)
+ return nil, err
+ }
+ err = forEachCertInKeychains([]string{
+ "/System/Library/Keychains/SystemRootCertificates.keychain",
+ }, func(cert *Certificate) {
+ verifyCh <- rootCandidate{c: cert, system: true}
+ })
+ if err != nil {
+ close(verifyCh)
+ return nil, err
+ }
+ close(verifyCh)
+ wg.Wait()
+
+ if debugDarwinRoots {
+ fmt.Fprintf(os.Stderr, "crypto/x509: ran security verify-cert %d times\n", numVerified)
+ }
+
+ return roots, nil
+}
+
+func forEachCertInKeychains(paths []string, f func(*Certificate)) error {
+ args := append([]string{"find-certificate", "-a", "-p"}, paths...)
+ cmd := exec.Command("/usr/bin/security", args...)
+ data, err := cmd.Output()
+ if err != nil {
+ return err
+ }
for len(data) > 0 {
var block *pem.Block
block, data = pem.Decode(data)
@@ -143,24 +170,21 @@ func execSecurityRoots() (*CertPool, error) {
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
- blockCh <- block
- }
- close(blockCh)
- wg.Wait()
-
- if debugExecDarwinRoots {
- mu.Lock()
- defer mu.Unlock()
- println(fmt.Sprintf("crypto/x509: ran security verify-cert %d times", numVerified))
+ cert, err := ParseCertificate(block.Bytes)
+ if err != nil {
+ continue
+ }
+ f(cert)
}
-
- return roots, nil
+ return nil
}
-func verifyCertWithSystem(block *pem.Block, cert *Certificate) bool {
- data := pem.EncodeToMemory(block)
+func verifyCertWithSystem(cert *Certificate) bool {
+ data := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE", Bytes: cert.Raw,
+ })
- f, err := ioutil.TempFile("", "cert")
+ f, err := os.CreateTemp("", "cert")
if err != nil {
fmt.Fprintf(os.Stderr, "can't create temporary file for cert: %v", err)
return false
@@ -174,19 +198,19 @@ func verifyCertWithSystem(block *pem.Block, cert *Certificate) bool {
fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v", err)
return false
}
- cmd := exec.Command("/usr/bin/security", "verify-cert", "-c", f.Name(), "-l", "-L")
+ cmd := exec.Command("/usr/bin/security", "verify-cert", "-p", "ssl", "-c", f.Name(), "-l", "-L")
var stderr bytes.Buffer
- if debugExecDarwinRoots {
+ if debugDarwinRoots {
cmd.Stderr = &stderr
}
if err := cmd.Run(); err != nil {
- if debugExecDarwinRoots {
- println(fmt.Sprintf("crypto/x509: verify-cert rejected %s: %q", cert.Subject.CommonName, bytes.TrimSpace(stderr.Bytes())))
+ if debugDarwinRoots {
+ fmt.Fprintf(os.Stderr, "crypto/x509: verify-cert rejected %s: %q\n", cert.Subject, bytes.TrimSpace(stderr.Bytes()))
}
return false
}
- if debugExecDarwinRoots {
- println(fmt.Sprintf("crypto/x509: verify-cert approved %s", cert.Subject.CommonName))
+ if debugDarwinRoots {
+ fmt.Fprintf(os.Stderr, "crypto/x509: verify-cert approved %s\n", cert.Subject)
}
return true
}
@@ -199,7 +223,7 @@ func verifyCertWithSystem(block *pem.Block, cert *Certificate) bool {
// settings. This code is only used for cgo-disabled builds.
func getCertsWithTrustPolicy() (map[string]bool, error) {
set := map[string]bool{}
- td, err := ioutil.TempDir("", "x509trustpolicy")
+ td, err := os.MkdirTemp("", "x509trustpolicy")
if err != nil {
return nil, err
}
@@ -218,8 +242,8 @@ func getCertsWithTrustPolicy() (map[string]bool, error) {
// Rather than match on English substrings that are probably
// localized on macOS, just interpret any failure to mean that
// there are no trust settings.
- if debugExecDarwinRoots {
- println(fmt.Sprintf("crypto/x509: exec %q: %v, %s", cmd.Args, err, stderr.Bytes()))
+ if debugDarwinRoots {
+ fmt.Fprintf(os.Stderr, "crypto/x509: exec %q: %v, %s\n", cmd.Args, err, stderr.Bytes())
}
return nil
}
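
All of this tracing is gated on debugDarwinRoots, which is computed once from GODEBUG in a package-level var initializer, so the flag must be in the environment before the process starts. A minimal sketch, assuming the fork exposes SystemCertPool like the standard library:

```go
package main

import (
	"fmt"

	"github.com/google/certificate-transparency-go/x509"
)

// Run as: GODEBUG=x509roots=1 ./thisbinary
// Setting the variable after startup has no effect, because debugDarwinRoots
// is evaluated in a package-level var initializer.
func main() {
	pool, err := x509.SystemCertPool() // assumed to exist here as in crypto/x509
	if err != nil {
		panic(err)
	}
	fmt.Println("system roots loaded:", pool != nil)
	// With the flag set, the loader traces lines like
	//   crypto/x509: verify-cert approved <subject>
	// to stderr while it runs.
}
```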
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go
index fcbbd6b170..5c93349b0b 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build cgo && darwin && (arm || arm64 || ios)
// +build cgo
// +build darwin
// +build arm arm64 ios
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_js.go b/vendor/github.com/google/certificate-transparency-go/x509/root_js.go
new file mode 100644
index 0000000000..4240207a0a
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_js.go
@@ -0,0 +1,19 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js && wasm
+// +build js,wasm
+
+package x509
+
+// Possible certificate files; stop after finding one.
+var certFiles = []string{}
+
+func loadSystemRoots() (*CertPool, error) {
+ return NewCertPool(), nil
+}
+
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go b/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go
index aa1785e4c6..267775dc5f 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go
@@ -11,4 +11,5 @@ var certFiles = []string{
"/etc/ssl/ca-bundle.pem", // OpenSUSE
"/etc/pki/tls/cacert.pem", // OpenELEC
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7
+ "/etc/ssl/cert.pem", // Alpine Linux
}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_nacl.go b/vendor/github.com/google/certificate-transparency-go/x509/root_nacl.go
deleted file mode 100644
index 4413f64738..0000000000
--- a/vendor/github.com/google/certificate-transparency-go/x509/root_nacl.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x509
-
-// Possible certificate files; stop after finding one.
-var certFiles = []string{}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go
index 2ac4666aff..2ee1d5ce80 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build !cgo
// +build !cgo
package x509
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go b/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go
index ebeb7dfccd..2bdb2fe713 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build plan9
// +build plan9
package x509
import (
- "io/ioutil"
"os"
)
@@ -24,7 +24,7 @@ func loadSystemRoots() (*CertPool, error) {
roots := NewCertPool()
var bestErr error
for _, file := range certFiles {
- data, err := ioutil.ReadFile(file)
+ data, err := os.ReadFile(file)
if err == nil {
roots.AppendCertsFromPEM(data)
return roots, nil
@@ -33,5 +33,8 @@ func loadSystemRoots() (*CertPool, error) {
bestErr = err
}
}
+ if bestErr == nil {
+ return roots, nil
+ }
return nil, bestErr
}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go b/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go
index 65b5a5fdbc..d00842a81d 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build dragonfly freebsd linux nacl netbsd openbsd solaris
+//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris
+// +build dragonfly freebsd linux netbsd openbsd solaris
package x509
import (
- "io/ioutil"
"os"
)
@@ -45,7 +45,7 @@ func loadSystemRoots() (*CertPool, error) {
var firstErr error
for _, file := range files {
- data, err := ioutil.ReadFile(file)
+ data, err := os.ReadFile(file)
if err == nil {
roots.AppendCertsFromPEM(data)
break
@@ -61,7 +61,7 @@ func loadSystemRoots() (*CertPool, error) {
}
for _, directory := range dirs {
- fis, err := ioutil.ReadDir(directory)
+ fis, err := os.ReadDir(directory)
if err != nil {
if firstErr == nil && !os.IsNotExist(err) {
firstErr = err
@@ -70,7 +70,7 @@ func loadSystemRoots() (*CertPool, error) {
}
rootsAdded := false
for _, fi := range fis {
- data, err := ioutil.ReadFile(directory + "/" + fi.Name())
+ data, err := os.ReadFile(directory + "/" + fi.Name())
if err == nil && roots.AppendCertsFromPEM(data) {
rootsAdded = true
}
@@ -80,7 +80,7 @@ func loadSystemRoots() (*CertPool, error) {
}
}
- if len(roots.certs) > 0 {
+ if len(roots.certs) > 0 || firstErr == nil {
return roots, nil
}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go b/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go
index 304ad3a679..39ec95ef3a 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go
@@ -61,15 +61,15 @@ func extractSimpleChain(simpleChain **syscall.CertSimpleChain, count int) (chain
return nil, errors.New("x509: invalid simple chain")
}
- simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:]
+ simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:count:count]
lastChain := simpleChains[count-1]
- elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:]
+ elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:lastChain.NumElements:lastChain.NumElements]
for i := 0; i < int(lastChain.NumElements); i++ {
// Copy the buf, since ParseCertificate does not create its own copy.
cert := elements[i].CertContext
- encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:]
+ encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:cert.Length:cert.Length]
buf := make([]byte, cert.Length)
- copy(buf, encodedCert[:])
+ copy(buf, encodedCert)
parsedCert, err := ParseCertificate(buf)
if err != nil {
return nil, err
@@ -219,17 +219,37 @@ func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate
if err != nil {
return nil, err
}
+ if len(chain) < 1 {
+ return nil, errors.New("x509: internal error: system verifier returned an empty chain")
+ }
- chains = append(chains, chain)
+ // Mitigate CVE-2020-0601, where the Windows system verifier might be
+ // tricked into using custom curve parameters for a trusted root, by
+ // double-checking all ECDSA signatures. If the system was tricked into
+ // using spoofed parameters, the signature will be invalid for the correct
+ // ones we parsed. (We don't support custom curves ourselves.)
+ for i, parent := range chain[1:] {
+ if parent.PublicKeyAlgorithm != ECDSA {
+ continue
+ }
+ if err := parent.CheckSignature(chain[i].SignatureAlgorithm,
+ chain[i].RawTBSCertificate, chain[i].Signature); err != nil {
+ return nil, err
+ }
+ }
- return chains, nil
+ return [][]*Certificate{chain}, nil
}
func loadSystemRoots() (*CertPool, error) {
// TODO: restore this functionality on Windows. We tried to do
// it in Go 1.8 but had to revert it. See Issue 18609.
// Returning (nil, nil) was the old behavior, prior to CL 30578.
- return nil, nil
+ // The if statement here avoids vet complaining about
+ // unreachable code below.
+ if true {
+ return nil, nil
+ }
const CRYPT_E_NOT_FOUND = 0x80092004
@@ -255,7 +275,7 @@ func loadSystemRoots() (*CertPool, error) {
break
}
// Copy the buf, since ParseCertificate does not create its own copy.
- buf := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:]
+ buf := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:cert.Length:cert.Length]
buf2 := make([]byte, cert.Length)
copy(buf2, buf)
if c, err := ParseCertificate(buf2); err == nil {
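
The mitigation re-verifies every ECDSA-signed link with the curve parameters Go itself parsed, so parameters spoofed past the Windows verifier fail CheckSignature. A standalone sketch of the same double-check, written as an illustrative helper over a leaf-first chain:

```go
package main

import (
	"github.com/google/certificate-transparency-go/x509"
)

// doubleCheckECDSA is an illustrative helper mirroring the loop added above.
// chain is ordered leaf first, so chain[i] is signed by chain[i+1]; spoofed
// curve parameters on the parent make CheckSignature fail, because Go
// verifies with the parameters it parsed itself.
func doubleCheckECDSA(chain []*x509.Certificate) error {
	for i, parent := range chain[1:] {
		if parent.PublicKeyAlgorithm != x509.ECDSA {
			continue
		}
		child := chain[i] // ranging over chain[1:] shifts indices by one
		if err := parent.CheckSignature(child.SignatureAlgorithm,
			child.RawTBSCertificate, child.Signature); err != nil {
			return err
		}
	}
	return nil
}

func main() {} // illustrative only; wire this in after a platform verifier returns a chain
```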
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/rpki.go b/vendor/github.com/google/certificate-transparency-go/x509/rpki.go
new file mode 100644
index 0000000000..520d6dc3ab
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/rpki.go
@@ -0,0 +1,242 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "github.com/google/certificate-transparency-go/asn1"
+)
+
+// IPAddressPrefix describes an IP address prefix as an ASN.1 bit string,
+// where the BitLength field holds the prefix length.
+type IPAddressPrefix asn1.BitString
+
+// IPAddressRange describes an (inclusive) IP address range.
+type IPAddressRange struct {
+ Min IPAddressPrefix
+ Max IPAddressPrefix
+}
+
+// Most relevant values for AFI from:
+// http://www.iana.org/assignments/address-family-numbers.
+const (
+ IPv4AddressFamilyIndicator = uint16(1)
+ IPv6AddressFamilyIndicator = uint16(2)
+)
+
+// IPAddressFamilyBlocks describes a set of ranges of IP addresses.
+type IPAddressFamilyBlocks struct {
+ // AFI holds an address family indicator from
+ // http://www.iana.org/assignments/address-family-numbers.
+ AFI uint16
+ // SAFI holds a subsequent address family indicator from
+ // http://www.iana.org/assignments/safi-namespace.
+ SAFI byte
+ // InheritFromIssuer indicates that the set of addresses should
+ // be taken from the issuer's certificate.
+ InheritFromIssuer bool
+ // AddressPrefixes holds prefixes if InheritFromIssuer is false.
+ AddressPrefixes []IPAddressPrefix
+ // AddressRanges holds ranges if InheritFromIssuer is false.
+ AddressRanges []IPAddressRange
+}
+
+// Internal types for asn1 unmarshaling.
+type ipAddressFamily struct {
+ AddressFamily []byte // 2-byte AFI plus optional 1 byte SAFI
+ Choice asn1.RawValue
+}
+
+// Internally, use raw asn1.BitString rather than the IPAddressPrefix
+// type alias (so that asn1.Unmarshal() decodes properly).
+type ipAddressRange struct {
+ Min asn1.BitString
+ Max asn1.BitString
+}
+
+func parseRPKIAddrBlocks(data []byte, nfe *NonFatalErrors) []*IPAddressFamilyBlocks {
+ // RFC 3779 2.2.3
+ // IPAddrBlocks ::= SEQUENCE OF IPAddressFamily
+ //
+ // IPAddressFamily ::= SEQUENCE { -- AFI & optional SAFI --
+ // addressFamily OCTET STRING (SIZE (2..3)),
+ // ipAddressChoice IPAddressChoice }
+ //
+ // IPAddressChoice ::= CHOICE {
+ // inherit NULL, -- inherit from issuer --
+ // addressesOrRanges SEQUENCE OF IPAddressOrRange }
+ //
+ // IPAddressOrRange ::= CHOICE {
+ // addressPrefix IPAddress,
+ // addressRange IPAddressRange }
+ //
+ // IPAddressRange ::= SEQUENCE {
+ // min IPAddress,
+ // max IPAddress }
+ //
+ // IPAddress ::= BIT STRING
+
+ var addrBlocks []ipAddressFamily
+ if rest, err := asn1.Unmarshal(data, &addrBlocks); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks extension: %v", err))
+ return nil
+ } else if len(rest) != 0 {
+ nfe.AddError(errors.New("trailing data after ipAddrBlocks extension"))
+ return nil
+ }
+
+ var results []*IPAddressFamilyBlocks
+ for i, block := range addrBlocks {
+ var fam IPAddressFamilyBlocks
+ if l := len(block.AddressFamily); l < 2 || l > 3 {
+ nfe.AddError(fmt.Errorf("invalid address family length (%d) for ipAddrBlock.addressFamily", l))
+ continue
+ }
+ fam.AFI = binary.BigEndian.Uint16(block.AddressFamily[0:2])
+ if len(block.AddressFamily) > 2 {
+ fam.SAFI = block.AddressFamily[2]
+ }
+ // IPAddressChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit)
+ // tagging of the alternatives -- here, either NULL or SEQUENCE OF.
+ if bytes.Equal(block.Choice.FullBytes, asn1.NullBytes) {
+ fam.InheritFromIssuer = true
+ results = append(results, &fam)
+ continue
+ }
+
+ var addrRanges []asn1.RawValue
+ if _, err := asn1.Unmarshal(block.Choice.FullBytes, &addrRanges); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges: %v", i, err))
+ continue
+ }
+ for j, ar := range addrRanges {
+ // Each IPAddressOrRange is a CHOICE where the alternatives have distinct (implicit)
+ // tags -- here, either BIT STRING or SEQUENCE.
+ switch ar.Tag {
+ case asn1.TagBitString:
+ // BIT STRING for single prefix IPAddress
+ var val asn1.BitString
+ if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressPrefix: %v", i, j, err))
+ continue
+ }
+ fam.AddressPrefixes = append(fam.AddressPrefixes, IPAddressPrefix(val))
+
+ case asn1.TagSequence:
+ var val ipAddressRange
+ if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressRange: %v", i, j, err))
+ continue
+ }
+ fam.AddressRanges = append(fam.AddressRanges, IPAddressRange{Min: IPAddressPrefix(val.Min), Max: IPAddressPrefix(val.Max)})
+
+ default:
+ nfe.AddError(fmt.Errorf("unexpected ASN.1 type in ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d]: %+v", i, j, ar))
+ }
+ }
+ results = append(results, &fam)
+ }
+ return results
+}
+
+// ASIDRange describes an inclusive range of AS Identifiers (AS numbers or routing
+// domain identifiers).
+type ASIDRange struct {
+ Min int
+ Max int
+}
+
+// ASIdentifiers describes a collection of AS Identifiers (AS numbers or routing
+// domain identifiers).
+type ASIdentifiers struct {
+ // InheritFromIssuer indicates that the set of AS identifiers should
+ // be taken from the issuer's certificate.
+ InheritFromIssuer bool
+ // ASIDs holds AS identifiers if InheritFromIssuer is false.
+ ASIDs []int
+ // ASIDRanges holds AS identifier ranges (inclusive) if InheritFromIssuer is false.
+ ASIDRanges []ASIDRange
+}
+
+type asIdentifiers struct {
+ ASNum asn1.RawValue `asn1:"optional,tag:0"`
+ RDI asn1.RawValue `asn1:"optional,tag:1"`
+}
+
+func parseASIDChoice(val asn1.RawValue, nfe *NonFatalErrors) *ASIdentifiers {
+ // RFC 3779 2.3.2
+ // ASIdentifierChoice ::= CHOICE {
+ // inherit NULL, -- inherit from issuer --
+ // asIdsOrRanges SEQUENCE OF ASIdOrRange }
+ // ASIdOrRange ::= CHOICE {
+ // id ASId,
+ // range ASRange }
+ // ASRange ::= SEQUENCE {
+ // min ASId,
+ // max ASId }
+ // ASId ::= INTEGER
+ if len(val.FullBytes) == 0 { // OPTIONAL
+ return nil
+ }
+ // ASIdentifierChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit)
+ // tagging of the alternatives -- here, either NULL or SEQUENCE OF.
+ if bytes.Equal(val.Bytes, asn1.NullBytes) {
+ return &ASIdentifiers{InheritFromIssuer: true}
+ }
+ var ids []asn1.RawValue
+ if rest, err := asn1.Unmarshal(val.Bytes, &ids); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges: %v", err))
+ return nil
+ } else if len(rest) != 0 {
+ nfe.AddError(errors.New("trailing data after ASIdentifiers.asIdsOrRanges"))
+ return nil
+ }
+ var asID ASIdentifiers
+ for i, id := range ids {
+ // Each ASIdOrRange is a CHOICE where the alternatives have distinct (implicit)
+ // tags -- here, either INTEGER or SEQUENCE.
+ switch id.Tag {
+ case asn1.TagInteger:
+ var val int
+ if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].id: %v", i, err))
+ continue
+ }
+ asID.ASIDs = append(asID.ASIDs, val)
+
+ case asn1.TagSequence:
+ var val ASIDRange
+ if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].range: %v", i, err))
+ continue
+ }
+ asID.ASIDRanges = append(asID.ASIDRanges, val)
+
+ default:
+ nfe.AddError(fmt.Errorf("unexpected value in ASIdentifiers.asIdsOrRanges[%d]: %+v", i, id))
+ }
+ }
+ return &asID
+}
+
+func parseRPKIASIdentifiers(data []byte, nfe *NonFatalErrors) (*ASIdentifiers, *ASIdentifiers) {
+ // RFC 3779 2.3.2
+ // ASIdentifiers ::= SEQUENCE {
+ // asnum [0] EXPLICIT ASIdentifierChoice OPTIONAL,
+ // rdi [1] EXPLICIT ASIdentifierChoice OPTIONAL}
+ var asIDs asIdentifiers
+ if rest, err := asn1.Unmarshal(data, &asIDs); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers extension: %v", err))
+ return nil, nil
+ } else if len(rest) != 0 {
+ nfe.AddError(errors.New("trailing data after ASIdentifiers extension"))
+ return nil, nil
+ }
+ return parseASIDChoice(asIDs.ASNum, nfe), parseASIDChoice(asIDs.RDI, nfe)
+}
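
Since an IPAddressPrefix is just an ASN.1 BIT STRING whose BitLength is the prefix length, converting one into a net.IPNet only requires padding the bytes out to the address size implied by the block's AFI. A minimal sketch; prefixToIPNet is a hypothetical helper, not part of this package:

```go
package main

import (
	"fmt"
	"net"

	"github.com/google/certificate-transparency-go/x509"
)

// prefixToIPNet is a hypothetical helper: it pads the BIT STRING bytes to
// the full address length for the family and uses BitLength as the mask.
func prefixToIPNet(p x509.IPAddressPrefix, afi uint16) *net.IPNet {
	size := net.IPv4len
	if afi == x509.IPv6AddressFamilyIndicator {
		size = net.IPv6len
	}
	ip := make(net.IP, size) // zero-padded beyond the prefix bytes
	copy(ip, p.Bytes)
	return &net.IPNet{IP: ip, Mask: net.CIDRMask(p.BitLength, size*8)}
}

func main() {
	// 10.0.0.0/8 encodes as a single byte (0x0a) with BitLength 8.
	p := x509.IPAddressPrefix{Bytes: []byte{0x0a}, BitLength: 8}
	fmt.Println(prefixToIPNet(p, x509.IPv4AddressFamilyIndicator)) // 10.0.0.0/8
}
```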
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/sec1.go b/vendor/github.com/google/certificate-transparency-go/x509/sec1.go
index ae4f81e560..d19407079f 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/sec1.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/sec1.go
@@ -18,8 +18,10 @@ const ecPrivKeyVersion = 1
// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure.
// References:
-// RFC 5915
-// SEC1 - http://www.secg.org/sec1-v2.pdf
+//
+// RFC 5915
+// SEC1 - http://www.secg.org/sec1-v2.pdf
+//
// Per RFC 5915 the NamedCurveOID is marked as ASN.1 OPTIONAL; however, in
// most cases it is not.
type ecPrivateKey struct {
@@ -29,12 +31,18 @@ type ecPrivateKey struct {
PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"`
}
-// ParseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
+// ParseECPrivateKey parses an EC private key in SEC 1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY".
func ParseECPrivateKey(der []byte) (*ecdsa.PrivateKey, error) {
return parseECPrivateKey(nil, der)
}
-// MarshalECPrivateKey marshals an EC private key into ASN.1, DER format.
+// MarshalECPrivateKey converts an EC private key to SEC 1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY".
+// For a more flexible key format which is not EC specific, use
+// MarshalPKCS8PrivateKey.
func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
oid, ok := OIDFromNamedCurve(key.Curve)
if !ok {
@@ -66,17 +74,24 @@ func marshalECPrivateKeyWithOID(key *ecdsa.PrivateKey, oid asn1.ObjectIdentifier
func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) {
var privKey ecPrivateKey
if _, err := asn1.Unmarshal(der, &privKey); err != nil {
+ if _, err := asn1.Unmarshal(der, &pkcs8{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParsePKCS8PrivateKey instead for this key format)")
+ }
+ if _, err := asn1.Unmarshal(der, &pkcs1PrivateKey{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format)")
+ }
return nil, errors.New("x509: failed to parse EC private key: " + err.Error())
}
if privKey.Version != ecPrivKeyVersion {
return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
}
+ var nfe NonFatalErrors
var curve elliptic.Curve
if namedCurveOID != nil {
- curve = namedCurveFromOID(*namedCurveOID)
+ curve = namedCurveFromOID(*namedCurveOID, &nfe)
} else {
- curve = namedCurveFromOID(privKey.NamedCurveOID)
+ curve = namedCurveFromOID(privKey.NamedCurveOID, &nfe)
}
if curve == nil {
return nil, errors.New("x509: unknown elliptic curve")
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/verify.go b/vendor/github.com/google/certificate-transparency-go/x509/verify.go
index beafc3b000..07118c2bf6 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/verify.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/verify.go
@@ -10,16 +10,17 @@ import (
"fmt"
"net"
"net/url"
+ "os"
"reflect"
"runtime"
- "strconv"
"strings"
"time"
"unicode/utf8"
-
- "github.com/google/certificate-transparency-go/asn1"
)
+// ignoreCN disables interpreting Common Name as a hostname. See issue 24151.
+var ignoreCN = strings.Contains(os.Getenv("GODEBUG"), "x509ignoreCN=1")
+
type InvalidReason int
const (
@@ -44,21 +45,25 @@ const (
NameMismatch
// NameConstraintsWithoutSANs results when a leaf certificate doesn't
// contain a Subject Alternative Name extension, but a CA certificate
- // contains name constraints.
+ // contains name constraints, and the Common Name can be interpreted as
+ // a hostname.
+ //
+ // You can avoid this error by setting the experimental GODEBUG environment
+ // variable to "x509ignoreCN=1", disabling Common Name matching entirely.
+ // This behavior might become the default in the future.
NameConstraintsWithoutSANs
// UnconstrainedName results when a CA certificate contains permitted
// name constraints, but the leaf certificate contains a name of an
// unsupported or unconstrained type.
UnconstrainedName
- // TooManyConstraints results when the number of comparision operations
+ // TooManyConstraints results when the number of comparison operations
// needed to check a certificate exceeds the limit set by
// VerifyOptions.MaxConstraintComparisions. This limit exists to
// prevent pathological certificates from consuming excessive amounts of
// CPU time to verify.
TooManyConstraints
// CANotAuthorizedForExtKeyUsage results when an intermediate or root
- // certificate does not permit an extended key usage that is claimed by
- // the leaf certificate.
+ // certificate does not permit a requested extended key usage.
CANotAuthorizedForExtKeyUsage
)
@@ -75,7 +80,7 @@ func (e CertificateInvalidError) Error() string {
case NotAuthorizedToSign:
return "x509: certificate is not authorized to sign other certificates"
case Expired:
- return "x509: certificate has expired or is not yet valid"
+ return "x509: certificate has expired or is not yet valid: " + e.Detail
case CANotAuthorizedForThisName:
return "x509: a root or intermediate certificate is not authorized to sign for this name: " + e.Detail
case CANotAuthorizedForExtKeyUsage:
@@ -83,7 +88,7 @@ func (e CertificateInvalidError) Error() string {
case TooManyIntermediates:
return "x509: too many intermediates for path length constraint"
case IncompatibleUsage:
- return "x509: certificate specifies an incompatible key usage: " + e.Detail
+ return "x509: certificate specifies an incompatible key usage"
case NameMismatch:
return "x509: issuer name does not match subject from issuing certificate"
case NameConstraintsWithoutSANs:
@@ -104,6 +109,12 @@ type HostnameError struct {
func (h HostnameError) Error() string {
c := h.Certificate
+ if !c.hasSANExtension() && !validHostname(c.Subject.CommonName) &&
+ matchHostnames(toLowerCaseASCII(c.Subject.CommonName), toLowerCaseASCII(h.Host)) {
+ // This would have validated, if it weren't for the validHostname check on Common Name.
+ return "x509: Common Name is not a valid hostname: " + c.Subject.CommonName
+ }
+
var valid string
if ip := net.ParseIP(h.Host); ip != nil {
// Trying to validate an IP
@@ -117,10 +128,10 @@ func (h HostnameError) Error() string {
valid += san.String()
}
} else {
- if c.hasSANExtension() {
- valid = strings.Join(c.DNSNames, ", ")
- } else {
+ if c.commonNameAsHostname() {
valid = c.Subject.CommonName
+ } else {
+ valid = strings.Join(c.DNSNames, ", ")
}
}
@@ -193,9 +204,8 @@ type VerifyOptions struct {
// list means ExtKeyUsageServerAuth. To accept any key usage, include
// ExtKeyUsageAny.
//
- // Certificate chains are required to nest extended key usage values,
- // irrespective of this value. This matches the Windows CryptoAPI behavior,
- // but not the spec.
+ // Certificate chains are required to nest these extended key usage values.
+ // (This matches the Windows CryptoAPI behavior, but not the spec.)
KeyUsages []ExtKeyUsage
// MaxConstraintComparisions is the maximum number of comparisons to
// perform when checking a given certificate's name constraints. If
@@ -219,10 +229,9 @@ type rfc2821Mailbox struct {
}
// parseRFC2821Mailbox parses an email address into local and domain parts,
-// based on the ABNF for a “Mailbox” from RFC 2821. According to
-// https://tools.ietf.org/html/rfc5280#section-4.2.1.6 that's correct for an
-// rfc822Name from a certificate: “The format of an rfc822Name is a "Mailbox"
-// as defined in https://tools.ietf.org/html/rfc2821#section-4.1.2”.
+// based on the ABNF for a “Mailbox” from RFC 2821. According to RFC 5280,
+// Section 4.2.1.6 that's correct for an rfc822Name from a certificate: “The
+// format of an rfc822Name is a "Mailbox" as defined in RFC 2821, Section 4.1.2”.
func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
if len(in) == 0 {
return mailbox, false
@@ -239,9 +248,8 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
// quoted-pair = ("\" text) / obs-qp
// text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text
//
- // (Names beginning with “obs-” are the obsolete syntax from
- // https://tools.ietf.org/html/rfc2822#section-4. Since it has
- // been 16 years, we no longer accept that.)
+ // (Names beginning with “obs-” are the obsolete syntax from RFC 2822,
+ // Section 4. Since it has been 16 years, we no longer accept that.)
in = in[1:]
QuotedString:
for {
@@ -295,7 +303,7 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
// Atom ("." Atom)*
NextChar:
for len(in) > 0 {
- // atext from https://tools.ietf.org/html/rfc2822#section-3.2.4
+ // atext from RFC 2822, Section 3.2.4
c := in[0]
switch {
@@ -331,7 +339,7 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
return mailbox, false
}
- // https://tools.ietf.org/html/rfc3696#section-3
+ // From RFC 3696, Section 3:
// “period (".") may also appear, but may not be used to start
// or end the local part, nor may two or more consecutive
// periods appear.”
@@ -368,7 +376,7 @@ func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) {
reverseLabels = append(reverseLabels, domain)
domain = ""
} else {
- reverseLabels = append(reverseLabels, domain[i+1:len(domain)])
+ reverseLabels = append(reverseLabels, domain[i+1:])
domain = domain[:i]
}
}
@@ -412,7 +420,7 @@ func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, erro
}
func matchURIConstraint(uri *url.URL, constraint string) (bool, error) {
- // https://tools.ietf.org/html/rfc5280#section-4.2.1.10
+ // From RFC 5280, Section 4.2.1.10:
// “a uniformResourceIdentifier that does not include an authority
// component with a host name specified as a fully qualified domain
// name (e.g., if the URI either does not include an authority
@@ -557,51 +565,6 @@ func (c *Certificate) checkNameConstraints(count *int,
return nil
}
-const (
- checkingAgainstIssuerCert = iota
- checkingAgainstLeafCert
-)
-
-// ekuPermittedBy returns true iff the given extended key usage is permitted by
-// the given EKU from a certificate. Normally, this would be a simple
-// comparison plus a special case for the “any” EKU. But, in order to support
-// existing certificates, some exceptions are made.
-func ekuPermittedBy(eku, certEKU ExtKeyUsage, context int) bool {
- if certEKU == ExtKeyUsageAny || eku == certEKU {
- return true
- }
-
- // Some exceptions are made to support existing certificates. Firstly,
- // the ServerAuth and SGC EKUs are treated as a group.
- mapServerAuthEKUs := func(eku ExtKeyUsage) ExtKeyUsage {
- if eku == ExtKeyUsageNetscapeServerGatedCrypto || eku == ExtKeyUsageMicrosoftServerGatedCrypto {
- return ExtKeyUsageServerAuth
- }
- return eku
- }
-
- eku = mapServerAuthEKUs(eku)
- certEKU = mapServerAuthEKUs(certEKU)
-
- if eku == certEKU {
- return true
- }
-
- // If checking a requested EKU against the list in a leaf certificate there
- // are fewer exceptions.
- if context == checkingAgainstLeafCert {
- return false
- }
-
- // ServerAuth in a CA permits ClientAuth in the leaf.
- return (eku == ExtKeyUsageClientAuth && certEKU == ExtKeyUsageServerAuth) ||
- // Any CA may issue an OCSP responder certificate.
- eku == ExtKeyUsageOCSPSigning ||
- // Code-signing CAs can use Microsoft's commercial and
- // kernel-mode EKUs.
- (eku == ExtKeyUsageMicrosoftCommercialCodeSigning || eku == ExtKeyUsageMicrosoftKernelCodeSigning) && certEKU == ExtKeyUsageCodeSigning
-}
-
// isValid performs validity checks on c given that it is a candidate to append
// to the chain in currentChain.
func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error {
@@ -621,8 +584,18 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
if now.IsZero() {
now = time.Now()
}
- if now.Before(c.NotBefore) || now.After(c.NotAfter) {
- return CertificateInvalidError{c, Expired, ""}
+ if now.Before(c.NotBefore) {
+ return CertificateInvalidError{
+ Cert: c,
+ Reason: Expired,
+ Detail: fmt.Sprintf("current time %s is before %s", now.Format(time.RFC3339), c.NotBefore.Format(time.RFC3339)),
+ }
+ } else if now.After(c.NotAfter) {
+ return CertificateInvalidError{
+ Cert: c,
+ Reason: Expired,
+ Detail: fmt.Sprintf("current time %s is after %s", now.Format(time.RFC3339), c.NotAfter.Format(time.RFC3339)),
+ }
}
}
@@ -640,17 +613,16 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
leaf = currentChain[0]
}
- if !opts.DisableNameConstraintChecks && (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints() {
- sanExtension, ok := leaf.getSANExtension()
- if !ok {
- // This is the deprecated, legacy case of depending on
- // the CN as a hostname. Chains modern enough to be
- // using name constraints should not be depending on
- // CNs.
- return CertificateInvalidError{c, NameConstraintsWithoutSANs, ""}
- }
-
- err := forEachSAN(sanExtension, func(tag int, data []byte) error {
+ checkNameConstraints := !opts.DisableNameConstraintChecks && (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints()
+ if checkNameConstraints && leaf.commonNameAsHostname() {
+ // This is the deprecated, legacy case of depending on the commonName as
+ // a hostname. We don't enforce name constraints against the CN, but
+ // VerifyHostname will look for hostnames in there if there are no SANs.
+ // In order to ensure VerifyHostname will not accept an unchecked name,
+ // return an error here.
+ return CertificateInvalidError{c, NameConstraintsWithoutSANs, ""}
+ } else if checkNameConstraints && leaf.hasSANExtension() {
+ err := forEachSAN(leaf.getSANExtension(), func(tag int, data []byte) error {
switch tag {
case nameTypeEmail:
name := string(data)
@@ -718,59 +690,6 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
}
}
- checkEKUs := !opts.DisableEKUChecks && certType == intermediateCertificate
-
- // If no extended key usages are specified, then all are acceptable.
- if checkEKUs && (len(c.ExtKeyUsage) == 0 && len(c.UnknownExtKeyUsage) == 0) {
- checkEKUs = false
- }
-
- // If the “any” key usage is permitted, then no more checks are needed.
- if checkEKUs {
- for _, caEKU := range c.ExtKeyUsage {
- comparisonCount++
- if caEKU == ExtKeyUsageAny {
- checkEKUs = false
- break
- }
- }
- }
-
- if checkEKUs {
- NextEKU:
- for _, eku := range leaf.ExtKeyUsage {
- if comparisonCount > maxConstraintComparisons {
- return CertificateInvalidError{c, TooManyConstraints, ""}
- }
-
- for _, caEKU := range c.ExtKeyUsage {
- comparisonCount++
- if ekuPermittedBy(eku, caEKU, checkingAgainstIssuerCert) {
- continue NextEKU
- }
- }
-
- oid, _ := oidFromExtKeyUsage(eku)
- return CertificateInvalidError{c, CANotAuthorizedForExtKeyUsage, fmt.Sprintf("EKU not permitted: %#v", oid)}
- }
-
- NextUnknownEKU:
- for _, eku := range leaf.UnknownExtKeyUsage {
- if comparisonCount > maxConstraintComparisons {
- return CertificateInvalidError{c, TooManyConstraints, ""}
- }
-
- for _, caEKU := range c.UnknownExtKeyUsage {
- comparisonCount++
- if caEKU.Equal(eku) {
- continue NextUnknownEKU
- }
- }
-
- return CertificateInvalidError{c, CANotAuthorizedForExtKeyUsage, fmt.Sprintf("EKU not permitted: %#v", eku)}
- }
- }
-
// KeyUsage status flags are ignored. From Engineering Security, Peter
// Gutmann: A European government CA marked its signing certificates as
// being valid for encryption only, but no-one noticed. Another
@@ -802,18 +721,6 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
return nil
}
-// formatOID formats an ASN.1 OBJECT IDENTIFER in the common, dotted style.
-func formatOID(oid asn1.ObjectIdentifier) string {
- ret := ""
- for i, v := range oid {
- if i > 0 {
- ret += "."
- }
- ret += strconv.Itoa(v)
- }
- return ret
-}
-
// Verify attempts to verify c by building one or more chains from c to a
// certificate in opts.Roots, using certificates in opts.Intermediates if
// needed. If successful, it returns one or more chains where the first
@@ -871,63 +778,38 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e
}
}
- requestedKeyUsages := make([]ExtKeyUsage, len(opts.KeyUsages))
- copy(requestedKeyUsages, opts.KeyUsages)
- if len(requestedKeyUsages) == 0 {
- requestedKeyUsages = append(requestedKeyUsages, ExtKeyUsageServerAuth)
+ var candidateChains [][]*Certificate
+ if opts.Roots.contains(c) {
+ candidateChains = append(candidateChains, []*Certificate{c})
+ } else {
+ if candidateChains, err = c.buildChains(nil, []*Certificate{c}, nil, &opts); err != nil {
+ return nil, err
+ }
}
- // If no key usages are specified, then any are acceptable.
- checkEKU := !opts.DisableEKUChecks && len(c.ExtKeyUsage) > 0
-
- for _, eku := range requestedKeyUsages {
- if eku == ExtKeyUsageAny {
- checkEKU = false
- break
- }
+ keyUsages := opts.KeyUsages
+ if len(keyUsages) == 0 {
+ keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
}
- if checkEKU {
- foundMatch := false
- NextUsage:
- for _, eku := range requestedKeyUsages {
- for _, leafEKU := range c.ExtKeyUsage {
- if ekuPermittedBy(eku, leafEKU, checkingAgainstLeafCert) {
- foundMatch = true
- break NextUsage
- }
- }
+ // If any key usage is acceptable then we're done.
+ for _, usage := range keyUsages {
+ if usage == ExtKeyUsageAny {
+ return candidateChains, nil
}
+ }
- if !foundMatch {
- msg := "leaf contains the following, recognized EKUs: "
-
- for i, leafEKU := range c.ExtKeyUsage {
- oid, ok := oidFromExtKeyUsage(leafEKU)
- if !ok {
- continue
- }
-
- if i > 0 {
- msg += ", "
- }
- msg += formatOID(oid)
- }
-
- return nil, CertificateInvalidError{c, IncompatibleUsage, msg}
+ for _, candidate := range candidateChains {
+ if opts.DisableEKUChecks || checkChainForKeyUsage(candidate, keyUsages) {
+ chains = append(chains, candidate)
}
}
- var candidateChains [][]*Certificate
- if opts.Roots.contains(c) {
- candidateChains = append(candidateChains, []*Certificate{c})
- } else {
- if candidateChains, err = c.buildChains(make(map[int][][]*Certificate), []*Certificate{c}, &opts); err != nil {
- return nil, err
- }
+ if len(chains) == 0 {
+ return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
}
- return candidateChains, nil
+ return chains, nil
}
func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
@@ -937,64 +819,138 @@ func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate
return n
}
-func (c *Certificate) buildChains(cache map[int][][]*Certificate, currentChain []*Certificate, opts *VerifyOptions) (chains [][]*Certificate, err error) {
- possibleRoots, failedRoot, rootErr := opts.Roots.findVerifiedParents(c)
-nextRoot:
- for _, rootNum := range possibleRoots {
- root := opts.Roots.certs[rootNum]
+// maxChainSignatureChecks is the maximum number of CheckSignatureFrom calls
+// that an invocation of buildChains will (tranistively) make. Most chains are
+// less than 15 certificates long, so this leaves space for multiple chains and
+// for failed checks due to different intermediates having the same Subject.
+const maxChainSignatureChecks = 100
+
+func (c *Certificate) buildChains(cache map[*Certificate][][]*Certificate, currentChain []*Certificate, sigChecks *int, opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ var (
+ hintErr error
+ hintCert *Certificate
+ )
+ considerCandidate := func(certType int, candidate *Certificate) {
for _, cert := range currentChain {
- if cert.Equal(root) {
- continue nextRoot
+ if cert.Equal(candidate) {
+ return
}
}
- err = root.isValid(rootCertificate, currentChain, opts)
- if err != nil {
- continue
+ if sigChecks == nil {
+ sigChecks = new(int)
+ }
+ *sigChecks++
+ if *sigChecks > maxChainSignatureChecks {
+ err = errors.New("x509: signature check attempts limit reached while verifying certificate chain")
+ return
}
- chains = append(chains, appendToFreshChain(currentChain, root))
- }
- possibleIntermediates, failedIntermediate, intermediateErr := opts.Intermediates.findVerifiedParents(c)
-nextIntermediate:
- for _, intermediateNum := range possibleIntermediates {
- intermediate := opts.Intermediates.certs[intermediateNum]
- for _, cert := range currentChain {
- if cert.Equal(intermediate) {
- continue nextIntermediate
+ if err := c.CheckSignatureFrom(candidate); err != nil {
+ if hintErr == nil {
+ hintErr = err
+ hintCert = candidate
}
+ return
}
- err = intermediate.isValid(intermediateCertificate, currentChain, opts)
+
+ err = candidate.isValid(certType, currentChain, opts)
if err != nil {
- continue
+ return
}
- var childChains [][]*Certificate
- childChains, ok := cache[intermediateNum]
- if !ok {
- childChains, err = intermediate.buildChains(cache, appendToFreshChain(currentChain, intermediate), opts)
- cache[intermediateNum] = childChains
+
+ switch certType {
+ case rootCertificate:
+ chains = append(chains, appendToFreshChain(currentChain, candidate))
+ case intermediateCertificate:
+ if cache == nil {
+ cache = make(map[*Certificate][][]*Certificate)
+ }
+ childChains, ok := cache[candidate]
+ if !ok {
+ childChains, err = candidate.buildChains(cache, appendToFreshChain(currentChain, candidate), sigChecks, opts)
+ cache[candidate] = childChains
+ }
+ chains = append(chains, childChains...)
}
- chains = append(chains, childChains...)
+ }
+
+ for _, rootNum := range opts.Roots.findPotentialParents(c) {
+ considerCandidate(rootCertificate, opts.Roots.certs[rootNum])
+ }
+ for _, intermediateNum := range opts.Intermediates.findPotentialParents(c) {
+ considerCandidate(intermediateCertificate, opts.Intermediates.certs[intermediateNum])
}
if len(chains) > 0 {
err = nil
}
-
if len(chains) == 0 && err == nil {
- hintErr := rootErr
- hintCert := failedRoot
- if hintErr == nil {
- hintErr = intermediateErr
- hintCert = failedIntermediate
- }
err = UnknownAuthorityError{c, hintErr, hintCert}
}
return
}
+// validHostname reports whether host is a valid hostname that can be matched or
+// matched against according to RFC 6125 2.2, with some leniency to accommodate
+// legacy values.
+func validHostname(host string) bool {
+ host = strings.TrimSuffix(host, ".")
+
+ if len(host) == 0 {
+ return false
+ }
+
+ for i, part := range strings.Split(host, ".") {
+ if part == "" {
+ // Empty label.
+ return false
+ }
+ if i == 0 && part == "*" {
+ // Only allow full left-most wildcards, as those are the only ones
+ // we match, and matching literal '*' characters is probably never
+ // the expected behavior.
+ continue
+ }
+ for j, c := range part {
+ if 'a' <= c && c <= 'z' {
+ continue
+ }
+ if '0' <= c && c <= '9' {
+ continue
+ }
+ if 'A' <= c && c <= 'Z' {
+ continue
+ }
+ if c == '-' && j != 0 {
+ continue
+ }
+ if c == '_' || c == ':' {
+ // Not valid characters in hostnames, but commonly
+ // found in deployments outside the WebPKI.
+ continue
+ }
+ return false
+ }
+ }
+
+ return true
+}
+
+// commonNameAsHostname reports whether the Common Name field should be
+// considered the hostname that the certificate is valid for. This is a legacy
+// behavior, disabled if the Subject Alt Name extension is present.
+//
+// It applies the strict validHostname check to the Common Name field, so that
+// certificates without SANs can still be validated against CAs with name
+// constraints if there is no risk the CN would be matched as a hostname.
+// See NameConstraintsWithoutSANs and issue 24151.
+func (c *Certificate) commonNameAsHostname() bool {
+ return !ignoreCN && !c.hasSANExtension() && validHostname(c.Subject.CommonName)
+}
+
func matchHostnames(pattern, host string) bool {
host = strings.TrimSuffix(host, ".")
pattern = strings.TrimSuffix(pattern, ".")
@@ -1064,7 +1020,7 @@ func (c *Certificate) VerifyHostname(h string) error {
}
if ip := net.ParseIP(candidateIP); ip != nil {
// We only match IP addresses against IP SANs.
- // https://tools.ietf.org/html/rfc6125#appendix-B.2
+ // See RFC 6125, Appendix B.2.
for _, candidate := range c.IPAddresses {
if ip.Equal(candidate) {
return nil
@@ -1075,16 +1031,79 @@ func (c *Certificate) VerifyHostname(h string) error {
lowered := toLowerCaseASCII(h)
- if c.hasSANExtension() {
+ if c.commonNameAsHostname() {
+ if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
+ return nil
+ }
+ } else {
for _, match := range c.DNSNames {
if matchHostnames(toLowerCaseASCII(match), lowered) {
return nil
}
}
- // If Subject Alt Name is given, we ignore the common name.
- } else if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
- return nil
}
return HostnameError{c, h}
}
+
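A small sketch of the caller-visible effect (not part of the patch): VerifyHostname now falls back to the Common Name only when commonNameAsHostname holds, i.e. no SAN extension and a CN that passes validHostname. The certificate and host values are assumed to come from the caller.

package main

import (
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

// checkHost reports how the patched VerifyHostname treats host.
func checkHost(cert *x509.Certificate, host string) {
	if err := cert.VerifyHostname(host); err != nil {
		// For SAN-less certificates with a non-hostname CN this now surfaces
		// "x509: Common Name is not a valid hostname: ..." via HostnameError.
		log.Printf("hostname rejected: %v", err)
		return
	}
	log.Printf("hostname %q accepted", host)
}
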
+func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool {
+ usages := make([]ExtKeyUsage, len(keyUsages))
+ copy(usages, keyUsages)
+
+ if len(chain) == 0 {
+ return false
+ }
+
+ usagesRemaining := len(usages)
+
+ // We walk down the list and cross out any usages that aren't supported
+ // by each certificate. If we cross out all the usages, then the chain
+ // is unacceptable.
+
+NextCert:
+ for i := len(chain) - 1; i >= 0; i-- {
+ cert := chain[i]
+ if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 {
+ // The certificate doesn't have any extended key usage specified.
+ continue
+ }
+
+ for _, usage := range cert.ExtKeyUsage {
+ if usage == ExtKeyUsageAny {
+ // The certificate is explicitly good for any usage.
+ continue NextCert
+ }
+ }
+
+ const invalidUsage ExtKeyUsage = -1
+
+ NextRequestedUsage:
+ for i, requestedUsage := range usages {
+ if requestedUsage == invalidUsage {
+ continue
+ }
+
+ for _, usage := range cert.ExtKeyUsage {
+ if requestedUsage == usage {
+ continue NextRequestedUsage
+ } else if requestedUsage == ExtKeyUsageServerAuth &&
+ (usage == ExtKeyUsageNetscapeServerGatedCrypto ||
+ usage == ExtKeyUsageMicrosoftServerGatedCrypto) {
+ // In order to support COMODO
+ // certificate chains, we have to
+ // accept Netscape or Microsoft SGC
+ // usages as equal to ServerAuth.
+ continue NextRequestedUsage
+ }
+ }
+
+ usages[i] = invalidUsage
+ usagesRemaining--
+ if usagesRemaining == 0 {
+ return false
+ }
+ }
+ }
+
+ return true
+}
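
A minimal sketch of the new Verify flow (not part of the patch): with KeyUsages empty the filter defaults to ExtKeyUsageServerAuth, ExtKeyUsageAny short-circuits before checkChainForKeyUsage, and otherwise each candidate chain must nest the requested EKUs. roots and leaf are assumed to be supplied by the caller.

package main

import (
	"fmt"
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

// verifyClient builds chains for leaf and keeps only those usable for client auth.
func verifyClient(leaf *x509.Certificate, roots *x509.CertPool) {
	opts := x509.VerifyOptions{
		Roots:     roots,
		KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	}
	chains, err := leaf.Verify(opts)
	if err != nil {
		// IncompatibleUsage is returned when no candidate chain survives
		// the checkChainForKeyUsage filter above.
		log.Fatal(err)
	}
	fmt.Println("verified chains:", len(chains))
}
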
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/x509.go b/vendor/github.com/google/certificate-transparency-go/x509/x509.go
index 23f2a6a228..3059a6facc 100644
--- a/vendor/github.com/google/certificate-transparency-go/x509/x509.go
+++ b/vendor/github.com/google/certificate-transparency-go/x509/x509.go
@@ -8,9 +8,43 @@
// can be used to override the system default locations for the SSL certificate
// file and SSL certificate files directory, respectively.
//
-// This is a fork of the go library crypto/x509 package, it's more relaxed
-// about certificates that it'll accept, and exports the TBSCertificate
-// structure.
+// This is a fork of the Go library crypto/x509 package, primarily adapted for
+// use with Certificate Transparency. Main areas of difference are:
+//
+// - Life as a fork:
+// - Rename OS-specific cgo code so it doesn't clash with main Go library.
+// - Use local library imports (asn1, pkix) throughout.
+// - Add version-specific wrappers for Go version-incompatible code (in
+// ptr_*_windows.go).
+// - Laxer certificate parsing:
+// - Add options to disable various validation checks (times, EKUs etc).
+// - Use NonFatalErrors type for some errors and continue parsing; this
+// can be checked with IsFatal(err).
+// - Support for short bitlength ECDSA curves (in curves.go).
+// - Certificate Transparency specific functionality:
+// - Parsing and marshaling of SCTList extension.
+// - RemoveSCTList() function for rebuilding CT leaf entry.
+// - Pre-certificate processing (RemoveCTPoison(), BuildPrecertTBS(),
+// ParseTBSCertificate(), IsPrecertificate()).
+// - Revocation list processing:
+// - Detailed CRL parsing (in revoked.go)
+// - Detailed error recording mechanism (in error.go, errors.go)
+// - Factor out parseDistributionPoints() for reuse.
+// - Factor out and generalize GeneralNames parsing (in names.go)
+// - Fix CRL commenting.
+// - RPKI support:
+// - Support for SubjectInfoAccess extension
+// - Support for RFC3779 extensions (in rpki.go)
+// - RSAES-OAEP support:
+// - Support for parsing RSAES-OAEP public keys from certificates
+// - Ed25519 support:
+// - Support for parsing and marshaling Ed25519 keys
+// - General improvements:
+// - Export and use OID values throughout.
+// - Export OIDFromNamedCurve().
+// - Export SignatureAlgorithmFromAI().
+// - Add OID value to UnhandledCriticalExtension error.
+// - Minor typo/lint fixes.
package x509
import (
@@ -35,12 +69,13 @@ import (
"time"
"unicode/utf8"
+ "golang.org/x/crypto/cryptobyte"
cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
+ "golang.org/x/crypto/ed25519"
"github.com/google/certificate-transparency-go/asn1"
"github.com/google/certificate-transparency-go/tls"
"github.com/google/certificate-transparency-go/x509/pkix"
- "golang.org/x/crypto/cryptobyte"
)
// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo
@@ -50,14 +85,12 @@ type pkixPublicKey struct {
BitString asn1.BitString
}
-// ParsePKIXPublicKey parses a DER encoded public key. These values are
-// typically found in PEM blocks with "BEGIN PUBLIC KEY".
+// ParsePKIXPublicKey parses a public key in PKIX, ASN.1 DER form.
//
-// Supported key types include RSA, DSA, and ECDSA. Unknown key
-// types result in an error.
+// It returns a *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, or
+// ed25519.PublicKey. More types might be supported in the future.
//
-// On success, pub will be of type *rsa.PublicKey, *dsa.PublicKey,
-// or *ecdsa.PublicKey.
+// This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY".
func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) {
var pki publicKeyInfo
if rest, err := asn1.Unmarshal(derBytes, &pki); err != nil {
@@ -69,7 +102,16 @@ func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) {
if algo == UnknownPublicKeyAlgorithm {
return nil, errors.New("x509: unknown public key algorithm")
}
- return parsePublicKey(algo, &pki)
+ var nfe NonFatalErrors
+ pub, err = parsePublicKey(algo, &pki, &nfe)
+ if err != nil {
+ return pub, err
+ }
+ // Treat non-fatal errors as fatal for this entrypoint.
+ if len(nfe.Errors) > 0 {
+ return nil, nfe.Errors[0]
+ }
+ return pub, nil
}
func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) {
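
A minimal sketch of the documented return types (not part of the patch), including the ed25519.PublicKey case this diff adds; the PEM plumbing is an assumption matching the "PUBLIC KEY" block type named above.

package main

import (
	"crypto/ecdsa"
	"crypto/rsa"
	"encoding/pem"
	"fmt"
	"log"

	"github.com/google/certificate-transparency-go/x509"
	"golang.org/x/crypto/ed25519"
)

// describeKey parses a PKIX public key and reports its concrete type.
func describeKey(pemBytes []byte) {
	block, _ := pem.Decode(pemBytes)
	if block == nil || block.Type != "PUBLIC KEY" {
		log.Fatal("no PUBLIC KEY block found")
	}
	pub, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	switch k := pub.(type) {
	case *rsa.PublicKey:
		fmt.Println("RSA, modulus bits:", k.N.BitLen())
	case *ecdsa.PublicKey:
		fmt.Println("ECDSA, curve:", k.Curve.Params().Name)
	case ed25519.PublicKey:
		fmt.Println("Ed25519")
	default:
		fmt.Printf("other key type: %T\n", k) // e.g. *dsa.PublicKey
	}
}
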
@@ -84,7 +126,7 @@ func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorith
}
publicKeyAlgorithm.Algorithm = OIDPublicKeyRSA
// This is a NULL parameters value which is required by
- // https://tools.ietf.org/html/rfc3279#section-2.3.1.
+ // RFC 3279, Section 2.3.1.
publicKeyAlgorithm.Parameters = asn1.NullRawValue
case *ecdsa.PublicKey:
publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
@@ -99,14 +141,22 @@ func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorith
return
}
publicKeyAlgorithm.Parameters.FullBytes = paramBytes
+ case ed25519.PublicKey:
+ publicKeyBytes = pub
+ publicKeyAlgorithm.Algorithm = OIDPublicKeyEd25519
default:
- return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: only RSA and ECDSA public keys supported")
+ return nil, pkix.AlgorithmIdentifier{}, fmt.Errorf("x509: unsupported public key type: %T", pub)
}
return publicKeyBytes, publicKeyAlgorithm, nil
}
-// MarshalPKIXPublicKey serialises a public key to DER-encoded PKIX format.
+// MarshalPKIXPublicKey converts a public key to PKIX, ASN.1 DER form.
+//
+// The following key types are currently supported: *rsa.PublicKey, *ecdsa.PublicKey
+// and ed25519.PublicKey. Unsupported key types result in an error.
+//
+// This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY".
func MarshalPKIXPublicKey(pub interface{}) ([]byte, error) {
var publicKeyBytes []byte
var publicKeyAlgorithm pkix.AlgorithmIdentifier
@@ -151,6 +201,15 @@ type tbsCertificate struct {
Extensions []pkix.Extension `asn1:"optional,explicit,tag:3"`
}
+// RFC 4055, 4.1
+// The current ASN.1 parser does not support non-integer defaults so
+// the 'default:' tags here do nothing.
+type rsaesoaepAlgorithmParameters struct {
+ HashFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:0,default:sha1Identifier"`
+ MaskgenFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:1,default:mgf1SHA1Identifier"`
+ PSourceFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:2,default:pSpecifiedEmptyIdentifier"`
+}
+
type dsaAlgorithmParameters struct {
P, Q, G *big.Int
}
@@ -197,6 +256,40 @@ const (
SHA256WithRSAPSS
SHA384WithRSAPSS
SHA512WithRSAPSS
+ PureEd25519
+)
+
+// RFC 4055, 6. Basic object identifiers
+var oidpSpecified = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 9}
+
+// These are the default parameters for an RSAES-OAEP pubkey.
+// The current ASN.1 parser does not support non-integer defaults so
+// these currently do nothing.
+var (
+ sha1Identifier = pkix.AlgorithmIdentifier{
+ Algorithm: oidSHA1,
+ Parameters: asn1.NullRawValue,
+ }
+ mgf1SHA1Identifier = pkix.AlgorithmIdentifier{
+ Algorithm: oidMGF1,
+ // RFC 4055, 2.1 sha1Identifier
+ Parameters: asn1.RawValue{
+ Class: asn1.ClassUniversal,
+ Tag: asn1.TagSequence,
+ IsCompound: false,
+ Bytes: []byte{6, 5, 43, 14, 3, 2, 26, 5, 0},
+ FullBytes: []byte{16, 9, 6, 5, 43, 14, 3, 2, 26, 5, 0}},
+ }
+ pSpecifiedEmptyIdentifier = pkix.AlgorithmIdentifier{
+ Algorithm: oidpSpecified,
+ // RFC 4055, 4.1 nullOctetString
+ Parameters: asn1.RawValue{
+ Class: asn1.ClassUniversal,
+ Tag: asn1.TagOctetString,
+ IsCompound: false,
+ Bytes: []byte{},
+ FullBytes: []byte{4, 0}},
+ }
)
func (algo SignatureAlgorithm) isRSAPSS() bool {
@@ -226,12 +319,16 @@ const (
RSA
DSA
ECDSA
+ Ed25519
+ RSAESOAEP
)
var publicKeyAlgoName = [...]string{
- RSA: "RSA",
- DSA: "DSA",
- ECDSA: "ECDSA",
+ RSA: "RSA",
+ DSA: "DSA",
+ ECDSA: "ECDSA",
+ Ed25519: "Ed25519",
+ RSAESOAEP: "RSAESOAEP",
}
func (algo PublicKeyAlgorithm) String() string {
@@ -290,6 +387,11 @@ func (algo PublicKeyAlgorithm) String() string {
//
// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 }
+//
+//
+// RFC 8410 3 Curve25519 and Curve448 Algorithm Identifiers
+//
+// id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 }
var (
oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
@@ -305,7 +407,9 @@ var (
oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+ oidSignatureEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112}
+ oidSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26}
oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}
oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2}
oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3}
@@ -341,10 +445,11 @@ var signatureAlgorithmDetails = []struct {
{ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, ECDSA, crypto.SHA256},
{ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, ECDSA, crypto.SHA384},
{ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, ECDSA, crypto.SHA512},
+ {PureEd25519, "Ed25519", oidSignatureEd25519, Ed25519, crypto.Hash(0) /* no pre-hashing */},
}
// pssParameters reflects the parameters in an AlgorithmIdentifier that
-// specifies RSA PSS. See https://tools.ietf.org/html/rfc3447#appendix-A.2.3
+// specifies RSA PSS. See RFC 3447, Appendix A.2.3.
type pssParameters struct {
// The following three fields are not marked as
// optional because the default values specify SHA-1,
@@ -403,6 +508,14 @@ func rsaPSSParameters(hashFunc crypto.Hash) asn1.RawValue {
// SignatureAlgorithmFromAI converts a PKIX algorithm identifier to the
// equivalent local constant.
func SignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm {
+ if ai.Algorithm.Equal(oidSignatureEd25519) {
+ // RFC 8410, Section 3
+ // > For all of the OIDs, the parameters MUST be absent.
+ if len(ai.Parameters.FullBytes) != 0 {
+ return UnknownSignatureAlgorithm
+ }
+ }
+
if !ai.Algorithm.Equal(oidSignatureRSAPSS) {
for _, details := range signatureAlgorithmDetails {
if ai.Algorithm.Equal(details.oid) {
@@ -425,17 +538,15 @@ func SignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm {
return UnknownSignatureAlgorithm
}
- // PSS is greatly overburdened with options. This code forces
- // them into three buckets by requiring that the MGF1 hash
- // function always match the message hash function (as
- // recommended in
- // https://tools.ietf.org/html/rfc3447#section-8.1), that the
- // salt length matches the hash length, and that the trailer
- // field has the default value.
- if !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes) ||
+ // PSS is greatly overburdened with options. This code forces them into
+ // three buckets by requiring that the MGF1 hash function always match the
+ // message hash function (as recommended in RFC 3447, Section 8.1), that the
+ // salt length matches the hash length, and that the trailer field has the
+ // default value.
+ if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) ||
!params.MGF.Algorithm.Equal(oidMGF1) ||
!mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) ||
- !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes) ||
+ (len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) ||
params.TrailerField != 1 {
return UnknownSignatureAlgorithm
}
@@ -455,22 +566,26 @@ func SignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm {
// RFC 3279, 2.3 Public Key Algorithms
//
// pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
-// rsadsi(113549) pkcs(1) 1 }
+//
+// rsadsi(113549) pkcs(1) 1 }
//
// rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 }
//
// id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
-// x9-57(10040) x9cm(4) 1 }
//
-// RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters
+// x9-57(10040) x9cm(4) 1 }
+//
+// # RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters
//
-// id-ecPublicKey OBJECT IDENTIFIER ::= {
-// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
+// id-ecPublicKey OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
var (
OIDPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
+ OIDPublicKeyRSAESOAEP = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 7}
OIDPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
OIDPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
OIDPublicKeyRSAObsolete = asn1.ObjectIdentifier{2, 5, 8, 1, 1}
+ OIDPublicKeyEd25519 = oidSignatureEd25519
)
func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm {
@@ -481,34 +596,44 @@ func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm
return DSA
case oid.Equal(OIDPublicKeyECDSA):
return ECDSA
+ case oid.Equal(OIDPublicKeyRSAESOAEP):
+ return RSAESOAEP
+ case oid.Equal(OIDPublicKeyEd25519):
+ return Ed25519
}
return UnknownPublicKeyAlgorithm
}
// RFC 5480, 2.1.1.1. Named Curve
//
-// secp224r1 OBJECT IDENTIFIER ::= {
-// iso(1) identified-organization(3) certicom(132) curve(0) 33 }
+// secp224r1 OBJECT IDENTIFIER ::= {
+// iso(1) identified-organization(3) certicom(132) curve(0) 33 }
+//
+// secp256r1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
+// prime(1) 7 }
//
-// secp256r1 OBJECT IDENTIFIER ::= {
-// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
-// prime(1) 7 }
+// secp384r1 OBJECT IDENTIFIER ::= {
+// iso(1) identified-organization(3) certicom(132) curve(0) 34 }
//
-// secp384r1 OBJECT IDENTIFIER ::= {
-// iso(1) identified-organization(3) certicom(132) curve(0) 34 }
+// secp521r1 OBJECT IDENTIFIER ::= {
+// iso(1) identified-organization(3) certicom(132) curve(0) 35 }
//
-// secp521r1 OBJECT IDENTIFIER ::= {
-// iso(1) identified-organization(3) certicom(132) curve(0) 35 }
+// secp192r1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
+// prime(1) 1 }
//
-// NB: secp256r1 is equivalent to prime256v1
+// NB: secp256r1 is equivalent to prime256v1,
+// secp192r1 is equivalent to ansix9p192r1 and prime192v1
var (
OIDNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
OIDNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
OIDNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
OIDNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
+ OIDNamedCurveP192 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 1}
)
-func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve {
+func namedCurveFromOID(oid asn1.ObjectIdentifier, nfe *NonFatalErrors) elliptic.Curve {
switch {
case oid.Equal(OIDNamedCurveP224):
return elliptic.P224()
@@ -518,6 +643,9 @@ func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve {
return elliptic.P384()
case oid.Equal(OIDNamedCurveP521):
return elliptic.P521()
+ case oid.Equal(OIDNamedCurveP192):
+ nfe.AddError(errors.New("insecure curve (secp192r1) specified"))
+ return secp192r1()
}
return nil
}
@@ -534,6 +662,8 @@ func OIDFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
return OIDNamedCurveP384, true
case elliptic.P521():
return OIDNamedCurveP521, true
+ case secp192r1():
+ return OIDNamedCurveP192, true
}
return nil, false
@@ -737,6 +867,10 @@ type Certificate struct {
OCSPServer []string
IssuingCertificateURL []string
+ // Subject Information Access
+ SubjectTimestamps []string
+ SubjectCARepositories []string
+
// Subject Alternate Name values. (Note that these values may not be valid
// if invalid values were contained within a parsed certificate. For
// example, an element of DNSNames may not be a valid DNS domain name.)
@@ -761,6 +895,9 @@ type Certificate struct {
PolicyIdentifiers []asn1.ObjectIdentifier
+ RPKIAddressRanges []*IPAddressFamilyBlocks
+ RPKIASNumbers, RPKIRoutingDomainIDs *ASIdentifiers
+
// Certificate Transparency SCT extension contents; this is a TLS-encoded
// SignedCertificateTimestampList (RFC 6962 s3.3).
RawSCT []byte
@@ -791,6 +928,9 @@ func (ConstraintViolationError) Error() string {
// Equal indicates whether two Certificate objects are equal (by comparing their
// DER-encoded values).
func (c *Certificate) Equal(other *Certificate) bool {
+ if c == nil || other == nil {
+ return c == other
+ }
return bytes.Equal(c.Raw, other.Raw)
}
@@ -896,23 +1036,17 @@ func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature
}
func (c *Certificate) hasNameConstraints() bool {
- for _, e := range c.Extensions {
- if len(e.Id) == 4 && e.Id[0] == OIDExtensionNameConstraints[0] && e.Id[1] == OIDExtensionNameConstraints[1] && e.Id[2] == OIDExtensionNameConstraints[2] && e.Id[3] == OIDExtensionNameConstraints[3] {
- return true
- }
- }
-
- return false
+ return oidInExtensions(OIDExtensionNameConstraints, c.Extensions)
}
-func (c *Certificate) getSANExtension() ([]byte, bool) {
+func (c *Certificate) getSANExtension() []byte {
for _, e := range c.Extensions {
- if len(e.Id) == 4 && e.Id[0] == OIDExtensionSubjectAltName[0] && e.Id[1] == OIDExtensionSubjectAltName[1] && e.Id[2] == OIDExtensionSubjectAltName[2] && e.Id[3] == OIDExtensionSubjectAltName[3] {
- return e.Value, true
+ if e.Id.Equal(OIDExtensionSubjectAltName) {
+ return e.Value
}
}
- return nil, false
+ return nil
}
func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo PublicKeyAlgorithm, pubKey interface{}) error {
@@ -934,28 +1068,29 @@ func checkSignature(algo SignatureAlgorithm, signed, signature []byte, publicKey
switch hashType {
case crypto.Hash(0):
- return ErrUnsupportedAlgorithm
+ if pubKeyAlgo != Ed25519 {
+ return ErrUnsupportedAlgorithm
+ }
case crypto.MD5:
return InsecureAlgorithmError(algo)
+ default:
+ if !hashType.Available() {
+ return ErrUnsupportedAlgorithm
+ }
+ h := hashType.New()
+ h.Write(signed)
+ signed = h.Sum(nil)
}
- if !hashType.Available() {
- return ErrUnsupportedAlgorithm
- }
- h := hashType.New()
-
- h.Write(signed)
- digest := h.Sum(nil)
-
switch pub := publicKey.(type) {
case *rsa.PublicKey:
if pubKeyAlgo != RSA {
return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub)
}
if algo.isRSAPSS() {
- return rsa.VerifyPSS(pub, hashType, digest, signature, &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash})
+ return rsa.VerifyPSS(pub, hashType, signed, signature, &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash})
} else {
- return rsa.VerifyPKCS1v15(pub, hashType, digest, signature)
+ return rsa.VerifyPKCS1v15(pub, hashType, signed, signature)
}
case *dsa.PublicKey:
if pubKeyAlgo != DSA {
@@ -970,7 +1105,12 @@ func checkSignature(algo SignatureAlgorithm, signed, signature []byte, publicKey
if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
return errors.New("x509: DSA signature contained zero or negative values")
}
- if !dsa.Verify(pub, digest, dsaSig.R, dsaSig.S) {
+ // According to FIPS 186-3, section 4.6, the hash must be truncated if it is longer
+ // than the key length, but crypto/dsa doesn't do it automatically.
+ if maxHashLen := pub.Q.BitLen() / 8; maxHashLen < len(signed) {
+ signed = signed[:maxHashLen]
+ }
+ if !dsa.Verify(pub, signed, dsaSig.R, dsaSig.S) {
return errors.New("x509: DSA verification failure")
}
return
@@ -987,10 +1127,18 @@ func checkSignature(algo SignatureAlgorithm, signed, signature []byte, publicKey
if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
return errors.New("x509: ECDSA signature contained zero or negative values")
}
- if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) {
+ if !ecdsa.Verify(pub, signed, ecdsaSig.R, ecdsaSig.S) {
return errors.New("x509: ECDSA verification failure")
}
return
+ case ed25519.PublicKey:
+ if pubKeyAlgo != Ed25519 {
+ return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub)
+ }
+ if !ed25519.Verify(pub, signed, signature) {
+ return errors.New("x509: Ed25519 verification failure")
+ }
+ return
}
return ErrUnsupportedAlgorithm
}
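
A short sketch (not part of the patch): with the Ed25519 branch above, the message is passed through unhashed (crypto.Hash(0)) into ed25519.Verify, so an Ed25519-signed child validates through the usual exported entry point. Both certificates are assumed already parsed.

package main

import (
	"fmt"
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

// checkIssued verifies child's signature, Ed25519 included, against parent's key.
func checkIssued(child, parent *x509.Certificate) {
	if err := child.CheckSignatureFrom(parent); err != nil {
		log.Fatalf("signature check failed: %v", err)
	}
	fmt.Println("child was signed by parent")
}
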
@@ -1075,9 +1223,9 @@ func RemoveCTPoison(tbsData []byte) ([]byte, error) {
// CertificateTransparency extended key usage). In this case, the issuance
// information of the pre-cert is updated to reflect the next issuer in the
// chain, i.e. the issuer of this special intermediate:
-// - The precert's Issuer is changed to the Issuer of the intermediate
-// - The precert's AuthorityKeyId is changed to the AuthorityKeyId of the
-// intermediate.
+// - The precert's Issuer is changed to the Issuer of the intermediate
+// - The precert's AuthorityKeyId is changed to the AuthorityKeyId of the
+// intermediate.
func BuildPrecertTBS(tbsData []byte, preIssuer *Certificate) ([]byte, error) {
data, err := removeExtension(tbsData, OIDExtensionCTPoison)
if err != nil {
@@ -1175,7 +1323,7 @@ const (
)
// RFC 5280, 4.2.2.1
-type authorityInfoAccess struct {
+type accessDescription struct {
Method asn1.ObjectIdentifier
Location asn1.RawValue
}
@@ -1192,32 +1340,53 @@ type distributionPointName struct {
RelativeName pkix.RDNSequence `asn1:"optional,tag:1"`
}
-func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, error) {
+func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo, nfe *NonFatalErrors) (interface{}, error) {
asn1Data := keyData.PublicKey.RightAlign()
switch algo {
- case RSA:
- // RSA public keys must have a NULL in the parameters
- // (https://tools.ietf.org/html/rfc3279#section-2.3.1).
- if !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) {
- return nil, errors.New("x509: RSA key missing NULL parameters")
+ case RSA, RSAESOAEP:
+ // RSA public keys must have a NULL in the parameters.
+ // See RFC 3279, Section 2.3.1.
+ if algo == RSA && !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) {
+ nfe.AddError(errors.New("x509: RSA key missing NULL parameters"))
+ }
+ if algo == RSAESOAEP {
+ // We only parse the parameters to ensure it is a valid encoding; the actual values are thrown away.
+ paramsData := keyData.Algorithm.Parameters.FullBytes
+ params := new(rsaesoaepAlgorithmParameters)
+ params.HashFunc = sha1Identifier
+ params.MaskgenFunc = mgf1SHA1Identifier
+ params.PSourceFunc = pSpecifiedEmptyIdentifier
+ rest, err := asn1.Unmarshal(paramsData, params)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after RSAES-OAEP parameters")
+ }
}
p := new(pkcs1PublicKey)
rest, err := asn1.Unmarshal(asn1Data, p)
if err != nil {
- return nil, err
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(asn1Data, p, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
}
if len(rest) != 0 {
return nil, errors.New("x509: trailing data after RSA public key")
}
if p.N.Sign() <= 0 {
- return nil, errors.New("x509: RSA modulus is not a positive number")
+ nfe.AddError(errors.New("x509: RSA modulus is not a positive number"))
}
if p.E <= 0 {
return nil, errors.New("x509: RSA public exponent is not a positive number")
}
+ // TODO(dkarch): Update to return the parameters once crypto/x509 has come up with permanent solution (https://github.com/golang/go/issues/30416)
pub := &rsa.PublicKey{
E: p.E,
N: p.N,
@@ -1227,7 +1396,12 @@ func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{
var p *big.Int
rest, err := asn1.Unmarshal(asn1Data, &p)
if err != nil {
- return nil, err
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &p, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
}
if len(rest) != 0 {
return nil, errors.New("x509: trailing data after DSA public key")
@@ -1258,14 +1432,14 @@ func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{
namedCurveOID := new(asn1.ObjectIdentifier)
rest, err := asn1.Unmarshal(paramsData, namedCurveOID)
if err != nil {
- return nil, err
+ return nil, errors.New("x509: failed to parse ECDSA parameters as named curve")
}
if len(rest) != 0 {
return nil, errors.New("x509: trailing data after ECDSA parameters")
}
- namedCurve := namedCurveFromOID(*namedCurveOID)
+ namedCurve := namedCurveFromOID(*namedCurveOID, nfe)
if namedCurve == nil {
- return nil, errors.New("x509: unsupported elliptic curve")
+ return nil, fmt.Errorf("x509: unsupported elliptic curve %v", namedCurveOID)
}
x, y := elliptic.Unmarshal(namedCurve, asn1Data)
if x == nil {
@@ -1277,6 +1451,8 @@ func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{
Y: y,
}
return pub, nil
+ case Ed25519:
+ return ed25519.PublicKey(asn1Data), nil
default:
return nil, nil
}
@@ -1307,9 +1483,40 @@ func (e NonFatalErrors) Error() string {
// HasError returns true if |e| contains at least one error
func (e *NonFatalErrors) HasError() bool {
+ if e == nil {
+ return false
+ }
return len(e.Errors) > 0
}
+// Append combines the contents of two NonFatalErrors instances.
+func (e *NonFatalErrors) Append(more *NonFatalErrors) *NonFatalErrors {
+ if e == nil {
+ return more
+ }
+ if more == nil {
+ return e
+ }
+ combined := NonFatalErrors{Errors: make([]error, 0, len(e.Errors)+len(more.Errors))}
+ combined.Errors = append(combined.Errors, e.Errors...)
+ combined.Errors = append(combined.Errors, more.Errors...)
+ return &combined
+}
+
+// IsFatal indicates whether an error is fatal.
+func IsFatal(err error) bool {
+ if err == nil {
+ return false
+ }
+ if _, ok := err.(NonFatalErrors); ok {
+ return false
+ }
+ if errs, ok := err.(*Errors); ok {
+ return errs.Fatal()
+ }
+ return true
+}
+
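A minimal sketch of the calling convention these helpers enable (not part of the patch): the lax parsers can hand back a usable certificate together with a NonFatalErrors value, and IsFatal separates the two outcomes.

package main

import (
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

// parseLeniently returns a certificate even when only non-fatal errors occurred.
func parseLeniently(der []byte) *x509.Certificate {
	cert, err := x509.ParseCertificate(der)
	if x509.IsFatal(err) {
		log.Fatalf("unparseable certificate: %v", err)
	}
	if err != nil {
		log.Printf("parsed with non-fatal errors: %v", err)
	}
	return cert
}
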
func parseDistributionPoints(data []byte, crldp *[]string) error {
// CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
//
@@ -1421,7 +1628,7 @@ func parseSANExtension(value []byte, nfe *NonFatalErrors) (dnsNames, emailAddres
return
}
-// isValidIPMask returns true iff mask consists of zero or more 1 bits, followed by zero bits.
+// isValidIPMask reports whether mask consists of zero or more 1 bits, followed by zero bits.
func isValidIPMask(mask []byte) bool {
seenZero := false
@@ -1474,7 +1681,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension, nfe *NonF
}
if !havePermitted && !haveExcluded || len(permitted) == 0 && len(excluded) == 0 {
- // https://tools.ietf.org/html/rfc5280#section-4.2.1.10:
+ // From RFC 5280, Section 4.2.1.10:
// “either the permittedSubtrees field
// or the excludedSubtrees MUST be
// present”
@@ -1622,7 +1829,7 @@ func parseCertificate(in *certificate) (*Certificate, error) {
out.PublicKeyAlgorithm =
getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm)
var err error
- out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey)
+ out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey, &nfe)
if err != nil {
return nil, err
}
@@ -1632,12 +1839,22 @@ func parseCertificate(in *certificate) (*Certificate, error) {
var issuer, subject pkix.RDNSequence
if rest, err := asn1.Unmarshal(in.TBSCertificate.Subject.FullBytes, &subject); err != nil {
- return nil, err
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Subject.FullBytes, &subject, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
} else if len(rest) != 0 {
return nil, errors.New("x509: trailing data after X.509 subject")
}
if rest, err := asn1.Unmarshal(in.TBSCertificate.Issuer.FullBytes, &issuer); err != nil {
- return nil, err
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Issuer.FullBytes, &issuer, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
} else if len(rest) != 0 {
return nil, errors.New("x509: trailing data after X.509 subject")
}
@@ -1729,10 +1946,21 @@ func parseCertificate(in *certificate) (*Certificate, error) {
// KeyPurposeId ::= OBJECT IDENTIFIER
var keyUsage []asn1.ObjectIdentifier
- if rest, err := asn1.Unmarshal(e.Value, &keyUsage); err != nil {
- return nil, err
- } else if len(rest) != 0 {
- return nil, errors.New("x509: trailing data after X.509 ExtendedKeyUsage")
+ if len(e.Value) == 0 {
+ nfe.AddError(errors.New("x509: empty ExtendedKeyUsage"))
+ } else {
+ rest, err := asn1.Unmarshal(e.Value, &keyUsage)
+ if err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(e.Value, &keyUsage, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 ExtendedKeyUsage")
+ }
}
for _, u := range keyUsage {
@@ -1772,12 +2000,15 @@ func parseCertificate(in *certificate) (*Certificate, error) {
}
} else if e.Id.Equal(OIDExtensionAuthorityInfoAccess) {
// RFC 5280 4.2.2.1: Authority Information Access
- var aia []authorityInfoAccess
+ var aia []accessDescription
if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
return nil, err
} else if len(rest) != 0 {
return nil, errors.New("x509: trailing data after X.509 authority information")
}
+ if len(aia) == 0 {
+ nfe.AddError(errors.New("x509: empty AuthorityInfoAccess extension"))
+ }
for _, v := range aia {
// GeneralName: uniformResourceIdentifier [6] IA5String
@@ -1790,6 +2021,34 @@ func parseCertificate(in *certificate) (*Certificate, error) {
out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(v.Location.Bytes))
}
}
+ } else if e.Id.Equal(OIDExtensionSubjectInfoAccess) {
+ // RFC 5280 4.2.2.2: Subject Information Access
+ var sia []accessDescription
+ if rest, err := asn1.Unmarshal(e.Value, &sia); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 subject information")
+ }
+ if len(sia) == 0 {
+ nfe.AddError(errors.New("x509: empty SubjectInfoAccess extension"))
+ }
+
+ for _, v := range sia {
+ // TODO(drysdale): cope with non-URI types of GeneralName
+ // GeneralName: uniformResourceIdentifier [6] IA5String
+ if v.Location.Tag != 6 {
+ continue
+ }
+ if v.Method.Equal(OIDSubjectInfoAccessTimestamp) {
+ out.SubjectTimestamps = append(out.SubjectTimestamps, string(v.Location.Bytes))
+ } else if v.Method.Equal(OIDSubjectInfoAccessCARepo) {
+ out.SubjectCARepositories = append(out.SubjectCARepositories, string(v.Location.Bytes))
+ }
+ }
+ } else if e.Id.Equal(OIDExtensionIPPrefixList) {
+ out.RPKIAddressRanges = parseRPKIAddrBlocks(e.Value, &nfe)
+ } else if e.Id.Equal(OIDExtensionASList) {
+ out.RPKIASNumbers, out.RPKIRoutingDomainIDs = parseRPKIASIdentifiers(e.Value, &nfe)
} else if e.Id.Equal(OIDExtensionCTSCT) {
if rest, err := asn1.Unmarshal(e.Value, &out.RawSCT); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal SCT list extension: %v", err))
@@ -1821,16 +2080,33 @@ func parseCertificate(in *certificate) (*Certificate, error) {
// The parsed data is returned in a Certificate struct for ease of access.
func ParseTBSCertificate(asn1Data []byte) (*Certificate, error) {
var tbsCert tbsCertificate
+ var nfe NonFatalErrors
rest, err := asn1.Unmarshal(asn1Data, &tbsCert)
if err != nil {
- return nil, err
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &tbsCert, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
}
if len(rest) > 0 {
return nil, asn1.SyntaxError{Msg: "trailing data"}
}
- return parseCertificate(&certificate{
+ ret, err := parseCertificate(&certificate{
Raw: tbsCert.Raw,
TBSCertificate: tbsCert})
+ if err != nil {
+ errs, ok := err.(NonFatalErrors)
+ if !ok {
+ return nil, err
+ }
+ nfe.Errors = append(nfe.Errors, errs.Errors...)
+ }
+ if nfe.HasError() {
+ return ret, nfe
+ }
+ return ret, nil
}
// ParseCertificate parses a single certificate from the given ASN.1 DER data.
@@ -1838,15 +2114,31 @@ func ParseTBSCertificate(asn1Data []byte) (*Certificate, error) {
// error will be of type NonFatalErrors).
func ParseCertificate(asn1Data []byte) (*Certificate, error) {
var cert certificate
+ var nfe NonFatalErrors
rest, err := asn1.Unmarshal(asn1Data, &cert)
if err != nil {
- return nil, err
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &cert, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
}
if len(rest) > 0 {
return nil, asn1.SyntaxError{Msg: "trailing data"}
}
-
- return parseCertificate(&cert)
+ ret, err := parseCertificate(&cert)
+ if err != nil {
+ errs, ok := err.(NonFatalErrors)
+ if !ok {
+ return nil, err
+ }
+ nfe.Errors = append(nfe.Errors, errs.Errors...)
+ }
+ if nfe.HasError() {
+ return ret, nfe
+ }
+ return ret, nil
}
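// A minimal usage sketch of the lenient-parsing behaviour above (assuming
// this package is imported under the name "x509"): when parsing succeeds
// with recoverable problems, the returned error is a NonFatalErrors value
// and the certificate is still usable.
//
//	cert, err := x509.ParseCertificate(der)
//	if err != nil {
//		nfe, ok := err.(x509.NonFatalErrors)
//		if !ok {
//			return err // fatal: no certificate was produced
//		}
//		log.Printf("non-fatal parse issues: %v", nfe)
//	}
//	// cert is populated here even when non-fatal errors were reported.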
// ParseCertificates parses one or more certificates from the given ASN.1 DER
@@ -1855,27 +2147,32 @@ func ParseCertificate(asn1Data []byte) (*Certificate, error) {
// case the error will be of type NonFatalErrors).
func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
var v []*certificate
+ var nfe NonFatalErrors
for len(asn1Data) > 0 {
cert := new(certificate)
var err error
asn1Data, err = asn1.Unmarshal(asn1Data, cert)
if err != nil {
- return nil, err
+ var laxErr error
+			asn1Data, laxErr = asn1.UnmarshalWithParams(asn1Data, cert, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
}
v = append(v, cert)
}
- var nfe NonFatalErrors
ret := make([]*Certificate, len(v))
for i, ci := range v {
cert, err := parseCertificate(ci)
if err != nil {
- if errs, ok := err.(NonFatalErrors); !ok {
+ errs, ok := err.(NonFatalErrors)
+ if !ok {
return nil, err
- } else {
- nfe.Errors = append(nfe.Errors, errs.Errors...)
}
+ nfe.Errors = append(nfe.Errors, errs.Errors...)
}
ret[i] = cert
}
@@ -1934,18 +2231,26 @@ var (
OIDExtensionAuthorityInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 1}
OIDExtensionSubjectInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 11}
+
// OIDExtensionCTPoison is defined in RFC 6962 s3.1.
OIDExtensionCTPoison = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
// OIDExtensionCTSCT is defined in RFC 6962 s3.3.
OIDExtensionCTSCT = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
+ // OIDExtensionIPPrefixList is defined in RFC 3779 s2.
+ OIDExtensionIPPrefixList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 7}
+ // OIDExtensionASList is defined in RFC 3779 s3.
+ OIDExtensionASList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 8}
)
var (
OIDAuthorityInfoAccessOCSP = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1}
OIDAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2}
+ OIDSubjectInfoAccessTimestamp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 3}
+ OIDSubjectInfoAccessCARepo = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 5}
+ OIDAnyPolicy = asn1.ObjectIdentifier{2, 5, 29, 32, 0}
)
-// oidInExtensions returns whether an extension with the given oid exists in
+// oidInExtensions reports whether an extension with the given oid exists in
// extensions.
func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool {
for _, e := range extensions {
@@ -1991,7 +2296,7 @@ func isIA5String(s string) error {
}
func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId []byte) (ret []pkix.Extension, err error) {
- ret = make([]pkix.Extension, 11 /* maximum number of elements. */)
+ ret = make([]pkix.Extension, 12 /* maximum number of elements. */)
n := 0
if template.KeyUsage != 0 &&
@@ -2076,15 +2381,15 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) &&
!oidInExtensions(OIDExtensionAuthorityInfoAccess, template.ExtraExtensions) {
ret[n].Id = OIDExtensionAuthorityInfoAccess
- var aiaValues []authorityInfoAccess
+ var aiaValues []accessDescription
for _, name := range template.OCSPServer {
- aiaValues = append(aiaValues, authorityInfoAccess{
+ aiaValues = append(aiaValues, accessDescription{
Method: OIDAuthorityInfoAccessOCSP,
Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
})
}
for _, name := range template.IssuingCertificateURL {
- aiaValues = append(aiaValues, authorityInfoAccess{
+ aiaValues = append(aiaValues, accessDescription{
Method: OIDAuthorityInfoAccessIssuers,
Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
})
@@ -2096,10 +2401,33 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
n++
}
+	if (len(template.SubjectTimestamps) > 0 || len(template.SubjectCARepositories) > 0) &&
+ !oidInExtensions(OIDExtensionSubjectInfoAccess, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionSubjectInfoAccess
+ var siaValues []accessDescription
+ for _, ts := range template.SubjectTimestamps {
+ siaValues = append(siaValues, accessDescription{
+ Method: OIDSubjectInfoAccessTimestamp,
+ Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(ts)},
+ })
+ }
+ for _, repo := range template.SubjectCARepositories {
+ siaValues = append(siaValues, accessDescription{
+ Method: OIDSubjectInfoAccessCARepo,
+ Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(repo)},
+ })
+ }
+ ret[n].Value, err = asn1.Marshal(siaValues)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
!oidInExtensions(OIDExtensionSubjectAltName, template.ExtraExtensions) {
ret[n].Id = OIDExtensionSubjectAltName
- // https://tools.ietf.org/html/rfc5280#section-4.2.1.6
+ // From RFC 5280, Section 4.2.1.6:
// “If the subject field contains an empty sequence ... then
// subjectAltName extension ... is marked as critical”
ret[n].Critical = subjectIsEmpty
@@ -2231,7 +2559,7 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
dp := distributionPoint{
DistributionPoint: distributionPointName{
FullName: []asn1.RawValue{
- asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
+ {Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
},
},
}
@@ -2262,7 +2590,8 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
}
// Adding another extension here? Remember to update the maximum number
- // of elements in the make() at the top of the function.
+ // of elements in the make() at the top of the function and the list of
+ // template fields used in CreateCertificate documentation.
return append(ret[:n], template.ExtraExtensions...), nil
}
@@ -2305,8 +2634,12 @@ func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgori
err = errors.New("x509: unknown elliptic curve")
}
+ case ed25519.PublicKey:
+ pubType = Ed25519
+ sigAlgo.Algorithm = oidSignatureEd25519
+
default:
- err = errors.New("x509: only RSA and ECDSA keys supported")
+ err = errors.New("x509: only RSA, ECDSA and Ed25519 keys supported")
}
if err != nil {
@@ -2325,7 +2658,7 @@ func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgori
return
}
sigAlgo.Algorithm, hashFunc = details.oid, details.hash
- if hashFunc == 0 {
+ if hashFunc == 0 && pubType != Ed25519 {
err = errors.New("x509: cannot sign with hash function requested")
return
}
@@ -2349,12 +2682,26 @@ func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgori
var emptyASN1Subject = []byte{0x30, 0}
// CreateCertificate creates a new X.509v3 certificate based on a template.
-// The following members of template are used: AuthorityKeyId,
-// BasicConstraintsValid, DNSNames, ExcludedDNSDomains, ExtKeyUsage,
-// IsCA, KeyUsage, MaxPathLen, MaxPathLenZero, NotAfter, NotBefore,
-// PermittedDNSDomains, PermittedDNSDomainsCritical, SerialNumber,
-// SignatureAlgorithm, Subject, SubjectKeyId, UnknownExtKeyUsage,
-// and RawSCT.
+// The following members of template are used:
+// - SerialNumber
+// - Subject
+// - NotBefore, NotAfter
+// - SignatureAlgorithm
+// - For extensions:
+// - KeyUsage
+// - ExtKeyUsage, UnknownExtKeyUsage
+// - BasicConstraintsValid, IsCA, MaxPathLen, MaxPathLenZero
+// - SubjectKeyId
+// - AuthorityKeyId
+// - OCSPServer, IssuingCertificateURL
+// - SubjectTimestamps, SubjectCARepositories
+// - DNSNames, EmailAddresses, IPAddresses, URIs
+// - PolicyIdentifiers
+// - ExcludedDNSDomains, ExcludedIPRanges, ExcludedEmailAddresses, ExcludedURIDomains, PermittedDNSDomainsCritical,
+// PermittedDNSDomains, PermittedIPRanges, PermittedEmailAddresses, PermittedURIDomains
+// - CRLDistributionPoints
+// - RawSCT, SCTList
+// - ExtraExtensions
//
// The certificate is signed by parent. If parent is equal to template then the
// certificate is self-signed. The parameter pub is the public key of the
@@ -2362,8 +2709,9 @@ var emptyASN1Subject = []byte{0x30, 0}
//
// The returned slice is the certificate in DER encoding.
//
-// All keys types that are implemented via crypto.Signer are supported (This
-// includes *rsa.PublicKey and *ecdsa.PublicKey.)
+// The currently supported key types are *rsa.PublicKey, *ecdsa.PublicKey and
+// ed25519.PublicKey. pub must be a supported key type, and priv must be a
+// crypto.Signer with a supported public key.
//
// The AuthorityKeyId will be taken from the SubjectKeyId of parent, if any,
// unless the resulting certificate is self-signed. Otherwise the value from
@@ -2424,15 +2772,16 @@ func CreateCertificate(rand io.Reader, template, parent *Certificate, pub, priv
if err != nil {
return
}
-
c.Raw = tbsCertContents
- h := hashFunc.New()
- h.Write(tbsCertContents)
- digest := h.Sum(nil)
+ signed := tbsCertContents
+ if hashFunc != 0 {
+ h := hashFunc.New()
+ h.Write(signed)
+ signed = h.Sum(nil)
+ }
- var signerOpts crypto.SignerOpts
- signerOpts = hashFunc
+ var signerOpts crypto.SignerOpts = hashFunc
if template.SignatureAlgorithm != 0 && template.SignatureAlgorithm.isRSAPSS() {
signerOpts = &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthEqualsHash,
@@ -2441,7 +2790,7 @@ func CreateCertificate(rand io.Reader, template, parent *Certificate, pub, priv
}
var signature []byte
- signature, err = key.Sign(rand, digest, signerOpts)
+ signature, err = key.Sign(rand, signed, signerOpts)
if err != nil {
return
}
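// A sketch of why the hashFunc != 0 guard above exists: Ed25519 signs the
// full message rather than a precomputed digest, and crypto.Hash(0) is how
// that is signalled to a crypto.Signer. Assuming an ed25519.PrivateKey priv
// and raw TBS bytes tbs, the equivalent standalone call is:
//
//	sig, err := priv.Sign(rand.Reader, tbs, crypto.Hash(0))
//
// which matches CreateCertificate passing the unhashed tbsCertContents to
// key.Sign when no hash function is selected.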
@@ -2531,12 +2880,15 @@ func (c *Certificate) CreateCRL(rand io.Reader, priv interface{}, revokedCerts [
return
}
- h := hashFunc.New()
- h.Write(tbsCertListContents)
- digest := h.Sum(nil)
+ signed := tbsCertListContents
+ if hashFunc != 0 {
+ h := hashFunc.New()
+ h.Write(signed)
+ signed = h.Sum(nil)
+ }
var signature []byte
- signature, err = key.Sign(rand, digest, hashFunc)
+ signature, err = key.Sign(rand, signed, hashFunc)
if err != nil {
return
}
@@ -2564,21 +2916,25 @@ type CertificateRequest struct {
Subject pkix.Name
- // Attributes is the dried husk of a bug and shouldn't be used.
+ // Attributes contains the CSR attributes that can parse as
+ // pkix.AttributeTypeAndValueSET.
+ //
+ // Deprecated: Use Extensions and ExtraExtensions instead for parsing and
+ // generating the requestedExtensions attribute.
Attributes []pkix.AttributeTypeAndValueSET
- // Extensions contains raw X.509 extensions. When parsing CSRs, this
- // can be used to extract extensions that are not parsed by this
+ // Extensions contains all requested extensions, in raw form. When parsing
+ // CSRs, this can be used to extract extensions that are not parsed by this
// package.
Extensions []pkix.Extension
- // ExtraExtensions contains extensions to be copied, raw, into any
- // marshaled CSR. Values override any extensions that would otherwise
- // be produced based on the other fields but are overridden by any
- // extensions specified in Attributes.
+ // ExtraExtensions contains extensions to be copied, raw, into any CSR
+ // marshaled by CreateCertificateRequest. Values override any extensions
+ // that would otherwise be produced based on the other fields but are
+ // overridden by any extensions specified in Attributes.
//
- // The ExtraExtensions field is not populated when parsing CSRs, see
- // Extensions.
+ // The ExtraExtensions field is not populated by ParseCertificateRequest,
+ // see Extensions instead.
ExtraExtensions []pkix.Extension
// Subject Alternate Name values.
@@ -2628,7 +2984,7 @@ func newRawAttributes(attributes []pkix.AttributeTypeAndValueSET) ([]asn1.RawVal
return rawAttributes, nil
}
-// parseRawAttributes Unmarshals RawAttributes intos AttributeTypeAndValueSETs.
+// parseRawAttributes Unmarshals RawAttributes into AttributeTypeAndValueSETs.
func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndValueSET {
var attributes []pkix.AttributeTypeAndValueSET
for _, rawAttr := range rawAttributes {
@@ -2646,8 +3002,7 @@ func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndVa
// parseCSRExtensions parses the attributes from a CSR and extracts any
// requested extensions.
func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error) {
- // pkcs10Attribute reflects the Attribute structure from section 4.1 of
- // https://tools.ietf.org/html/rfc2986.
+ // pkcs10Attribute reflects the Attribute structure from RFC 2986, Section 4.1.
type pkcs10Attribute struct {
Id asn1.ObjectIdentifier
Values []asn1.RawValue `asn1:"set"`
@@ -2676,14 +3031,24 @@ func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error)
}
// CreateCertificateRequest creates a new certificate request based on a
-// template. The following members of template are used: Attributes, DNSNames,
-// EmailAddresses, ExtraExtensions, IPAddresses, URIs, SignatureAlgorithm, and
-// Subject. The private key is the private key of the signer.
+// template. The following members of template are used:
//
-// The returned slice is the certificate request in DER encoding.
+// - SignatureAlgorithm
+// - Subject
+// - DNSNames
+// - EmailAddresses
+// - IPAddresses
+// - URIs
+// - ExtraExtensions
+// - Attributes (deprecated)
+//
+// priv is the private key to sign the CSR with, and the corresponding public
+// key will be included in the CSR. It must implement crypto.Signer and its
+// Public() method must return a *rsa.PublicKey, an *ecdsa.PublicKey or an
+// ed25519.PublicKey. (A *rsa.PrivateKey, *ecdsa.PrivateKey or
+// ed25519.PrivateKey satisfies this.)
//
-// All keys types that are implemented via crypto.Signer are supported (This
-// includes *rsa.PublicKey and *ecdsa.PublicKey.)
+// The returned slice is the certificate request in DER encoding.
func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv interface{}) (csr []byte, err error) {
key, ok := priv.(crypto.Signer)
if !ok {
@@ -2721,77 +3086,96 @@ func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv
extensions = append(extensions, template.ExtraExtensions...)
- var attributes []pkix.AttributeTypeAndValueSET
- attributes = append(attributes, template.Attributes...)
+ // Make a copy of template.Attributes because we may alter it below.
+ attributes := make([]pkix.AttributeTypeAndValueSET, 0, len(template.Attributes))
+ for _, attr := range template.Attributes {
+ values := make([][]pkix.AttributeTypeAndValue, len(attr.Value))
+ copy(values, attr.Value)
+ attributes = append(attributes, pkix.AttributeTypeAndValueSET{
+ Type: attr.Type,
+ Value: values,
+ })
+ }
+ extensionsAppended := false
if len(extensions) > 0 {
- // specifiedExtensions contains all the extensions that we
- // found specified via template.Attributes.
- specifiedExtensions := make(map[string]bool)
-
- for _, atvSet := range template.Attributes {
- if !atvSet.Type.Equal(oidExtensionRequest) {
+ // Append the extensions to an existing attribute if possible.
+ for _, atvSet := range attributes {
+ if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 {
continue
}
+ // specifiedExtensions contains all the extensions that we
+ // found specified via template.Attributes.
+ specifiedExtensions := make(map[string]bool)
+
for _, atvs := range atvSet.Value {
for _, atv := range atvs {
specifiedExtensions[atv.Type.String()] = true
}
}
- }
- atvs := make([]pkix.AttributeTypeAndValue, 0, len(extensions))
- for _, e := range extensions {
- if specifiedExtensions[e.Id.String()] {
- // Attributes already contained a value for
- // this extension and it takes priority.
- continue
- }
+ newValue := make([]pkix.AttributeTypeAndValue, 0, len(atvSet.Value[0])+len(extensions))
+ newValue = append(newValue, atvSet.Value[0]...)
- atvs = append(atvs, pkix.AttributeTypeAndValue{
- // There is no place for the critical flag in a CSR.
- Type: e.Id,
- Value: e.Value,
- })
- }
+ for _, e := range extensions {
+ if specifiedExtensions[e.Id.String()] {
+ // Attributes already contained a value for
+ // this extension and it takes priority.
+ continue
+ }
- // Append the extensions to an existing attribute if possible.
- appended := false
- for _, atvSet := range attributes {
- if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 {
- continue
+ newValue = append(newValue, pkix.AttributeTypeAndValue{
+ // There is no place for the critical
+ // flag in an AttributeTypeAndValue.
+ Type: e.Id,
+ Value: e.Value,
+ })
}
- atvSet.Value[0] = append(atvSet.Value[0], atvs...)
- appended = true
+ atvSet.Value[0] = newValue
+ extensionsAppended = true
break
}
+ }
- // Otherwise, add a new attribute for the extensions.
- if !appended {
- attributes = append(attributes, pkix.AttributeTypeAndValueSET{
- Type: oidExtensionRequest,
- Value: [][]pkix.AttributeTypeAndValue{
- atvs,
- },
- })
+ rawAttributes, err := newRawAttributes(attributes)
+ if err != nil {
+ return
+ }
+
+ // If not included in attributes, add a new attribute for the
+ // extensions.
+ if len(extensions) > 0 && !extensionsAppended {
+ attr := struct {
+ Type asn1.ObjectIdentifier
+ Value [][]pkix.Extension `asn1:"set"`
+ }{
+ Type: oidExtensionRequest,
+ Value: [][]pkix.Extension{extensions},
+ }
+
+ b, err := asn1.Marshal(attr)
+ if err != nil {
+ return nil, errors.New("x509: failed to serialise extensions attribute: " + err.Error())
+ }
+
+ var rawValue asn1.RawValue
+ if _, err := asn1.Unmarshal(b, &rawValue); err != nil {
+ return nil, err
}
+
+ rawAttributes = append(rawAttributes, rawValue)
}
asn1Subject := template.RawSubject
if len(asn1Subject) == 0 {
asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence())
if err != nil {
- return
+ return nil, err
}
}
- rawAttributes, err := newRawAttributes(attributes)
- if err != nil {
- return
- }
-
tbsCSR := tbsCertificateRequest{
Version: 0, // PKCS #10, RFC 2986
Subject: asn1.RawValue{FullBytes: asn1Subject},
@@ -2811,12 +3195,15 @@ func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv
}
tbsCSR.Raw = tbsCSRContents
- h := hashFunc.New()
- h.Write(tbsCSRContents)
- digest := h.Sum(nil)
+ signed := tbsCSRContents
+ if hashFunc != 0 {
+ h := hashFunc.New()
+ h.Write(signed)
+ signed = h.Sum(nil)
+ }
var signature []byte
- signature, err = key.Sign(rand, digest, hashFunc)
+ signature, err = key.Sign(rand, signed, hashFunc)
if err != nil {
return
}
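// A hedged usage sketch of the Ed25519 path through CreateCertificateRequest,
// assuming this package is imported as "x509" (the key, subject and DNS name
// below are illustrative only):
//
//	_, priv, _ := ed25519.GenerateKey(rand.Reader)
//	csrDER, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{
//		Subject:  pkix.Name{CommonName: "example.com"},
//		DNSNames: []string{"example.com"},
//	}, priv)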
@@ -2848,7 +3235,7 @@ func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) {
func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) {
out := &CertificateRequest{
- Raw: in.Raw,
+ Raw: in.Raw,
RawTBSCertificateRequest: in.TBSCSR.Raw,
RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw,
RawSubject: in.TBSCSR.Subject.FullBytes,
@@ -2863,10 +3250,15 @@ func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error
}
var err error
- out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey)
+ var nfe NonFatalErrors
+ out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey, &nfe)
if err != nil {
return nil, err
}
+ // Treat non-fatal errors as fatal here.
+ if len(nfe.Errors) > 0 {
+ return nil, nfe.Errors[0]
+ }
var subject pkix.RDNSequence
if rest, err := asn1.Unmarshal(in.TBSCSR.Subject.FullBytes, &subject); err != nil {
@@ -2881,7 +3273,6 @@ func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error
return nil, err
}
- var nfe NonFatalErrors
for _, extension := range out.Extensions {
if extension.Id.Equal(OIDExtensionSubjectAltName) {
out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(extension.Value, &nfe)
diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go
new file mode 100644
index 0000000000..81f54d5ef2
--- /dev/null
+++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go
@@ -0,0 +1,151 @@
+// Copyright 2022 Google LLC.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// Client is a cross-platform client for the signer binary (a.k.a. "EnterpriseCertSigner").
+// The signer binary is OS-specific, but exposes a standard set of APIs for the client to use.
+package client
+
+import (
+ "crypto"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/gob"
+ "fmt"
+ "io"
+ "net/rpc"
+ "os"
+ "os/exec"
+
+ "github.com/googleapis/enterprise-certificate-proxy/client/util"
+)
+
+const signAPI = "EnterpriseCertSigner.Sign"
+const certificateChainAPI = "EnterpriseCertSigner.CertificateChain"
+const publicKeyAPI = "EnterpriseCertSigner.Public"
+
+// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser.
+type Connection struct {
+ io.ReadCloser
+ io.WriteCloser
+}
+
+// Close closes c's underlying ReadCloser and WriteCloser.
+func (c *Connection) Close() error {
+ rerr := c.ReadCloser.Close()
+ werr := c.WriteCloser.Close()
+ if rerr != nil {
+ return rerr
+ }
+ return werr
+}
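+// A minimal sketch of how Connection is used by Cred below: the subprocess's
+// stdout/stdin pipes are joined into one duplex stream for net/rpc (kout and
+// kin are the pipe names used later in this file):
+//
+//	cli := rpc.NewClient(&Connection{ReadCloser: kout, WriteCloser: kin})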
+
+func init() {
+ gob.Register(crypto.SHA256)
+ gob.Register(&rsa.PSSOptions{})
+}
+
+// SignArgs contains arguments to a crypto Signer.Sign method.
+type SignArgs struct {
+ Digest []byte // The content to sign.
+ Opts crypto.SignerOpts // Options for signing, such as Hash identifier.
+}
+
+// Key implements credential.Credential by holding the executed signer subprocess.
+type Key struct {
+ cmd *exec.Cmd // Pointer to the signer subprocess.
+ client *rpc.Client // Pointer to the rpc client that communicates with the signer subprocess.
+ publicKey crypto.PublicKey // Public key of loaded certificate.
+ chain [][]byte // Certificate chain of loaded certificate.
+}
+
+// CertificateChain returns the credential as a raw X509 cert chain. This contains the public key.
+func (k *Key) CertificateChain() [][]byte {
+ return k.chain
+}
+
+// Close closes the RPC connection and kills the signer subprocess.
+// Call this to free up resources when the Key object is no longer needed.
+func (k *Key) Close() error {
+ if err := k.client.Close(); err != nil {
+ return fmt.Errorf("failed to close RPC connection: %w", err)
+ }
+ if err := k.cmd.Process.Kill(); err != nil {
+ return fmt.Errorf("failed to kill signer process: %w", err)
+ }
+ if err := k.cmd.Wait(); err.Error() != "signal: killed" {
+ return fmt.Errorf("signer process was not killed: %w", err)
+ }
+ return nil
+}
+
+// Public returns the public key for this Key.
+func (k *Key) Public() crypto.PublicKey {
+ return k.publicKey
+}
+
+// Sign signs a message by encrypting a message digest, using the specified signer options.
+func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) {
+ err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed)
+ return
+}
+
+// Cred spawns a signer subprocess that listens on stdin/stdout to perform
+// certificate-related operations, including signing messages with the private key.
+//
+// The signer binary path is read from the specified configFilePath, if provided.
+// Otherwise, the default config file path is used.
+//
+// The config file also specifies which certificate the signer should use.
+func Cred(configFilePath string) (*Key, error) {
+ if configFilePath == "" {
+ configFilePath = util.GetDefaultConfigFilePath()
+ }
+ enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath)
+ if err != nil {
+ return nil, err
+ }
+ k := &Key{
+ cmd: exec.Command(enterpriseCertSignerPath, configFilePath),
+ }
+
+ // Redirect errors from subprocess to parent process.
+ k.cmd.Stderr = os.Stderr
+
+ // RPC client will communicate with subprocess over stdin/stdout.
+ kin, err := k.cmd.StdinPipe()
+ if err != nil {
+ return nil, err
+ }
+ kout, err := k.cmd.StdoutPipe()
+ if err != nil {
+ return nil, err
+ }
+ k.client = rpc.NewClient(&Connection{kout, kin})
+
+ if err := k.cmd.Start(); err != nil {
+ return nil, fmt.Errorf("starting enterprise cert signer subprocess: %w", err)
+ }
+
+ if err := k.client.Call(certificateChainAPI, struct{}{}, &k.chain); err != nil {
+ return nil, fmt.Errorf("failed to retrieve certificate chain: %w", err)
+ }
+
+ var publicKeyBytes []byte
+ if err := k.client.Call(publicKeyAPI, struct{}{}, &publicKeyBytes); err != nil {
+ return nil, fmt.Errorf("failed to retrieve public key: %w", err)
+ }
+
+ publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse public key: %w", err)
+ }
+
+ var ok bool
+ k.publicKey, ok = publicKey.(crypto.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf("invalid public key type: %T", publicKey)
+ }
+
+ return k, nil
+}
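+// A hedged usage sketch (the config path and message bytes are illustrative):
+//
+//	key, err := client.Cred("/path/to/enterprise_certificate_config.json")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer key.Close()
+//	sum := sha256.Sum256(msg)
+//	sig, err := key.Sign(nil, sum[:], crypto.SHA256)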
diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
new file mode 100644
index 0000000000..6b5f2806e6
--- /dev/null
+++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
@@ -0,0 +1,72 @@
+// Package util provides helper functions for the client.
+package util
+
+import (
+ "encoding/json"
+ "errors"
+ "io/ioutil"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+)
+
+const configFileName = "enterprise_certificate_config.json"
+
+// EnterpriseCertificateConfig contains parameters for initializing signer.
+type EnterpriseCertificateConfig struct {
+ Libs Libs `json:"libs"`
+}
+
+// Libs specifies the locations of helper libraries.
+type Libs struct {
+ SignerBinary string `json:"signer_binary"`
+}
+
+// LoadSignerBinaryPath retrieves the path of the signer binary from the config file.
+func LoadSignerBinaryPath(configFilePath string) (path string, err error) {
+	jsonFile, err := os.Open(configFilePath)
+	if err != nil {
+		return "", err
+	}
+	// Close the config file once its contents have been read.
+	defer jsonFile.Close()
+
+ byteValue, err := ioutil.ReadAll(jsonFile)
+ if err != nil {
+ return "", err
+ }
+ var config EnterpriseCertificateConfig
+ err = json.Unmarshal(byteValue, &config)
+ if err != nil {
+ return "", err
+ }
+ signerBinaryPath := config.Libs.SignerBinary
+ if signerBinaryPath == "" {
+		return "", errors.New("signer binary path is missing")
+ }
+ return signerBinaryPath, nil
+}
+
+func guessHomeDir() string {
+ // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
+ if v := os.Getenv("HOME"); v != "" {
+ return v
+ }
+ // Else, fall back to user.Current:
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
+ }
+ return ""
+}
+
+func getDefaultConfigFileDirectory() (directory string) {
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud")
+ } else {
+ return filepath.Join(guessHomeDir(), ".config/gcloud")
+ }
+}
+
+// GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud.
+func GetDefaultConfigFilePath() (path string) {
+ return filepath.Join(getDefaultConfigFileDirectory(), configFileName)
+}
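+// A sketch of the JSON shape LoadSignerBinaryPath expects (the binary path
+// shown is hypothetical):
+//
+//	{"libs": {"signer_binary": "/usr/local/bin/ecp_signer"}}
+//
+// which a caller typically resolves via:
+//
+//	path, err := util.LoadSignerBinaryPath(util.GetDefaultConfigFilePath())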
diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
new file mode 100644
index 0000000000..0e643a05b5
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+ "v2": "2.4.0"
+}
diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
new file mode 100644
index 0000000000..b42ace44c9
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
@@ -0,0 +1,18 @@
+# Changelog
+
+## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09)
+
+
+### Features
+
+* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c))
+
+
+### Bug Fixes
+
+* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e))
+
+
+### Miscellaneous Chores
+
+* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719))
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
new file mode 100644
index 0000000000..7d0128a0cd
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
@@ -0,0 +1,298 @@
+// Copyright 2021, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package apierror implements a wrapper error for parsing error details from
+// API calls. Both HTTP & gRPC status errors are supported.
+package apierror
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto"
+ "google.golang.org/api/googleapi"
+ "google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+// ErrDetails holds the google/rpc/error_details.proto messages.
+type ErrDetails struct {
+ ErrorInfo *errdetails.ErrorInfo
+ BadRequest *errdetails.BadRequest
+ PreconditionFailure *errdetails.PreconditionFailure
+ QuotaFailure *errdetails.QuotaFailure
+ RetryInfo *errdetails.RetryInfo
+ ResourceInfo *errdetails.ResourceInfo
+ RequestInfo *errdetails.RequestInfo
+ DebugInfo *errdetails.DebugInfo
+ Help *errdetails.Help
+ LocalizedMessage *errdetails.LocalizedMessage
+
+ // Unknown stores unidentifiable error details.
+ Unknown []interface{}
+}
+
+func (e ErrDetails) String() string {
+ var d strings.Builder
+ if e.ErrorInfo != nil {
+ d.WriteString(fmt.Sprintf("error details: name = ErrorInfo reason = %s domain = %s metadata = %s\n",
+ e.ErrorInfo.GetReason(), e.ErrorInfo.GetDomain(), e.ErrorInfo.GetMetadata()))
+ }
+
+ if e.BadRequest != nil {
+ v := e.BadRequest.GetFieldViolations()
+ var f []string
+ var desc []string
+ for _, x := range v {
+ f = append(f, x.GetField())
+ desc = append(desc, x.GetDescription())
+ }
+ d.WriteString(fmt.Sprintf("error details: name = BadRequest field = %s desc = %s\n",
+ strings.Join(f, " "), strings.Join(desc, " ")))
+ }
+
+ if e.PreconditionFailure != nil {
+ v := e.PreconditionFailure.GetViolations()
+ var t []string
+ var s []string
+ var desc []string
+ for _, x := range v {
+ t = append(t, x.GetType())
+ s = append(s, x.GetSubject())
+ desc = append(desc, x.GetDescription())
+ }
+ d.WriteString(fmt.Sprintf("error details: name = PreconditionFailure type = %s subj = %s desc = %s\n", strings.Join(t, " "),
+ strings.Join(s, " "), strings.Join(desc, " ")))
+ }
+
+ if e.QuotaFailure != nil {
+ v := e.QuotaFailure.GetViolations()
+ var s []string
+ var desc []string
+ for _, x := range v {
+ s = append(s, x.GetSubject())
+ desc = append(desc, x.GetDescription())
+ }
+ d.WriteString(fmt.Sprintf("error details: name = QuotaFailure subj = %s desc = %s\n",
+ strings.Join(s, " "), strings.Join(desc, " ")))
+ }
+
+ if e.RequestInfo != nil {
+ d.WriteString(fmt.Sprintf("error details: name = RequestInfo id = %s data = %s\n",
+ e.RequestInfo.GetRequestId(), e.RequestInfo.GetServingData()))
+ }
+
+ if e.ResourceInfo != nil {
+ d.WriteString(fmt.Sprintf("error details: name = ResourceInfo type = %s resourcename = %s owner = %s desc = %s\n",
+ e.ResourceInfo.GetResourceType(), e.ResourceInfo.GetResourceName(),
+ e.ResourceInfo.GetOwner(), e.ResourceInfo.GetDescription()))
+
+ }
+ if e.RetryInfo != nil {
+ d.WriteString(fmt.Sprintf("error details: retry in %s\n", e.RetryInfo.GetRetryDelay().AsDuration()))
+
+ }
+ if e.Unknown != nil {
+ var s []string
+ for _, x := range e.Unknown {
+ s = append(s, fmt.Sprintf("%v", x))
+ }
+ d.WriteString(fmt.Sprintf("error details: name = Unknown desc = %s\n", strings.Join(s, " ")))
+ }
+
+ if e.DebugInfo != nil {
+ d.WriteString(fmt.Sprintf("error details: name = DebugInfo detail = %s stack = %s\n", e.DebugInfo.GetDetail(),
+ strings.Join(e.DebugInfo.GetStackEntries(), " ")))
+ }
+ if e.Help != nil {
+ var desc []string
+ var url []string
+ for _, x := range e.Help.Links {
+ desc = append(desc, x.GetDescription())
+ url = append(url, x.GetUrl())
+ }
+ d.WriteString(fmt.Sprintf("error details: name = Help desc = %s url = %s\n",
+ strings.Join(desc, " "), strings.Join(url, " ")))
+ }
+ if e.LocalizedMessage != nil {
+ d.WriteString(fmt.Sprintf("error details: name = LocalizedMessage locale = %s msg = %s\n",
+ e.LocalizedMessage.GetLocale(), e.LocalizedMessage.GetMessage()))
+ }
+
+ return d.String()
+}
+
+// APIError wraps either a gRPC Status error or an HTTP googleapi.Error. It
+// implements error and Status interfaces.
+type APIError struct {
+ err error
+ status *status.Status
+ httpErr *googleapi.Error
+ details ErrDetails
+}
+
+// Details presents the error details of the APIError.
+func (a *APIError) Details() ErrDetails {
+ return a.details
+}
+
+// Unwrap extracts the original error.
+func (a *APIError) Unwrap() error {
+ return a.err
+}
+
+// Error returns a readable representation of the APIError.
+func (a *APIError) Error() string {
+ var msg string
+ if a.status != nil {
+ msg = a.err.Error()
+ } else if a.httpErr != nil {
+ // Truncate the googleapi.Error message because it dumps the Details in
+ // an ugly way.
+ msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message)
+ }
+ return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details))
+}
+
+// GRPCStatus extracts the underlying gRPC Status error.
+// This method is necessary to fulfill the interface
+// described in https://pkg.go.dev/google.golang.org/grpc/status#FromError.
+func (a *APIError) GRPCStatus() *status.Status {
+ return a.status
+}
+
+// Reason returns the reason in an ErrorInfo.
+// If ErrorInfo is nil, it returns an empty string.
+func (a *APIError) Reason() string {
+ return a.details.ErrorInfo.GetReason()
+}
+
+// Domain returns the domain in an ErrorInfo.
+// If ErrorInfo is nil, it returns an empty string.
+func (a *APIError) Domain() string {
+ return a.details.ErrorInfo.GetDomain()
+}
+
+// Metadata returns the metadata in an ErrorInfo.
+// If ErrorInfo is nil, it returns nil.
+func (a *APIError) Metadata() map[string]string {
+ return a.details.ErrorInfo.GetMetadata()
+
+}
+
+// FromError parses a Status error or a googleapi.Error and builds an APIError.
+func FromError(err error) (*APIError, bool) {
+ if err == nil {
+ return nil, false
+ }
+
+ ae := APIError{err: err}
+ st, isStatus := status.FromError(err)
+ var herr *googleapi.Error
+ isHTTPErr := errors.As(err, &herr)
+
+ switch {
+ case isStatus:
+ ae.status = st
+ ae.details = parseDetails(st.Details())
+ case isHTTPErr:
+ ae.httpErr = herr
+ ae.details = parseHTTPDetails(herr)
+ default:
+ return nil, false
+ }
+
+ return &ae, true
+
+}
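+// A minimal usage sketch (assuming err came back from a Google API call):
+//
+//	if apiErr, ok := apierror.FromError(err); ok {
+//		log.Printf("reason=%q domain=%q", apiErr.Reason(), apiErr.Domain())
+//		if ri := apiErr.Details().RetryInfo; ri != nil {
+//			time.Sleep(ri.GetRetryDelay().AsDuration())
+//		}
+//	}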
+
+// parseDetails accepts a slice of interface{} that should be backed by some
+// sort of proto.Message that can be cast to the google/rpc/error_details.proto
+// types.
+//
+// This is for internal use only.
+func parseDetails(details []interface{}) ErrDetails {
+ var ed ErrDetails
+ for _, d := range details {
+ switch d := d.(type) {
+ case *errdetails.ErrorInfo:
+ ed.ErrorInfo = d
+ case *errdetails.BadRequest:
+ ed.BadRequest = d
+ case *errdetails.PreconditionFailure:
+ ed.PreconditionFailure = d
+ case *errdetails.QuotaFailure:
+ ed.QuotaFailure = d
+ case *errdetails.RetryInfo:
+ ed.RetryInfo = d
+ case *errdetails.ResourceInfo:
+ ed.ResourceInfo = d
+ case *errdetails.RequestInfo:
+ ed.RequestInfo = d
+ case *errdetails.DebugInfo:
+ ed.DebugInfo = d
+ case *errdetails.Help:
+ ed.Help = d
+ case *errdetails.LocalizedMessage:
+ ed.LocalizedMessage = d
+ default:
+ ed.Unknown = append(ed.Unknown, d)
+ }
+ }
+
+ return ed
+}
+
+// parseHTTPDetails will convert the given googleapi.Error into the protobuf
+// representation then parse the Any values that contain the error details.
+//
+// This is for internal use only.
+func parseHTTPDetails(gae *googleapi.Error) ErrDetails {
+ e := &jsonerror.Error{}
+ if err := protojson.Unmarshal([]byte(gae.Body), e); err != nil {
+ // If the error body does not conform to the error schema, ignore it
+	// altogether. See https://cloud.google.com/apis/design/errors#http_mapping.
+ return ErrDetails{}
+ }
+
+ // Coerce the Any messages into proto.Message then parse the details.
+ details := []interface{}{}
+ for _, any := range e.GetError().GetDetails() {
+ m, err := any.UnmarshalNew()
+ if err != nil {
+ // Ignore malformed Any values.
+ continue
+ }
+ details = append(details, m)
+ }
+
+ return parseDetails(details)
+}
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md
new file mode 100644
index 0000000000..9ff0caea94
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md
@@ -0,0 +1,30 @@
+# HTTP JSON Error Schema
+
+The `error.proto` represents the HTTP-JSON schema used by Google APIs to convey
+error payloads as described by https://cloud.google.com/apis/design/errors#http_mapping.
+This package is for internal parsing logic only and should not be used in any
+other context.
+
+## Regeneration
+
+To regenerate the protobuf Go code you will need the following:
+
+* A local copy of [googleapis], the absolute path to which should be exported to
+the environment variable `GOOGLEAPIS`
+* The protobuf compiler [protoc]
+* The Go [protobuf plugin]
+* The [goimports] tool
+
+From this directory run the following command:
+```sh
+protoc -I $GOOGLEAPIS -I. --go_out=. --go_opt=module=github.com/googleapis/gax-go/v2/apierror/internal/proto error.proto
+goimports -w .
+```
+
+Note: the `module` plugin option ensures the generated code is placed in this
+directory, and not in several nested directories defined by the `go_package` option.
+
+[googleapis]: https://github.com/googleapis/googleapis
+[protoc]: https://github.com/protocolbuffers/protobuf#protocol-compiler-installation
+[protobuf plugin]: https://developers.google.com/protocol-buffers/docs/reference/go-generated
+[goimports]: https://pkg.go.dev/golang.org/x/tools/cmd/goimports \ No newline at end of file
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go
new file mode 100644
index 0000000000..7dd9b83739
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go
@@ -0,0 +1,280 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.0
+// protoc v3.15.8
+// source: apierror/internal/proto/error.proto
+
+package jsonerror
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ code "google.golang.org/genproto/googleapis/rpc/code"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The error format v2 for Google JSON REST APIs.
+// Copied from https://cloud.google.com/apis/design/errors#http_mapping.
+//
+// NOTE: This schema is not used for other wire protocols.
+type Error struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The actual error payload. The nested message structure is for backward
+ // compatibility with Google API client libraries. It also makes the error
+ // more readable to developers.
+ Error *Error_Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *Error) Reset() {
+ *x = Error{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_apierror_internal_proto_error_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Error) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Error) ProtoMessage() {}
+
+func (x *Error) ProtoReflect() protoreflect.Message {
+ mi := &file_apierror_internal_proto_error_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Error.ProtoReflect.Descriptor instead.
+func (*Error) Descriptor() ([]byte, []int) {
+ return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Error) GetError() *Error_Status {
+ if x != nil {
+ return x.Error
+ }
+ return nil
+}
+
+// This message has the same semantics as `google.rpc.Status`. It uses HTTP
+// status code instead of gRPC status code. It has an extra field `status`
+// for backward compatibility with Google API Client Libraries.
+type Error_Status struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The HTTP status code that corresponds to `google.rpc.Status.code`.
+ Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+ // This corresponds to `google.rpc.Status.message`.
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ // This is the enum version for `google.rpc.Status.code`.
+ Status code.Code `protobuf:"varint,4,opt,name=status,proto3,enum=google.rpc.Code" json:"status,omitempty"`
+ // This corresponds to `google.rpc.Status.details`.
+ Details []*anypb.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
+}
+
+func (x *Error_Status) Reset() {
+ *x = Error_Status{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_apierror_internal_proto_error_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Error_Status) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Error_Status) ProtoMessage() {}
+
+func (x *Error_Status) ProtoReflect() protoreflect.Message {
+ mi := &file_apierror_internal_proto_error_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Error_Status.ProtoReflect.Descriptor instead.
+func (*Error_Status) Descriptor() ([]byte, []int) {
+ return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Error_Status) GetCode() int32 {
+ if x != nil {
+ return x.Code
+ }
+ return 0
+}
+
+func (x *Error_Status) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+func (x *Error_Status) GetStatus() code.Code {
+ if x != nil {
+ return x.Status
+ }
+ return code.Code(0)
+}
+
+func (x *Error_Status) GetDetails() []*anypb.Any {
+ if x != nil {
+ return x.Details
+ }
+ return nil
+}
+
+var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor
+
+var file_apierror_internal_proto_error_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5,
+ 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e,
+ 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12,
+ 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f,
+ 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c,
+ 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64,
+ 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f,
+ 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_apierror_internal_proto_error_proto_rawDescOnce sync.Once
+ file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc
+)
+
+func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte {
+ file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() {
+ file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData)
+ })
+ return file_apierror_internal_proto_error_proto_rawDescData
+}
+
+var file_apierror_internal_proto_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_apierror_internal_proto_error_proto_goTypes = []interface{}{
+ (*Error)(nil), // 0: error.Error
+ (*Error_Status)(nil), // 1: error.Error.Status
+ (code.Code)(0), // 2: google.rpc.Code
+ (*anypb.Any)(nil), // 3: google.protobuf.Any
+}
+var file_apierror_internal_proto_error_proto_depIdxs = []int32{
+ 1, // 0: error.Error.error:type_name -> error.Error.Status
+ 2, // 1: error.Error.Status.status:type_name -> google.rpc.Code
+ 3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_apierror_internal_proto_error_proto_init() }
+func file_apierror_internal_proto_error_proto_init() {
+ if File_apierror_internal_proto_error_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Error); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Error_Status); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_apierror_internal_proto_error_proto_goTypes,
+ DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs,
+ MessageInfos: file_apierror_internal_proto_error_proto_msgTypes,
+ }.Build()
+ File_apierror_internal_proto_error_proto = out.File
+ file_apierror_internal_proto_error_proto_rawDesc = nil
+ file_apierror_internal_proto_error_proto_goTypes = nil
+ file_apierror_internal_proto_error_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto
new file mode 100644
index 0000000000..4b9b13ce11
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto
@@ -0,0 +1,46 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package error;
+
+import "google/protobuf/any.proto";
+import "google/rpc/code.proto";
+
+option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror";
+
+// The error format v2 for Google JSON REST APIs.
+// Copied from https://cloud.google.com/apis/design/errors#http_mapping.
+//
+// NOTE: This schema is not used for other wire protocols.
+message Error {
+ // This message has the same semantics as `google.rpc.Status`. It uses HTTP
+ // status code instead of gRPC status code. It has an extra field `status`
+ // for backward compatibility with Google API Client Libraries.
+ message Status {
+ // The HTTP status code that corresponds to `google.rpc.Status.code`.
+ int32 code = 1;
+ // This corresponds to `google.rpc.Status.message`.
+ string message = 2;
+ // This is the enum version for `google.rpc.Status.code`.
+ google.rpc.Code status = 4;
+ // This corresponds to `google.rpc.Status.details`.
+ repeated google.protobuf.Any details = 5;
+ }
+ // The actual error payload. The nested message structure is for backward
+ // compatibility with Google API client libraries. It also makes the error
+ // more readable to developers.
+ Status error = 1;
+}
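
For orientation, the message above mirrors the JSON error format v2 payload that Google JSON REST APIs return. A minimal decoding sketch with the generated type follows; the package is internal to gax-go, so the import path is illustrative only, and the sample payload is an assumption shaped after the linked mapping.

package main

import (
	"fmt"

	jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto"
	"google.golang.org/protobuf/encoding/protojson"
)

func main() {
	// Payload shaped per https://cloud.google.com/apis/design/errors#http_mapping.
	raw := []byte(`{"error": {"code": 400, "message": "API key not valid.", "status": "INVALID_ARGUMENT"}}`)

	var e jsonerror.Error
	if err := protojson.Unmarshal(raw, &e); err != nil {
		panic(err)
	}
	// Prints the HTTP code, the message, and the google.rpc.Code enum value.
	fmt.Println(e.GetError().GetCode(), e.GetError().GetMessage(), e.GetError().GetStatus())
}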
diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go
index b1d53dd19c..e092005563 100644
--- a/vendor/github.com/googleapis/gax-go/v2/call_option.go
+++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go
@@ -30,9 +30,11 @@
package gax
import (
+ "errors"
"math/rand"
"time"
+ "google.golang.org/api/googleapi"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -47,7 +49,7 @@ type CallOption interface {
// Retryer is used by Invoke to determine retry behavior.
type Retryer interface {
- // Retry reports whether a request should be retriedand how long to pause before retrying
+ // Retry reports whether a request should be retried and how long to pause before retrying
// if the previous attempt returned with err. Invoke never calls Retry with nil error.
Retry(err error) (pause time.Duration, shouldRetry bool)
}
@@ -63,6 +65,31 @@ func WithRetry(fn func() Retryer) CallOption {
return retryerOption(fn)
}
+// OnErrorFunc returns a Retryer that retries if and only if the previous attempt
+// returns an error that satisfies shouldRetry.
+//
+// Pause times between retries are specified by bo. bo is only used for its
+// parameters; each Retryer has its own copy.
+func OnErrorFunc(bo Backoff, shouldRetry func(err error) bool) Retryer {
+ return &errorRetryer{
+ shouldRetry: shouldRetry,
+ backoff: bo,
+ }
+}
+
+type errorRetryer struct {
+ backoff Backoff
+ shouldRetry func(err error) bool
+}
+
+func (r *errorRetryer) Retry(err error) (time.Duration, bool) {
+ if r.shouldRetry(err) {
+ return r.backoff.Pause(), true
+ }
+
+ return 0, false
+}
+
// OnCodes returns a Retryer that retries if and only if
// the previous attempt returns a GRPC error whose error code is stored in cc.
// Pause times between retries are specified by bo.
@@ -94,22 +121,60 @@ func (r *boRetryer) Retry(err error) (time.Duration, bool) {
return 0, false
}
-// Backoff implements exponential backoff.
-// The wait time between retries is a random value between 0 and the "retry envelope".
-// The envelope starts at Initial and increases by the factor of Multiplier every retry,
-// but is capped at Max.
+// OnHTTPCodes returns a Retryer that retries if and only if
+// the previous attempt returns a googleapi.Error whose status code is stored in
+// cc. Pause times between retries are specified by bo.
+//
+// bo is only used for its parameters; each Retryer has its own copy.
+func OnHTTPCodes(bo Backoff, cc ...int) Retryer {
+ codes := make(map[int]bool, len(cc))
+ for _, c := range cc {
+ codes[c] = true
+ }
+
+ return &httpRetryer{
+ backoff: bo,
+ codes: codes,
+ }
+}
+
+type httpRetryer struct {
+ backoff Backoff
+ codes map[int]bool
+}
+
+func (r *httpRetryer) Retry(err error) (time.Duration, bool) {
+ var gerr *googleapi.Error
+ if !errors.As(err, &gerr) {
+ return 0, false
+ }
+
+ if r.codes[gerr.Code] {
+ return r.backoff.Pause(), true
+ }
+
+ return 0, false
+}
+
+// Backoff implements exponential backoff. The wait time between retries is a
+// random value between 0 and the "retry period", the current upper bound on
+// that wait. The retry period starts at Initial and increases by the factor of
+// Multiplier every retry, but is capped at Max.
+//
+// Note: MaxNumRetries / RPCDeadline is specifically not provided. These should
+// be built on top of Backoff.
type Backoff struct {
- // Initial is the initial value of the retry envelope, defaults to 1 second.
+ // Initial is the initial value of the retry period, defaults to 1 second.
Initial time.Duration
- // Max is the maximum value of the retry envelope, defaults to 30 seconds.
+ // Max is the maximum value of the retry period, defaults to 30 seconds.
Max time.Duration
- // Multiplier is the factor by which the retry envelope increases.
+ // Multiplier is the factor by which the retry period increases.
// It should be greater than 1 and defaults to 2.
Multiplier float64
- // cur is the current retry envelope
+ // cur is the current retry period.
cur time.Duration
}
@@ -145,6 +210,21 @@ func (o grpcOpt) Resolve(s *CallSettings) {
s.GRPC = o
}
+type pathOpt struct {
+ p string
+}
+
+func (p pathOpt) Resolve(s *CallSettings) {
+ s.Path = p.p
+}
+
+// WithPath applies a Path override to the HTTP-based APICall.
+//
+// This is for internal use only.
+func WithPath(p string) CallOption {
+ return &pathOpt{p: p}
+}
+
// WithGRPCOptions allows passing gRPC call options during client creation.
func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
return grpcOpt(append([]grpc.CallOption(nil), opt...))
@@ -158,4 +238,7 @@ type CallSettings struct {
// CallOptions to be forwarded to GRPC.
GRPC []grpc.CallOption
+
+ // Path is an HTTP override for an APICall.
+ Path string
}
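
A hedged usage sketch for the retryers added above: the backoff values, status codes, and stub APICall below are illustrative assumptions, not defaults from this change.

package main

import (
	"context"
	"net/http"
	"time"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	// Stand-in for a generated HTTP-based call stub; a real stub would
	// return a *googleapi.Error on failure so OnHTTPCodes can match it.
	call := func(ctx context.Context, _ gax.CallSettings) error {
		return nil
	}

	bo := gax.Backoff{
		Initial:    250 * time.Millisecond, // first retry period (illustrative)
		Max:        10 * time.Second,       // cap on the retry period
		Multiplier: 2,                      // growth factor per retry
	}

	// Retry only on HTTP 429/503, pausing per bo between attempts.
	_ = gax.Invoke(context.Background(), call,
		gax.WithRetry(func() gax.Retryer {
			return gax.OnHTTPCodes(bo, http.StatusTooManyRequests, http.StatusServiceUnavailable)
		}))
}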
diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go
index 3fd1b0b84b..36cdfa33e3 100644
--- a/vendor/github.com/googleapis/gax-go/v2/gax.go
+++ b/vendor/github.com/googleapis/gax-go/v2/gax.go
@@ -35,5 +35,7 @@
// to simplify code generation and to provide more convenient and idiomatic API surfaces.
package gax
+import "github.com/googleapis/gax-go/v2/internal"
+
// Version specifies the gax-go version being used.
-const Version = "2.0.4"
+const Version = internal.Version
diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go
new file mode 100644
index 0000000000..bf272a5045
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go
@@ -0,0 +1,33 @@
+// Copyright 2022, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package internal
+
+// Version is the current tagged release of the library.
+const Version = "2.4.0"
diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go
index fe31dd004e..9fcc29959b 100644
--- a/vendor/github.com/googleapis/gax-go/v2/invoke.go
+++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go
@@ -33,13 +33,15 @@ import (
"context"
"strings"
"time"
+
+ "github.com/googleapis/gax-go/v2/apierror"
)
// APICall is a user defined call stub.
type APICall func(context.Context, CallSettings) error
-// Invoke calls the given APICall,
-// performing retries as specified by opts, if any.
+// Invoke calls the given APICall, performing retries as specified by opts, if
+// any.
func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
var settings CallSettings
for _, opt := range opts {
@@ -71,9 +73,6 @@ func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper
if err == nil {
return nil
}
- if settings.Retry == nil {
- return err
- }
// Never retry permanent certificate errors. (e.g. if ca-certificates
// are not installed). We should only make very few, targeted
// exceptions: many (other) status=Unavailable should be retried, such
@@ -83,6 +82,12 @@ func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper
if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
return err
}
+ if apierr, ok := apierror.FromError(err); ok {
+ err = apierr
+ }
+ if settings.Retry == nil {
+ return err
+ }
if retryer == nil {
if r := settings.Retry(); r != nil {
retryer = r
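
Because invoke now normalizes qualifying errors through apierror.FromError before consulting the Retryer, a predicate passed to the new OnErrorFunc can inspect the wrapped *apierror.APIError. A sketch under that assumption; the Unavailable-only policy is illustrative, not from the diff:

package main

import (
	"errors"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"github.com/googleapis/gax-go/v2/apierror"
	"google.golang.org/grpc/codes"
)

func main() {
	bo := gax.Backoff{Initial: time.Second}
	_ = gax.OnErrorFunc(bo, func(err error) bool {
		var ae *apierror.APIError
		if errors.As(err, &ae) {
			// Retry only transient unavailability (illustrative policy).
			return ae.GRPCStatus().Code() == codes.Unavailable
		}
		return false
	})
}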
diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go
new file mode 100644
index 0000000000..cc4486eb9e
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go
@@ -0,0 +1,126 @@
+// Copyright 2022, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+ "encoding/json"
+ "errors"
+ "io"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var (
+ arrayOpen = json.Delim('[')
+ arrayClose = json.Delim(']')
+ errBadOpening = errors.New("unexpected opening token, expected '['")
+)
+
+// ProtoJSONStream represents a wrapper for consuming a stream of protobuf
+// messages encoded using protobuf-JSON format. More information on this format
+// can be found at https://developers.google.com/protocol-buffers/docs/proto3#json.
+// The stream must appear as a comma-delimited JSON array of objects with
+// opening and closing square brackets.
+//
+// This is for internal use only.
+type ProtoJSONStream struct {
+ first, closed bool
+ reader io.ReadCloser
+ stream *json.Decoder
+ typ protoreflect.MessageType
+}
+
+// NewProtoJSONStreamReader accepts, via an io.ReadCloser, a stream of bytes
+// containing protobuf-JSON encoded protobuf messages of the given type. The
+// ProtoJSONStream must be closed when done.
+//
+// This is for internal use only.
+func NewProtoJSONStreamReader(rc io.ReadCloser, typ protoreflect.MessageType) *ProtoJSONStream {
+ return &ProtoJSONStream{
+ first: true,
+ reader: rc,
+ stream: json.NewDecoder(rc),
+ typ: typ,
+ }
+}
+
+// Recv decodes the next protobuf message in the stream or returns io.EOF if
+// the stream is done. It is not safe to call Recv on the same stream from
+// different goroutines, just like it is not safe to do so with a single gRPC
+// stream. Type-assert the protobuf message returned to the type provided at
+// ProtoJSONStream creation.
+// Calls to Recv after calling Close will produce io.EOF.
+func (s *ProtoJSONStream) Recv() (proto.Message, error) {
+ if s.closed {
+ return nil, io.EOF
+ }
+ if s.first {
+ s.first = false
+
+ // Consume the opening '[' so Decode gets one object at a time.
+ if t, err := s.stream.Token(); err != nil {
+ return nil, err
+ } else if t != arrayOpen {
+ return nil, errBadOpening
+ }
+ }
+
+ // Capture the next block of data for the item (a JSON object) in the stream.
+ var raw json.RawMessage
+ if err := s.stream.Decode(&raw); err != nil {
+ e := err
+ // To avoid checking the first token of each stream, just attempt to
+ // Decode the next blob and if that fails, double check if it is just
+ // the closing token ']'. If it is the closing, return io.EOF. If it
+ // isn't, return the original error.
+ if t, _ := s.stream.Token(); t == arrayClose {
+ e = io.EOF
+ }
+ return nil, e
+ }
+
+ // Initialize a new instance of the protobuf message to unmarshal the
+ // raw data into.
+ m := s.typ.New().Interface()
+ err := protojson.Unmarshal(raw, m)
+
+ return m, err
+}
+
+// Close closes the stream so that resources are cleaned up.
+func (s *ProtoJSONStream) Close() error {
+ // Drop the reference to the *json.Decoder so that its memory can be gc'd.
+ s.stream = nil
+ s.closed = true
+
+ return s.reader.Close()
+}
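
A brief consumption sketch for ProtoJSONStream; rc and typ are caller-supplied assumptions, and rc must yield a JSON array of objects per the doc comment above:

package example

import (
	"io"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
)

// drain reads every message from a protobuf-JSON array stream and passes
// each one to visit.
func drain(rc io.ReadCloser, typ protoreflect.MessageType, visit func(proto.Message)) error {
	s := gax.NewProtoJSONStreamReader(rc, typ)
	defer s.Close()
	for {
		m, err := s.Recv()
		if err == io.EOF {
			return nil // the stream consumed the closing ']'
		}
		if err != nil {
			return err
		}
		visit(m)
	}
}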
diff --git a/vendor/github.com/googleapis/gax-go/v2/release-please-config.json b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json
new file mode 100644
index 0000000000..61ee266a15
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json
@@ -0,0 +1,10 @@
+{
+ "release-type": "go-yoshi",
+ "separate-pull-requests": true,
+ "include-component-in-tag": false,
+ "packages": {
+ "v2": {
+ "component": "v2"
+ }
+ }
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
index 5f0d1fb6a7..5f920e9732 100644
--- a/vendor/github.com/inconshreveable/mousetrap/LICENSE
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -1,13 +1,201 @@
-Copyright 2014 Alan Shreve
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
- http://www.apache.org/licenses/LICENSE-2.0
+ 1. Definitions.
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2022 Alan Shreve (@inconshreveable)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml
index 730c7fa51b..c56f37c0c9 100644
--- a/vendor/github.com/jmespath/go-jmespath/.travis.yml
+++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml
@@ -12,6 +12,17 @@ go:
- 1.11.x
- 1.12.x
- 1.13.x
+ - 1.14.x
+ - 1.15.x
+ - tip
-install: go get -v -t ./...
-script: make test
+allow_failures:
+ - go: tip
+
+script: make build
+
+matrix:
+ include:
+ - language: go
+ go: 1.15.x
+ script: make test
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
index a828d2848f..fb38ec2760 100644
--- a/vendor/github.com/jmespath/go-jmespath/Makefile
+++ b/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -1,6 +1,8 @@
CMD = jpgo
+SRC_PKGS=./ ./cmd/... ./fuzz/...
+
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " test to run all the tests"
@@ -9,21 +11,22 @@ help:
generate:
- go generate ./...
+ go generate ${SRC_PKGS}
build:
rm -f $(CMD)
- go build ./...
+ go build ${SRC_PKGS}
rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
mv cmd/$(CMD)/$(CMD) .
-test:
- go test -v ./...
+test: test-internal-testify
+ echo "making tests ${SRC_PKGS}"
+ go test -v ${SRC_PKGS}
check:
- go vet ./...
- @echo "golint ./..."
- @lint=`golint ./...`; \
+ go vet ${SRC_PKGS}
+ @echo "golint ${SRC_PKGS}"
+ @lint=`golint ${SRC_PKGS}`; \
lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
echo "$$lint"; \
if [ "$$lint" != "" ]; then exit 1; fi
@@ -42,3 +45,7 @@ bench:
pprof-cpu:
go tool pprof ./go-jmespath.test ./cpu.out
+
+test-internal-testify:
+ cd internal/testify && go test ./...
+
diff --git a/vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/manager.go b/vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/manager.go
index 39bb60f450..26c74f79f5 100644
--- a/vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/manager.go
+++ b/vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/manager.go
@@ -18,9 +18,9 @@ const (
DockerCSIPluginCap = "csinode"
)
-// PluginManager manages the multiple CSI plugins that may be in use on the
-// node. PluginManager should be thread-safe.
-type PluginManager interface {
+// Manager manages the multiple CSI plugins that may be in use on the
+// node. Manager should be thread-safe.
+type Manager interface {
// Get gets the plugin with the given name
Get(name string) (NodePlugin, error)
@@ -43,7 +43,7 @@ type pluginManager struct {
pg plugingetter.PluginGetter
}
-func NewPluginManager(pg plugingetter.PluginGetter, secrets SecretGetter) PluginManager {
+func NewManager(pg plugingetter.PluginGetter, secrets SecretGetter) Manager {
return &pluginManager{
plugins: map[string]NodePlugin{},
newNodePluginFunc: NewNodePlugin,
diff --git a/vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/manager_deprecated.go b/vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/manager_deprecated.go
new file mode 100644
index 0000000000..5c814c7e93
--- /dev/null
+++ b/vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/manager_deprecated.go
@@ -0,0 +1,11 @@
+package plugin
+
+// Deprecated: use [Manager].
+//
+//nolint:revive // exported: type name will be used as plugin.PluginManager by other packages
+type PluginManager = Manager
+
+// Deprecated: use [NewManager].
+//
+//nolint:unused
+var NewPluginManager = NewManager
diff --git a/vendor/github.com/moby/swarmkit/v2/agent/csi/volumes.go b/vendor/github.com/moby/swarmkit/v2/agent/csi/volumes.go
index a2127fc963..46fd772cf9 100644
--- a/vendor/github.com/moby/swarmkit/v2/agent/csi/volumes.go
+++ b/vendor/github.com/moby/swarmkit/v2/agent/csi/volumes.go
@@ -17,7 +17,7 @@ import (
"github.com/moby/swarmkit/v2/volumequeue"
)
-const CSI_CALL_TIMEOUT = 15 * time.Second
+const csiCallTimeout = 15 * time.Second
// volumeState keeps track of the state of a volume on this node.
type volumeState struct {
@@ -39,8 +39,8 @@ type volumes struct {
// volumes is a mapping of volume ID to volumeState
volumes map[string]volumeState
- // plugins is the PluginManager, which provides translation to the CSI RPCs
- plugins plugin.PluginManager
+ // plugins is the Manager, which provides translation to the CSI RPCs
+ plugins plugin.Manager
// pendingVolumes is a VolumeQueue which manages which volumes are
// processed and when.
@@ -51,7 +51,7 @@ type volumes struct {
func NewManager(pg plugingetter.PluginGetter, secrets exec.SecretGetter) exec.VolumesManager {
r := &volumes{
volumes: map[string]volumeState{},
- plugins: plugin.NewPluginManager(pg, secrets),
+ plugins: plugin.NewManager(pg, secrets),
pendingVolumes: volumequeue.NewVolumeQueue(),
}
go r.retryVolumes()
@@ -107,7 +107,7 @@ func (r *volumes) tryVolume(ctx context.Context, id string, attempt uint) {
// These are too complicated to be worth the engineering effort at this
// time.
- timeoutCtx, cancel := context.WithTimeout(ctx, CSI_CALL_TIMEOUT)
+ timeoutCtx, cancel := context.WithTimeout(ctx, csiCallTimeout)
// always gotta call the WithTimeout cancel
defer cancel()
diff --git a/vendor/github.com/moby/swarmkit/v2/agent/exec/controller_stub.go b/vendor/github.com/moby/swarmkit/v2/agent/exec/controller_stub.go
index 6775779e59..a116f15611 100644
--- a/vendor/github.com/moby/swarmkit/v2/agent/exec/controller_stub.go
+++ b/vendor/github.com/moby/swarmkit/v2/agent/exec/controller_stub.go
@@ -21,7 +21,6 @@ type StubController struct {
RemoveFn func(ctx context.Context) error
CloseFn func() error
calls map[string]int
- cstatus *api.ContainerStatus
}
// NewStubController returns an initialized StubController
@@ -38,7 +37,7 @@ func (sc *StubController) called() {
if !ok {
panic("Failed to find caller of function")
}
- // longName looks like 'github.com/docker/swarmkit/agent/exec.(*StubController).Prepare:1'
+ // longName looks like 'github.com/moby/swarmkit/agent/exec.(*StubController).Prepare:1'
longName := runtime.FuncForPC(pc).Name()
parts := strings.Split(longName, ".")
tail := strings.Split(parts[len(parts)-1], ":")
diff --git a/vendor/github.com/moby/swarmkit/v2/agent/exec/executor.go b/vendor/github.com/moby/swarmkit/v2/agent/exec/executor.go
index 61a305aec6..6469fddbf3 100644
--- a/vendor/github.com/moby/swarmkit/v2/agent/exec/executor.go
+++ b/vendor/github.com/moby/swarmkit/v2/agent/exec/executor.go
@@ -112,9 +112,9 @@ type VolumesManager interface {
Plugins() VolumePluginManager
}
-// PluginManager is the interface for accessing the volume plugin manager from
+// VolumePluginManager is the interface for accessing the volume plugin manager from
// the executor. This is identical to
-// github.com/docker/swarmkit/agent/csi/plugin.PluginManager, except the former
+// github.com/moby/swarmkit/agent/csi/plugin.PluginManager, except the former
// also includes a Get method for the VolumesManager to use. This does not
// contain that Get method, to avoid having to import the Plugin type, and
// because in this context, it is not needed.
diff --git a/vendor/github.com/moby/swarmkit/v2/agent/reporter.go b/vendor/github.com/moby/swarmkit/v2/agent/reporter.go
index db7456c3b7..0abb565a03 100644
--- a/vendor/github.com/moby/swarmkit/v2/agent/reporter.go
+++ b/vendor/github.com/moby/swarmkit/v2/agent/reporter.go
@@ -15,7 +15,7 @@ type StatusReporter interface {
UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error
}
-// Reporter recieves update to both task and volume status.
+// Reporter receives update to both task and volume status.
type Reporter interface {
StatusReporter
ReportVolumeUnpublished(ctx context.Context, volumeID string) error
@@ -27,12 +27,15 @@ func (fn statusReporterFunc) UpdateTaskStatus(ctx context.Context, taskID string
return fn(ctx, taskID, status)
}
+//nolint:unused // currently only used in tests.
type volumeReporterFunc func(ctx context.Context, volumeID string) error
+//nolint:unused // currently only used in tests.
func (fn volumeReporterFunc) ReportVolumeUnpublished(ctx context.Context, volumeID string) error {
return fn(ctx, volumeID)
}
+//nolint:unused // currently only used in tests.
type statusReporterCombined struct {
statusReporterFunc
volumeReporterFunc
diff --git a/vendor/github.com/moby/swarmkit/v2/agent/session.go b/vendor/github.com/moby/swarmkit/v2/agent/session.go
index 97d5621eb9..e751f4a654 100644
--- a/vendor/github.com/moby/swarmkit/v2/agent/session.go
+++ b/vendor/github.com/moby/swarmkit/v2/agent/session.go
@@ -391,6 +391,7 @@ func (s *session) sendTaskStatus(ctx context.Context, taskID string, taskStatus
return nil
}
+//nolint:unused // TODO(thaJeztah) this is currently unused: is it safe to remove?
func (s *session) sendTaskStatuses(ctx context.Context, updates ...*api.UpdateTaskStatusRequest_TaskStatusUpdate) ([]*api.UpdateTaskStatusRequest_TaskStatusUpdate, error) {
if len(updates) < 1 {
return nil, nil
diff --git a/vendor/github.com/moby/swarmkit/v2/agent/worker.go b/vendor/github.com/moby/swarmkit/v2/agent/worker.go
index ad51aa716e..2143f3506d 100644
--- a/vendor/github.com/moby/swarmkit/v2/agent/worker.go
+++ b/vendor/github.com/moby/swarmkit/v2/agent/worker.go
@@ -57,7 +57,6 @@ type statusReporterKey struct {
type worker struct {
db *bolt.DB
executor exec.Executor
- publisher exec.LogPublisher
listeners map[*statusReporterKey]struct{}
taskevents *watch.Queue
publisherProvider exec.LogPublisherProvider
diff --git a/vendor/github.com/moby/swarmkit/v2/ca/certificates.go b/vendor/github.com/moby/swarmkit/v2/ca/certificates.go
index 49ea63dd24..edf26ac1ec 100644
--- a/vendor/github.com/moby/swarmkit/v2/ca/certificates.go
+++ b/vendor/github.com/moby/swarmkit/v2/ca/certificates.go
@@ -754,7 +754,7 @@ func GetRemoteCA(ctx context.Context, d digest.Digest, connBroker *connectionbro
io.Copy(verifier, bytes.NewReader(response.Certificate))
if !verifier.Verified() {
- return RootCA{}, errors.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex())
+ return RootCA{}, errors.Errorf("remote CA does not match fingerprint. Expected: %s", d.Encoded())
}
}
diff --git a/vendor/github.com/moby/swarmkit/v2/ca/config.go b/vendor/github.com/moby/swarmkit/v2/ca/config.go
index f70052bc59..45b84a43d8 100644
--- a/vendor/github.com/moby/swarmkit/v2/ca/config.go
+++ b/vendor/github.com/moby/swarmkit/v2/ca/config.go
@@ -367,16 +367,16 @@ func GenerateJoinToken(rootCA *RootCA, fips bool) string {
panic(fmt.Errorf("failed to read random bytes: %v", err))
}
- var nn, digest big.Int
+ var nn, dgst big.Int
nn.SetBytes(secretBytes[:])
- digest.SetString(rootCA.Digest.Hex(), 16)
+ dgst.SetString(rootCA.Digest.Encoded(), 16)
fmtString := "SWMTKN-1-%0[1]*s-%0[3]*s"
if fips {
fmtString = "SWMTKN-2-1-%0[1]*s-%0[3]*s"
}
return fmt.Sprintf(fmtString, base36DigestLen,
- digest.Text(joinTokenBase), maxGeneratedSecretLength, nn.Text(joinTokenBase))
+ dgst.Text(joinTokenBase), maxGeneratedSecretLength, nn.Text(joinTokenBase))
}
// DownloadRootCA tries to retrieve a remote root CA and matches the digest against the provided token.
diff --git a/vendor/github.com/moby/swarmkit/v2/ca/server.go b/vendor/github.com/moby/swarmkit/v2/ca/server.go
index 44a51b5e24..19f495c4ed 100644
--- a/vendor/github.com/moby/swarmkit/v2/ca/server.go
+++ b/vendor/github.com/moby/swarmkit/v2/ca/server.go
@@ -65,7 +65,6 @@ type Server struct {
signingMu sync.Mutex
// lets us monitor and finish root rotations
- rootReconciler *rootRotationReconciler
rootReconciliationRetryInterval time.Duration
}
diff --git a/vendor/github.com/moby/swarmkit/v2/manager/allocator/cnmallocator/portallocator.go b/vendor/github.com/moby/swarmkit/v2/manager/allocator/cnmallocator/portallocator.go
index 303ac13b6b..3e5e1c4443 100644
--- a/vendor/github.com/moby/swarmkit/v2/manager/allocator/cnmallocator/portallocator.go
+++ b/vendor/github.com/moby/swarmkit/v2/manager/allocator/cnmallocator/portallocator.go
@@ -297,10 +297,6 @@ func (pa *portAllocator) hostPublishPortsNeedUpdate(s *api.Service) bool {
return false
}
-func (pa *portAllocator) isPortsAllocated(s *api.Service) bool {
- return pa.isPortsAllocatedOnInit(s, false)
-}
-
func (pa *portAllocator) isPortsAllocatedOnInit(s *api.Service, onInit bool) bool {
// If service has no user-defined endpoint and allocated endpoint,
// we assume it is allocated and return true.
diff --git a/vendor/github.com/moby/swarmkit/v2/manager/allocator/network.go b/vendor/github.com/moby/swarmkit/v2/manager/allocator/network.go
index d39f8627d9..673da84996 100644
--- a/vendor/github.com/moby/swarmkit/v2/manager/allocator/network.go
+++ b/vendor/github.com/moby/swarmkit/v2/manager/allocator/network.go
@@ -375,6 +375,7 @@ func isOverlayNetwork(n *api.Network) bool {
return false
}
+//nolint:unused // TODO(thaJeztah) this is currently unused: is it safe to remove?
func (a *Allocator) getAllocatedNetworks() ([]*api.Network, error) {
var (
err error
@@ -506,6 +507,7 @@ func (a *Allocator) allocateNodes(ctx context.Context, existingAddressesOnly boo
return nil
}
+//nolint:unused // TODO(thaJeztah) this is currently unused: is it safe to remove?
func (a *Allocator) deallocateNodes(ctx context.Context) error {
var (
nodes []*api.Node
diff --git a/vendor/github.com/moby/swarmkit/v2/manager/scheduler/volumes.go b/vendor/github.com/moby/swarmkit/v2/manager/scheduler/volumes.go
index 12383c98ef..9ddba6be16 100644
--- a/vendor/github.com/moby/swarmkit/v2/manager/scheduler/volumes.go
+++ b/vendor/github.com/moby/swarmkit/v2/manager/scheduler/volumes.go
@@ -50,9 +50,11 @@ func newVolumeSet() *volumeSet {
}
}
+// getVolume returns the volume object for the given ID as stored in the
+// volumeSet, or nil if none exists.
+//
+//nolint:unused // TODO(thaJeztah) this is currently unused: is it safe to remove?
func (vs *volumeSet) getVolume(id string) *api.Volume {
- // getVolume returns the volume object for the given ID as stored in the
- // volumeSet, or nil if none exists
return vs.volumes[id].volume
}
@@ -77,6 +79,7 @@ func (vs *volumeSet) addOrUpdateVolume(v *api.Volume) {
vs.byName[v.Spec.Annotations.Name] = v.ID
}
+//nolint:unused // only used in tests.
func (vs *volumeSet) removeVolume(volumeID string) {
if info, ok := vs.volumes[volumeID]; ok {
// if the volume exists in the set, look up its group ID and remove it
diff --git a/vendor/github.com/moby/swarmkit/v2/manager/state/raft/raft.go b/vendor/github.com/moby/swarmkit/v2/manager/state/raft/raft.go
index 86e313958e..3d607b1350 100644
--- a/vendor/github.com/moby/swarmkit/v2/manager/state/raft/raft.go
+++ b/vendor/github.com/moby/swarmkit/v2/manager/state/raft/raft.go
@@ -132,8 +132,7 @@ type Node struct {
// RemovedFromRaft notifies about node deletion from raft cluster
RemovedFromRaft chan struct{}
cancelFunc func()
- // removeRaftCh notifies about node deletion from raft cluster
- removeRaftCh chan struct{}
+
removeRaftOnce sync.Once
leadershipBroadcast *watch.Queue
@@ -1289,6 +1288,7 @@ func (n *Node) processRaftMessageLogger(ctx context.Context, msg *api.ProcessRaf
return log.G(ctx).WithFields(fields)
}
+//nolint:unused // currently unused, but should be used again; see TODO in Node.ProcessRaftMessage
func (n *Node) reportNewAddress(ctx context.Context, id uint64) error {
// too early
if !n.IsMember() {
@@ -1418,9 +1418,9 @@ func (n *Node) ProcessRaftMessage(ctx context.Context, msg *api.ProcessRaftMessa
// See https://github.com/docker/docker/issues/30455.
// This should be reenabled in the future with additional
// safeguards (perhaps storing multiple addresses per node).
- //if err := n.reportNewAddress(ctx, msg.Message.From); err != nil {
+ // if err := n.reportNewAddress(ctx, msg.Message.From); err != nil {
// log.G(ctx).WithError(err).Errorf("failed to report new address of %x to transport", msg.Message.From)
- //}
+ // }
// Reject vote requests from unreachable peers
if msg.Message.Type == raftpb.MsgVote {
diff --git a/vendor/github.com/moby/swarmkit/v2/manager/state/raft/storage/snapwrap.go b/vendor/github.com/moby/swarmkit/v2/manager/state/raft/storage/snapwrap.go
index 02f9afea7f..7ae6d595ca 100644
--- a/vendor/github.com/moby/swarmkit/v2/manager/state/raft/storage/snapwrap.go
+++ b/vendor/github.com/moby/swarmkit/v2/manager/state/raft/storage/snapwrap.go
@@ -8,7 +8,6 @@ import (
"github.com/moby/swarmkit/v2/manager/encryption"
"github.com/pkg/errors"
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
)
@@ -118,12 +117,10 @@ func MigrateSnapshot(oldDir, newDir string, oldFactory, newFactory SnapFactory)
}
tmpdirpath := filepath.Clean(newDir) + ".tmp"
- if fileutil.Exist(tmpdirpath) {
- if err := os.RemoveAll(tmpdirpath); err != nil {
- return errors.Wrap(err, "could not remove temporary snapshot directory")
- }
+ if err := os.RemoveAll(tmpdirpath); err != nil {
+ return errors.Wrap(err, "could not remove temporary snapshot directory")
}
- if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
+ if err := os.MkdirAll(tmpdirpath, 0o700); err != nil {
return errors.Wrap(err, "could not create temporary snapshot directory")
}
tmpSnapshotter := newFactory.New(tmpdirpath)
diff --git a/vendor/github.com/moby/swarmkit/v2/manager/state/raft/storage/walwrap.go b/vendor/github.com/moby/swarmkit/v2/manager/state/raft/storage/walwrap.go
index 2fd0a91804..48252059eb 100644
--- a/vendor/github.com/moby/swarmkit/v2/manager/state/raft/storage/walwrap.go
+++ b/vendor/github.com/moby/swarmkit/v2/manager/state/raft/storage/walwrap.go
@@ -174,7 +174,11 @@ func ReadRepairWAL(
return nil, WALData{}, errors.Wrap(err, "failed to decrypt WAL")
}
// we can only repair ErrUnexpectedEOF and we never repair twice.
- if repaired || err != io.ErrUnexpectedEOF {
+ if repaired || !errors.Is(err, io.ErrUnexpectedEOF) {
+ // TODO(thaJeztah): should ReadRepairWAL be updated to handle cases where
+ // some (last) of the files cannot be recovered? ("best effort" recovery?)
+ // Or should an informative error be produced to help the user (which could
+ // mean: remove the last file?). See TestReadRepairWAL for more details.
return nil, WALData{}, errors.Wrap(err, "irreparable WAL error")
}
if !wal.Repair(nil, walDir) {
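
The move from a direct comparison to errors.Is matters once the underlying read error arrives wrapped, as the surrounding decryption layers can produce. A standalone illustration, not taken from the diff:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// A wrapped io.ErrUnexpectedEOF, as wrapping layers can produce.
	err := fmt.Errorf("failed to decrypt WAL: %w", io.ErrUnexpectedEOF)

	fmt.Println(err == io.ErrUnexpectedEOF)          // false: identity check misses the wrapping
	fmt.Println(errors.Is(err, io.ErrUnexpectedEOF)) // true: errors.Is unwraps the chain
}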
diff --git a/vendor/github.com/moby/swarmkit/v2/node/node.go b/vendor/github.com/moby/swarmkit/v2/node/node.go
index 5e57ec32d0..ab5cc7b92d 100644
--- a/vendor/github.com/moby/swarmkit/v2/node/node.go
+++ b/vendor/github.com/moby/swarmkit/v2/node/node.go
@@ -15,9 +15,6 @@ import (
"sync"
"time"
- "github.com/moby/swarmkit/v2/ca/keyutils"
- "github.com/moby/swarmkit/v2/identity"
-
"github.com/docker/docker/libnetwork/drivers/overlay/overlayutils"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/go-metrics"
@@ -26,7 +23,9 @@ import (
"github.com/moby/swarmkit/v2/agent/exec"
"github.com/moby/swarmkit/v2/api"
"github.com/moby/swarmkit/v2/ca"
+ "github.com/moby/swarmkit/v2/ca/keyutils"
"github.com/moby/swarmkit/v2/connectionbroker"
+ "github.com/moby/swarmkit/v2/identity"
"github.com/moby/swarmkit/v2/ioutils"
"github.com/moby/swarmkit/v2/log"
"github.com/moby/swarmkit/v2/manager"
@@ -45,6 +44,7 @@ import (
const (
stateFilename = "state.json"
roleChangeTimeout = 16 * time.Second
+ certDirectory = "certificates"
)
var (
@@ -53,7 +53,6 @@ var (
errNodeStarted = errors.New("node: already started")
errNodeNotStarted = errors.New("node: not started")
- certDirectory = "certificates"
// ErrInvalidUnlockKey is returned when we can't decrypt the TLS certificate
ErrInvalidUnlockKey = errors.New("node is locked, and needs a valid unlock key")
@@ -1028,7 +1027,7 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
// The context used to start this might have a logger associated with it
// that we'd like to reuse, but we don't want to use that context, so we
// pass to the goroutine only the logger, and create a new context with
- //that logger.
+ // that logger.
go func(logger *logrus.Entry) {
if err := m.Run(log.WithLogger(context.Background(), logger)); err != nil {
runErr = err
diff --git a/vendor/github.com/moby/term/tc.go b/vendor/github.com/moby/term/tc.go
index 65556027a6..8a5e09f584 100644
--- a/vendor/github.com/moby/term/tc.go
+++ b/vendor/github.com/moby/term/tc.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package term
diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go
index 29c6acf1c7..8c1fc1b5a5 100644
--- a/vendor/github.com/moby/term/term.go
+++ b/vendor/github.com/moby/term/term.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
// Package term provides structures and helper functions to work with
@@ -14,10 +15,8 @@ import (
"golang.org/x/sys/unix"
)
-var (
- // ErrInvalidState is returned if the state of the terminal is invalid.
- ErrInvalidState = errors.New("Invalid terminal state")
-)
+// ErrInvalidState is returned if the state of the terminal is invalid.
+var ErrInvalidState = errors.New("Invalid terminal state")
// State represents the state of the terminal.
type State struct {
diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go
index ba82960d4a..3cdc8edbda 100644
--- a/vendor/github.com/moby/term/term_windows.go
+++ b/vendor/github.com/moby/term/term_windows.go
@@ -66,10 +66,6 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
}
}
- // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and
- // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as
- // go-ansiterm hasn't switch to x/sys/windows.
- // TODO: switch back to x/sys/windows once go-ansiterm has switched
if emulateStdin {
h := uint32(windows.STD_INPUT_HANDLE)
stdIn = windowsconsole.NewAnsiReader(int(h))
diff --git a/vendor/github.com/moby/term/termios.go b/vendor/github.com/moby/term/termios.go
index 0f028e2273..99c0f7de60 100644
--- a/vendor/github.com/moby/term/termios.go
+++ b/vendor/github.com/moby/term/termios.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package term
diff --git a/vendor/github.com/moby/term/termios_bsd.go b/vendor/github.com/moby/term/termios_bsd.go
index 922dd4baab..45f77e03c7 100644
--- a/vendor/github.com/moby/term/termios_bsd.go
+++ b/vendor/github.com/moby/term/termios_bsd.go
@@ -1,3 +1,4 @@
+//go:build darwin || freebsd || openbsd || netbsd
// +build darwin freebsd openbsd netbsd
package term
diff --git a/vendor/github.com/moby/term/termios_nonbsd.go b/vendor/github.com/moby/term/termios_nonbsd.go
index 038fd61ba1..88b7b21563 100644
--- a/vendor/github.com/moby/term/termios_nonbsd.go
+++ b/vendor/github.com/moby/term/termios_nonbsd.go
@@ -1,4 +1,5 @@
-//+build !darwin,!freebsd,!netbsd,!openbsd,!windows
+//go:build !darwin && !freebsd && !netbsd && !openbsd && !windows
+// +build !darwin,!freebsd,!netbsd,!openbsd,!windows
package term
diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go
index 155251521b..f32aa537ef 100644
--- a/vendor/github.com/moby/term/windows/ansi_reader.go
+++ b/vendor/github.com/moby/term/windows/ansi_reader.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package windowsconsole
@@ -190,7 +191,6 @@ func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) stri
// <Ctrl>-S Suspends printing on the screen (does not stop the program).
// <Ctrl>-U Deletes all characters on the current line. Also called the KILL key.
// <Ctrl>-E Quits current command and creates a core
-
}
// <Alt>+Key generates ESC N Key
diff --git a/vendor/github.com/moby/term/windows/ansi_writer.go b/vendor/github.com/moby/term/windows/ansi_writer.go
index ccb5ef0775..4243307fd3 100644
--- a/vendor/github.com/moby/term/windows/ansi_writer.go
+++ b/vendor/github.com/moby/term/windows/ansi_writer.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package windowsconsole
diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go
index 993694ddcd..116b74e8f5 100644
--- a/vendor/github.com/moby/term/windows/console.go
+++ b/vendor/github.com/moby/term/windows/console.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package windowsconsole
diff --git a/vendor/github.com/moby/term/winsize.go b/vendor/github.com/moby/term/winsize.go
index 1ef98d5996..bea8d4595c 100644
--- a/vendor/github.com/moby/term/winsize.go
+++ b/vendor/github.com/moby/term/winsize.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package term
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index ac1ca3cf5f..cf05079fb8 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -69,9 +69,9 @@ type Collector interface {
// If a Collector collects the same metrics throughout its lifetime, its
// Describe method can simply be implemented as:
//
-// func (c customCollector) Describe(ch chan<- *Desc) {
-// DescribeByCollect(c, ch)
-// }
+// func (c customCollector) Describe(ch chan<- *Desc) {
+// DescribeByCollect(c, ch)
+// }
//
// However, this will not work if the metrics collected change dynamically over
// the lifetime of the Collector in a way that their combined set of descriptors
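
A minimal sketch of the pattern this doc comment describes, using prometheus.DescribeByCollect with a hypothetical collector whose metric set is fixed for its lifetime:

package example

import "github.com/prometheus/client_golang/prometheus"

// fixedCollector is hypothetical; its metrics never change over its
// lifetime, so Describe can delegate to DescribeByCollect as recommended.
type fixedCollector struct {
	desc *prometheus.Desc
}

func (c fixedCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

func (c fixedCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42)
}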
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index 00d70f09b6..de30de6daa 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -51,7 +51,7 @@ type Counter interface {
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
// of the provided labels are invalid, or if the provided labels contain more
-// than 64 runes in total.
+// than 128 runes in total.
type ExemplarAdder interface {
AddWithExemplar(value float64, exemplar Labels)
}
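
For context, a hedged usage sketch of the ExemplarAdder interface documented above. The metric name and label are hypothetical; counters created by this package implement ExemplarAdder, and the exemplar labels must stay within the 128-rune limit:

c := prometheus.NewCounter(prometheus.CounterOpts{
	Name: "http_requests_total", // hypothetical metric name
	Help: "Total HTTP requests.",
})
if ea, ok := c.(prometheus.ExemplarAdder); ok {
	ea.AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})
}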
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 4bb816ab75..8bc5e44e2f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -20,6 +20,9 @@ import (
"strings"
"github.com/cespare/xxhash/v2"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
+
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
@@ -154,7 +157,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
Value: proto.String(v),
})
}
- sort.Sort(labelPairSorter(d.constLabelPairs))
+ sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
return d
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
new file mode 100644
index 0000000000..614fd61be9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
@@ -0,0 +1,26 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "os"
+
+func getPIDFn() func() (int, error) {
+ pid := os.Getpid()
+ return func() (int, error) {
+ return pid, nil
+ }
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/gen.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
index 1d0c9a7ffd..eaf8059ee1 100644
--- a/vendor/github.com/google/certificate-transparency-go/client/configpb/gen.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
@@ -1,10 +1,9 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
+// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,6 +11,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package configpb
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
-//go:generate protoc -I=. -I=$GOPATH/src --go_out=:. multilog.proto
+func getPIDFn() func() (int, error) {
+ return func() (int, error) {
+ return 1, nil
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index 08195b4102..ad9a71a5e0 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -19,6 +19,10 @@ import (
"time"
)
+// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
+// From Go 1.17 similar (and better) statistics are provided by runtime/metrics, so
+// while the eval closures still operate on runtime.MemStats, on Go 1.17+ that
+// struct is populated from runtime/metrics.
func goRuntimeMemStats() memStatsMetrics {
return memStatsMetrics{
{
@@ -197,14 +201,6 @@ func goRuntimeMemStats() memStatsMetrics {
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_cpu_fraction"),
- "The fraction of this program's available CPU time used by the GC since the program started.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
- valType: GaugeValue,
},
}
}
@@ -232,7 +228,7 @@ func newBaseGoCollector() baseGoCollector {
"A summary of the pause duration of garbage collection cycles.",
nil, nil),
gcLastTimeDesc: NewDesc(
- memstatNamespace("last_gc_time_seconds"),
+ "go_memstats_last_gc_time_seconds",
"Number of seconds since 1970 of last garbage collection.",
nil, nil),
goInfoDesc: NewDesc(
@@ -254,8 +250,9 @@ func (c *baseGoCollector) Describe(ch chan<- *Desc) {
// Collect returns the current state of all metrics of the collector.
func (c *baseGoCollector) Collect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
- n, _ := runtime.ThreadCreateProfile(nil)
- ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
+
+ n := getRuntimeNumThreads()
+ ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
var stats debug.GCStats
stats.PauseQuantiles = make([]time.Duration, 5)
@@ -268,7 +265,6 @@ func (c *baseGoCollector) Collect(ch chan<- Metric) {
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
-
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
}
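
The changes above move the base collector away from runtime.ReadMemStats-era APIs; on Go 1.17+ the values come from the runtime/metrics package instead. A self-contained sketch of that read path, using one of the metric names this diff later defines a constant for:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Sample one runtime/metrics value; the collector batches many such
	// samples into one buffer and reads them with a single metrics.Read call.
	s := []metrics.Sample{{Name: "/memory/classes/total:bytes"}}
	metrics.Read(s)
	if s[0].Value.Kind() == metrics.KindUint64 {
		fmt.Println("total memory:", s[0].Value.Uint64(), "bytes")
	}
}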
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
index 24526131e7..897a6e906b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
@@ -40,13 +40,28 @@ type goCollector struct {
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
+ msMetrics := goRuntimeMemStats()
+ msMetrics = append(msMetrics, struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+ }{
+ // This metric is omitted in Go 1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
+ })
return &goCollector{
base: newBaseGoCollector(),
msLast: &runtime.MemStats{},
msRead: runtime.ReadMemStats,
msMaxWait: time.Second,
msMaxAge: 5 * time.Minute,
- msMetrics: goRuntimeMemStats(),
+ msMetrics: msMetrics,
}
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
index d43bdcddab..3a2d55e84b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -25,10 +25,72 @@ import (
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
- "github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
+)
+
+const (
+ // constants for strings referenced more than once.
+ goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects"
+ goGCHeapAllocsObjects = "/gc/heap/allocs:objects"
+ goGCHeapFreesObjects = "/gc/heap/frees:objects"
+ goGCHeapFreesBytes = "/gc/heap/frees:bytes"
+ goGCHeapAllocsBytes = "/gc/heap/allocs:bytes"
+ goGCHeapObjects = "/gc/heap/objects:objects"
+ goGCHeapGoalBytes = "/gc/heap/goal:bytes"
+ goMemoryClassesTotalBytes = "/memory/classes/total:bytes"
+ goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes"
+ goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes"
+ goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes"
+ goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes"
+ goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes"
+ goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes"
+ goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes"
+ goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes"
+ goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
+ goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes"
+ goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes"
+ goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes"
+ goMemoryClassesOtherBytes = "/memory/classes/other:bytes"
)
+// rmNamesForMemStatsMetrics lists the runtime/metrics names required to populate
+// the goRuntimeMemStats-like metrics.
+var rmNamesForMemStatsMetrics = []string{
+ goGCHeapTinyAllocsObjects,
+ goGCHeapAllocsObjects,
+ goGCHeapFreesObjects,
+ goGCHeapAllocsBytes,
+ goGCHeapObjects,
+ goGCHeapGoalBytes,
+ goMemoryClassesTotalBytes,
+ goMemoryClassesHeapObjectsBytes,
+ goMemoryClassesHeapUnusedBytes,
+ goMemoryClassesHeapReleasedBytes,
+ goMemoryClassesHeapFreeBytes,
+ goMemoryClassesHeapStacksBytes,
+ goMemoryClassesOSStacksBytes,
+ goMemoryClassesMetadataMSpanInuseBytes,
+ goMemoryClassesMetadataMSPanFreeBytes,
+ goMemoryClassesMetadataMCacheInuseBytes,
+ goMemoryClassesMetadataMCacheFreeBytes,
+ goMemoryClassesProfilingBucketsBytes,
+ goMemoryClassesMetadataOtherBytes,
+ goMemoryClassesOtherBytes,
+}
+
+func bestEffortLookupRM(lookup []string) []metrics.Description {
+ ret := make([]metrics.Description, 0, len(lookup))
+ for _, rm := range metrics.All() {
+ for _, m := range lookup {
+ if m == rm.Name {
+ ret = append(ret, rm)
+ }
+ }
+ }
+ return ret
+}
+
type goCollector struct {
base baseGoCollector
@@ -36,70 +98,124 @@ type goCollector struct {
// snapshot is always produced by Collect.
mu sync.Mutex
- // rm... fields all pertain to the runtime/metrics package.
- rmSampleBuf []metrics.Sample
- rmSampleMap map[string]*metrics.Sample
- rmMetrics []collectorMetric
+ // Contains all samples that have to be retrieved from runtime/metrics (not all of them will be exposed).
+ sampleBuf []metrics.Sample
+ // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
+ sampleMap map[string]*metrics.Sample
+
+ // rmExposedMetrics represents all runtime/metrics package metrics
+ // that were configured to be exposed.
+ rmExposedMetrics []collectorMetric
+ rmExactSumMapForHist map[string]string
// With Go 1.17, the runtime/metrics package was introduced.
// From that point on, metric names produced by the runtime/metrics
// package could be generated from runtime/metrics names. However,
// these differ from the old names for the same values.
//
- // This field exist to export the same values under the old names
+ // This field exists to export the same values under the old names
// as well.
- msMetrics memStatsMetrics
+ msMetrics memStatsMetrics
+ msMetricsEnabled bool
+}
+
+type rmMetricDesc struct {
+ metrics.Description
+}
+
+func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
+ var descs []rmMetricDesc
+ for _, d := range metrics.All() {
+ var (
+ deny = true
+ desc rmMetricDesc
+ )
+
+ for _, r := range rules {
+ if !r.Matcher.MatchString(d.Name) {
+ continue
+ }
+ deny = r.Deny
+ }
+ if deny {
+ continue
+ }
+
+ desc.Description = d
+ descs = append(descs, desc)
+ }
+ return descs
+}
+
+func defaultGoCollectorOptions() internal.GoCollectorOptions {
+ return internal.GoCollectorOptions{
+ RuntimeMetricSumForHist: map[string]string{
+ "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
+ "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
+ },
+ RuntimeMetricRules: []internal.GoCollectorRule{
+ //{Matcher: regexp.MustCompile("")},
+ },
+ }
}
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector {
- descriptions := metrics.All()
+func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
+ opt := defaultGoCollectorOptions()
+ for _, o := range opts {
+ o(&opt)
+ }
+
+ exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)
// Collect all histogram samples so that we can get their buckets.
// The API guarantees that the buckets are always fixed for the lifetime
// of the process.
var histograms []metrics.Sample
- for _, d := range descriptions {
+ for _, d := range exposedDescriptions {
if d.Kind == metrics.KindFloat64Histogram {
histograms = append(histograms, metrics.Sample{Name: d.Name})
}
}
- metrics.Read(histograms)
+
+ if len(histograms) > 0 {
+ metrics.Read(histograms)
+ }
+
bucketsMap := make(map[string][]float64)
for i := range histograms {
bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
}
- // Generate a Desc and ValueType for each runtime/metrics metric.
- metricSet := make([]collectorMetric, 0, len(descriptions))
- sampleBuf := make([]metrics.Sample, 0, len(descriptions))
- sampleMap := make(map[string]*metrics.Sample, len(descriptions))
- for i := range descriptions {
- d := &descriptions[i]
- namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d)
+ // Generate a collector for each exposed runtime/metrics metric.
+ metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
+ // sampleBuf is used for reading from runtime/metrics.
+ // It is sized for the largest case so it never reallocates, keeping the
+ // pointers stored in sampleMap stable.
+ sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
+ sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
+ for _, d := range exposedDescriptions {
+ namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
if !ok {
// Just ignore this metric; we can't do anything with it here.
// If a user decides to use the latest version of Go, we don't want
- // to fail here. This condition is tested elsewhere.
+ // to fail here. This condition is tested in TestExpectedRuntimeMetrics.
continue
}
- // Set up sample buffer for reading, and a map
- // for quick lookup of sample values.
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
var m collectorMetric
if d.Kind == metrics.KindFloat64Histogram {
- _, hasSum := rmExactSumMap[d.Name]
+ _, hasSum := opt.RuntimeMetricSumForHist[d.Name]
unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
m = newBatchHistogram(
NewDesc(
BuildFQName(namespace, subsystem, name),
- d.Description,
+ d.Description.Description,
nil,
nil,
),
@@ -111,24 +227,61 @@ func NewGoCollector() Collector {
Namespace: namespace,
Subsystem: subsystem,
Name: name,
- Help: d.Description,
- })
+ Help: d.Description.Description,
+ },
+ )
} else {
m = NewGauge(GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: name,
- Help: d.Description,
+ Help: d.Description.Description,
})
}
metricSet = append(metricSet, m)
}
+
+ // Add exact sum metrics to sampleBuf if not added before.
+ for _, h := range histograms {
+ sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
+ if !ok {
+ continue
+ }
+
+ if _, ok := sampleMap[sumMetric]; ok {
+ continue
+ }
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
+ sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
+ }
+
+ var (
+ msMetrics memStatsMetrics
+ msDescriptions []metrics.Description
+ )
+
+ if !opt.DisableMemStatsLikeMetrics {
+ msMetrics = goRuntimeMemStats()
+ msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
+
+ // Check if metric was not exposed before and if not, add to sampleBuf.
+ for _, mdDesc := range msDescriptions {
+ if _, ok := sampleMap[mdDesc.Name]; ok {
+ continue
+ }
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
+ sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
+ }
+ }
+
return &goCollector{
- base: newBaseGoCollector(),
- rmSampleBuf: sampleBuf,
- rmSampleMap: sampleMap,
- rmMetrics: metricSet,
- msMetrics: goRuntimeMemStats(),
+ base: newBaseGoCollector(),
+ sampleBuf: sampleBuf,
+ sampleMap: sampleMap,
+ rmExposedMetrics: metricSet,
+ rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
+ msMetrics: msMetrics,
+ msMetricsEnabled: !opt.DisableMemStatsLikeMetrics,
}
}
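
The rule matching above follows a last-match-wins convention: a name matched by no rule is denied, and each matching rule overwrites the verdict. A standalone sketch of that semantics (the rule type mirrors internal.GoCollectorRule from this diff; the regexps are hypothetical):

package main

import (
	"fmt"
	"regexp"
)

// rule mirrors internal.GoCollectorRule: a matcher plus a deny flag.
type rule struct {
	matcher *regexp.Regexp
	deny    bool
}

// exposed reports whether a metric name passes the rules, using the same
// logic as matchRuntimeMetricsRules: deny by default, last match wins.
func exposed(name string, rules []rule) bool {
	deny := true
	for _, r := range rules {
		if r.matcher.MatchString(name) {
			deny = r.deny
		}
	}
	return !deny
}

func main() {
	rules := []rule{
		{matcher: regexp.MustCompile(`^/gc/`)},                       // allow all GC metrics...
		{matcher: regexp.MustCompile(`^/gc/heap/tiny/`), deny: true}, // ...except tiny-alloc ones
	}
	fmt.Println(exposed("/gc/heap/goal:bytes", rules))           // true
	fmt.Println(exposed("/gc/heap/tiny/allocs:objects", rules))  // false
}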
@@ -138,7 +291,7 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
for _, i := range c.msMetrics {
ch <- i.desc
}
- for _, m := range c.rmMetrics {
+ for _, m := range c.rmExposedMetrics {
ch <- m.Desc()
}
}
@@ -148,8 +301,12 @@ func (c *goCollector) Collect(ch chan<- Metric) {
// Collect base non-memory metrics.
c.base.Collect(ch)
+ if len(c.sampleBuf) == 0 {
+ return
+ }
+
// Collect must be thread-safe, so prevent concurrent use of
- // rmSampleBuf. Just read into rmSampleBuf but write all the data
+ // sampleBuf elements. Just read into sampleBuf but write all the data
// we get into our Metrics or MemStats.
//
// This lock also ensures that the Metrics we send out are all from
@@ -164,14 +321,17 @@ func (c *goCollector) Collect(ch chan<- Metric) {
defer c.mu.Unlock()
// Populate runtime/metrics sample buffer.
- metrics.Read(c.rmSampleBuf)
+ metrics.Read(c.sampleBuf)
+
+ // Collect all the runtime/metrics values the user chose to expose from sampleBuf (if any).
+ for i, metric := range c.rmExposedMetrics {
+ // We created samples for exposed metrics first in order, so indexes match.
+ sample := c.sampleBuf[i]
- // Update all our metrics from rmSampleBuf.
- for i, sample := range c.rmSampleBuf {
// N.B. switch on concrete type because it's significantly more efficient
// than checking for the Counter and Gauge interface implementations. In
// this case, we control all the types here.
- switch m := c.rmMetrics[i].(type) {
+ switch m := metric.(type) {
case *counter:
// Guard against decreases. This should never happen, but a failure
// to do so will result in a panic, which is a harsh consequence for
@@ -191,12 +351,15 @@ func (c *goCollector) Collect(ch chan<- Metric) {
panic("unexpected metric type")
}
}
- // ms is a dummy MemStats that we populate ourselves so that we can
- // populate the old metrics from it.
- var ms runtime.MemStats
- memStatsFromRM(&ms, c.rmSampleMap)
- for _, i := range c.msMetrics {
- ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
+
+ if c.msMetricsEnabled {
+ // ms is a dummy MemStats that we populate ourselves so that we can
+ // populate the old metrics from it when MemStats-style collection is enabled.
+ var ms runtime.MemStats
+ memStatsFromRM(&ms, c.sampleMap)
+ for _, i := range c.msMetrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
+ }
}
}
@@ -224,11 +387,6 @@ func unwrapScalarRMValue(v metrics.Value) float64 {
}
}
-var rmExactSumMap = map[string]string{
- "/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes",
- "/gc/heap/frees-by-size:bytes": "/gc/heap/frees:bytes",
-}
-
// exactSumFor takes a runtime/metrics metric name (that is assumed to
// be of kind KindFloat64Histogram) and returns its exact sum and whether
// its exact sum exists.
@@ -236,11 +394,11 @@ var rmExactSumMap = map[string]string{
// The runtime/metrics API for histograms doesn't currently expose exact
// sums, but some of the other metrics are in fact exact sums of histograms.
func (c *goCollector) exactSumFor(rmName string) float64 {
- sumName, ok := rmExactSumMap[rmName]
+ sumName, ok := c.rmExactSumMapForHist[rmName]
if !ok {
return 0
}
- s, ok := c.rmSampleMap[sumName]
+ s, ok := c.sampleMap[sumName]
if !ok {
return 0
}
@@ -261,35 +419,30 @@ func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
// while having Mallocs - Frees still represent a live object count.
// Unfortunately, MemStats doesn't actually export a large allocation count,
// so it's impossible to pull this number out directly.
- tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects")
- ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs
- ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs
+ tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
+ ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
+ ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
- ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes")
- ms.Sys = lookupOrZero("/memory/classes/total:bytes")
+ ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
+ ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
ms.Lookups = 0 // Already always zero.
- ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes")
+ ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
ms.Alloc = ms.HeapAlloc
- ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes")
- ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes")
- ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes")
+ ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
+ ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
+ ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
ms.HeapSys = ms.HeapInuse + ms.HeapIdle
- ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects")
- ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes")
- ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes")
- ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes")
- ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes")
- ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes")
- ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes")
- ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes")
- ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes")
- ms.OtherSys = lookupOrZero("/memory/classes/other:bytes")
- ms.NextGC = lookupOrZero("/gc/heap/goal:bytes")
-
- // N.B. LastGC is omitted because runtime.GCStats already has this.
- // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
- // for more details.
- ms.LastGC = 0
+ ms.HeapObjects = lookupOrZero(goGCHeapObjects)
+ ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
+ ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
+ ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
+ ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
+ ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
+ ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
+ ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
+ ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
+ ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
+ ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
// and often misleading due to the fact that it's an average over the lifetime
@@ -324,6 +477,11 @@ type batchHistogram struct {
// buckets must always be from the runtime/metrics package, following
// the same conventions.
func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
+ // We need to remove -Inf values. runtime/metrics keeps them around.
+ // But -Inf bucket should not be allowed for prometheus histograms.
+ if buckets[0] == math.Inf(-1) {
+ buckets = buckets[1:]
+ }
h := &batchHistogram{
desc: desc,
buckets: buckets,
@@ -382,8 +540,10 @@ func (h *batchHistogram) Write(out *dto.Metric) error {
for i, count := range h.counts {
totalCount += count
if !h.hasSum {
- // N.B. This computed sum is an underestimate.
- sum += h.buckets[i] * float64(count)
+ if count != 0 {
+ // N.B. This computed sum is an underestimate.
+ sum += h.buckets[i] * float64(count)
+ }
}
// Skip the +Inf bucket, but only for the bucket list.
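
The count != 0 guard added above matters once bucket boundaries can be infinite: in IEEE 754 arithmetic, multiplying an infinite boundary by a zero count yields NaN, which would poison the whole computed sum. A quick demonstration:

package main

import (
	"fmt"
	"math"
)

func main() {
	fmt.Println(math.Inf(1) * 0)              // NaN
	fmt.Println(math.IsNaN(math.Inf(1) * 0))  // true
}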
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 893802fd6b..0d47fecdc2 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -581,11 +581,11 @@ func (h *constHistogram) Desc() *Desc {
func (h *constHistogram) Write(out *dto.Metric) error {
his := &dto.Histogram{}
+
buckets := make([]*dto.Bucket, 0, len(h.buckets))
his.SampleCount = proto.Uint64(h.count)
his.SampleSum = proto.Float64(h.sum)
-
for upperBound, count := range h.buckets {
buckets = append(buckets, &dto.Bucket{
CumulativeCount: proto.Uint64(count),
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
new file mode 100644
index 0000000000..fd45cadc0c
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -0,0 +1,651 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file provides tools to compare sequences of strings and generate textual diffs.
+//
+// GetUnifiedDiffString is maintained here because the original repository
+// (https://github.com/pmezard/go-difflib) is no longer maintained.
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func calculateRatio(matches, length int) float64 {
+ if length > 0 {
+ return 2.0 * float64(matches) / float64(length)
+ }
+ return 1.0
+}
+
+type Match struct {
+ A int
+ B int
+ Size int
+}
+
+type OpCode struct {
+ Tag byte
+ I1 int
+ I2 int
+ J1 int
+ J2 int
+}
+
+// SequenceMatcher compares sequences of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+ a []string
+ b []string
+ b2j map[string][]int
+ IsJunk func(string) bool
+ autoJunk bool
+ bJunk map[string]struct{}
+ matchingBlocks []Match
+ fullBCount map[string]int
+ bPopular map[string]struct{}
+ opCodes []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+ m := SequenceMatcher{autoJunk: true}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+ isJunk func(string) bool,
+) *SequenceMatcher {
+ m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+ m.SetSeq1(a)
+ m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+ if &a == &m.a {
+ return
+ }
+ m.a = a
+ m.matchingBlocks = nil
+ m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+ if &b == &m.b {
+ return
+ }
+ m.b = b
+ m.matchingBlocks = nil
+ m.opCodes = nil
+ m.fullBCount = nil
+ m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+ // Populate line -> index mapping
+ b2j := map[string][]int{}
+ for i, s := range m.b {
+ indices := b2j[s]
+ indices = append(indices, i)
+ b2j[s] = indices
+ }
+
+ // Purge junk elements
+ m.bJunk = map[string]struct{}{}
+ if m.IsJunk != nil {
+ junk := m.bJunk
+ for s := range b2j {
+ if m.IsJunk(s) {
+ junk[s] = struct{}{}
+ }
+ }
+ for s := range junk {
+ delete(b2j, s)
+ }
+ }
+
+ // Purge remaining popular elements
+ popular := map[string]struct{}{}
+ n := len(m.b)
+ if m.autoJunk && n >= 200 {
+ ntest := n/100 + 1
+ for s, indices := range b2j {
+ if len(indices) > ntest {
+ popular[s] = struct{}{}
+ }
+ }
+ for s := range popular {
+ delete(b2j, s)
+ }
+ }
+ m.bPopular = popular
+ m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+ _, ok := m.bJunk[s]
+ return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+// alo <= i <= i+k <= ahi
+// blo <= j <= j+k <= bhi
+// and for all (i',j',k') meeting those conditions,
+// k >= k'
+// i <= i'
+// and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block. Then that block is extended as
+// far as possible by matching (only) junk elements on both sides. So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+ // CAUTION: stripping common prefix or suffix would be incorrect.
+ // E.g.,
+ // ab
+ // acab
+ // Longest matching block is "ab", but if common prefix is
+ // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+ // strip, so ends up claiming that ab is changed to acab by
+ // inserting "ca" in the middle. That's minimal but unintuitive:
+ // "it's obvious" that someone inserted "ac" at the front.
+ // Windiff ends up at the same place as diff, but by pairing up
+ // the unique 'b's and then matching the first two 'a's.
+ besti, bestj, bestsize := alo, blo, 0
+
+ // find longest junk-free match
+ // during an iteration of the loop, j2len[j] = length of longest
+ // junk-free match ending with a[i-1] and b[j]
+ j2len := map[int]int{}
+ for i := alo; i != ahi; i++ {
+ // look at all instances of a[i] in b; note that because
+ // b2j has no junk keys, the loop is skipped if a[i] is junk
+ newj2len := map[int]int{}
+ for _, j := range m.b2j[m.a[i]] {
+ // a[i] matches b[j]
+ if j < blo {
+ continue
+ }
+ if j >= bhi {
+ break
+ }
+ k := j2len[j-1] + 1
+ newj2len[j] = k
+ if k > bestsize {
+ besti, bestj, bestsize = i-k+1, j-k+1, k
+ }
+ }
+ j2len = newj2len
+ }
+
+ // Extend the best by non-junk elements on each end. In particular,
+ // "popular" non-junk elements aren't in b2j, which greatly speeds
+ // the inner loop above, but also means "the best" match so far
+ // doesn't contain any junk *or* popular non-junk elements.
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ !m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize++
+ }
+
+ // Now that we have a wholly interesting match (albeit possibly
+ // empty!), we may as well suck up the matching junk on each
+ // side of it too. Can't think of a good reason not to, and it
+ // saves post-processing the (possibly considerable) expense of
+ // figuring out what to do with it. In the case of an empty
+ // interesting match, this is clearly the right thing to do,
+ // because no other kind of match is possible in the regions.
+ for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize++
+ }
+
+ return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+ if m.matchingBlocks != nil {
+ return m.matchingBlocks
+ }
+
+ var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+ matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+ match := m.findLongestMatch(alo, ahi, blo, bhi)
+ i, j, k := match.A, match.B, match.Size
+ if match.Size > 0 {
+ if alo < i && blo < j {
+ matched = matchBlocks(alo, i, blo, j, matched)
+ }
+ matched = append(matched, match)
+ if i+k < ahi && j+k < bhi {
+ matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+ }
+ }
+ return matched
+ }
+ matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+ // It's possible that we have adjacent equal blocks in the
+ // matching_blocks list now.
+ nonAdjacent := []Match{}
+ i1, j1, k1 := 0, 0, 0
+ for _, b := range matched {
+ // Is this block adjacent to i1, j1, k1?
+ i2, j2, k2 := b.A, b.B, b.Size
+ if i1+k1 == i2 && j1+k1 == j2 {
+ // Yes, so collapse them -- this just increases the length of
+ // the first block by the length of the second, and the first
+ // block so lengthened remains the block to compare against.
+ k1 += k2
+ } else {
+ // Not adjacent. Remember the first block (k1==0 means it's
+ // the dummy we started with), and make the second block the
+ // new block to compare against.
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+ i1, j1, k1 = i2, j2, k2
+ }
+ }
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+
+ nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+ m.matchingBlocks = nonAdjacent
+ return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal): a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+ if m.opCodes != nil {
+ return m.opCodes
+ }
+ i, j := 0, 0
+ matching := m.GetMatchingBlocks()
+ opCodes := make([]OpCode, 0, len(matching))
+ for _, m := range matching {
+ // invariant: we've pumped out correct diffs to change
+ // a[:i] into b[:j], and the next matching block is
+ // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+ // out a diff to change a[i:ai] into b[j:bj], pump out
+ // the matching block, and move (i,j) beyond the match
+ ai, bj, size := m.A, m.B, m.Size
+ tag := byte(0)
+ if i < ai && j < bj {
+ tag = 'r'
+ } else if i < ai {
+ tag = 'd'
+ } else if j < bj {
+ tag = 'i'
+ }
+ if tag > 0 {
+ opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+ }
+ i, j = ai+size, bj+size
+ // the list of matching blocks is terminated by a
+ // sentinel with size 0
+ if size > 0 {
+ opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+ }
+ }
+ m.opCodes = opCodes
+ return m.opCodes
+}
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a generator of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+ if n < 0 {
+ n = 3
+ }
+ codes := m.GetOpCodes()
+ if len(codes) == 0 {
+ codes = []OpCode{{'e', 0, 1, 0, 1}}
+ }
+ // Fixup leading and trailing groups if they show no changes.
+ if codes[0].Tag == 'e' {
+ c := codes[0]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+ }
+ if codes[len(codes)-1].Tag == 'e' {
+ c := codes[len(codes)-1]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+ }
+ nn := n + n
+ groups := [][]OpCode{}
+ group := []OpCode{}
+ for _, c := range codes {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ // End the current group and start a new one whenever
+ // there is a large range with no changes.
+ if c.Tag == 'e' && i2-i1 > nn {
+ group = append(group, OpCode{
+ c.Tag, i1, min(i2, i1+n),
+ j1, min(j2, j1+n),
+ })
+ groups = append(groups, group)
+ group = []OpCode{}
+ i1, j1 = max(i1, i2-n), max(j1, j2-n)
+ }
+ group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+ }
+ if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ groups = append(groups, group)
+ }
+ return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+ matches := 0
+ for _, m := range m.GetMatchingBlocks() {
+ matches += m.Size
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+ // viewing a and b as multisets, set matches to the cardinality
+ // of their intersection; this counts the number of matches
+ // without regard to order, so is clearly an upper bound
+ if m.fullBCount == nil {
+ m.fullBCount = map[string]int{}
+ for _, s := range m.b {
+ m.fullBCount[s]++
+ }
+ }
+
+ // avail[x] is the number of times x appears in 'b' less the
+ // number of times we've seen it in 'a' so far ... kinda
+ avail := map[string]int{}
+ matches := 0
+ for _, s := range m.a {
+ n, ok := avail[s]
+ if !ok {
+ n = m.fullBCount[s]
+ }
+ avail[s] = n - 1
+ if n > 0 {
+ matches++
+ }
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+ la, lb := len(m.a), len(m.b)
+ return calculateRatio(min(la, lb), la+lb)
+}
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ if length == 0 {
+ beginning-- // empty ranges begin at line just before the range
+ }
+ return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+// Unified diff parameters
+type UnifiedDiff struct {
+ A []string // First sequence lines
+ FromFile string // First file name
+ FromDate string // First file time
+ B []string // Second sequence lines
+ ToFile string // Second file name
+ ToDate string // Second file time
+ Eol string // Headers end of line, defaults to LF
+ Context int // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by 'n' which
+// defaults to three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline. This is helpful so that inputs
+// created from file.readlines() result in diffs that are suitable for
+// file.writelines() since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the lineterm
+// argument to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times. Any or all of these may be specified using strings for
+// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ wf := func(format string, args ...interface{}) error {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ return err
+ }
+ ws := func(s string) error {
+ _, err := buf.WriteString(s)
+ return err
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ first, last := g[0], g[len(g)-1]
+ range1 := formatRangeUnified(first.I1, last.I2)
+ range2 := formatRangeUnified(first.J1, last.J2)
+ if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+ return err
+ }
+ for _, c := range g {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ if c.Tag == 'e' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws(" " + line); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws("-" + line); err != nil {
+ return err
+ }
+ }
+ }
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, line := range diff.B[j1:j2] {
+ if err := ws("+" + line); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteUnifiedDiff(w, diff)
+ return w.String(), err
+}
+
+// Split a string on "\n" while preserving them. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+ lines := strings.SplitAfter(s, "\n")
+ lines[len(lines)-1] += "\n"
+ return lines
+}
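
A usage sketch of the ported difflib API above. Note that the package is internal to client_golang, so code outside this module cannot actually import it; the snippet only illustrates how UnifiedDiff, SplitLines, and GetUnifiedDiffString fit together:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus/internal"
)

func main() {
	diff := internal.UnifiedDiff{
		A:        internal.SplitLines("one\ntwo\nthree\n"),
		B:        internal.SplitLines("one\ntwo\nfour\n"),
		FromFile: "before",
		ToFile:   "after",
		Context:  3,
	}
	text, err := internal.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
}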
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
new file mode 100644
index 0000000000..723b45d644
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -0,0 +1,32 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "regexp"
+
+type GoCollectorRule struct {
+ Matcher *regexp.Regexp
+ Deny bool
+}
+
+// GoCollectorOptions should not be used directly by anything except the `collectors` package.
+// Use it via collectors package instead. See issue
+// https://github.com/prometheus/client_golang/issues/1030.
+//
+// This is internal, so external users can only use it via the `collectors.WithGoCollector*` methods.
+type GoCollectorOptions struct {
+ DisableMemStatsLikeMetrics bool
+ RuntimeMetricSumForHist map[string]string
+ RuntimeMetricRules []GoCollectorRule
+}
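
A sketch of the functional-options pattern these types support, as consumed by the NewGoCollector signature shown earlier. This fragment assumes the prometheus package scope plus a regexp import; external callers would go through the collectors package rather than construct these internal values themselves:

// Hypothetical option funcs matching func(o *internal.GoCollectorOptions).
disableMemStats := func(o *internal.GoCollectorOptions) {
	o.DisableMemStatsLikeMetrics = true
}
onlyGC := func(o *internal.GoCollectorOptions) {
	o.RuntimeMetricRules = append(o.RuntimeMetricRules,
		internal.GoCollectorRule{Matcher: regexp.MustCompile(`^/gc/`)})
}
collector := NewGoCollector(disableMemStats, onlyGC)
_ = collector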
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
index fe0a52180e..97d17d6cb6 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -61,9 +61,9 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
// name has - replaced with _ and is concatenated with the unit and
// other data.
name = strings.ReplaceAll(name, "-", "_")
- name = name + "_" + unit
- if d.Cumulative {
- name = name + "_total"
+ name += "_" + unit
+ if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
+ name += "_total"
}
valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
@@ -84,12 +84,12 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
switch unit {
case "bytes":
- // Rebucket as powers of 2.
- return rebucketExp(buckets, 2)
+ // Re-bucket as powers of 2.
+ return reBucketExp(buckets, 2)
case "seconds":
- // Rebucket as powers of 10 and then merge all buckets greater
+ // Re-bucket as powers of 10 and then merge all buckets greater
// than 1 second into the +Inf bucket.
- b := rebucketExp(buckets, 10)
+ b := reBucketExp(buckets, 10)
for i := range b {
if b[i] <= 1 {
continue
@@ -103,11 +103,11 @@ func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
return buckets
}
-// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
+// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
// downsamples the buckets to those a multiple of base apart. The end result
// is a roughly exponential (in many cases, perfectly exponential) bucketing
// scheme.
-func rebucketExp(buckets []float64, base float64) []float64 {
+func reBucketExp(buckets []float64, base float64) []float64 {
bucket := buckets[0]
var newBuckets []float64
// We may see a -Inf here, in which case, add it and skip it
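
reBucketExp's body is truncated here, but the doc comment fully specifies the behavior: keep only boundaries that are at least a factor of base apart. A simplified sketch of that idea (not the vendored implementation; it assumes strictly positive, ascending boundaries and ignores the -Inf special case mentioned above):

// downsampleExp keeps roughly one boundary per factor of base.
func downsampleExp(buckets []float64, base float64) []float64 {
	out := []float64{buckets[0]}
	next := buckets[0] * base
	for _, b := range buckets[1:] {
		if b >= next {
			out = append(out, b)
			for next <= b {
				next *= base
			}
		}
	}
	return out
}

// downsampleExp([]float64{1, 2, 3, 4, 8, 16}, 2) -> [1 2 4 8 16]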
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
index 351c26e1ae..6515c11480 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -19,18 +19,34 @@ import (
dto "github.com/prometheus/client_model/go"
)
-// metricSorter is a sortable slice of *dto.Metric.
-type metricSorter []*dto.Metric
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type LabelPairSorter []*dto.LabelPair
-func (s metricSorter) Len() int {
+func (s LabelPairSorter) Len() int {
return len(s)
}
-func (s metricSorter) Swap(i, j int) {
+func (s LabelPairSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
-func (s metricSorter) Less(i, j int) bool {
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+// MetricSorter is a sortable slice of *dto.Metric.
+type MetricSorter []*dto.Metric
+
+func (s MetricSorter) Len() int {
+ return len(s)
+}
+
+func (s MetricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s MetricSorter) Less(i, j int) bool {
if len(s[i].Label) != len(s[j].Label) {
// This should not happen. The metrics are
// inconsistent. However, we have to deal with the fact, as
@@ -68,7 +84,7 @@ func (s metricSorter) Less(i, j int) bool {
// the slice, with the contained Metrics sorted within each MetricFamily.
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
for _, mf := range metricFamiliesByName {
- sort.Sort(metricSorter(mf.Metric))
+ sort.Sort(MetricSorter(mf.Metric))
}
names := make([]string, 0, len(metricFamiliesByName))
for name, mf := range metricFamiliesByName {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index 2744443ac2..6eee198fef 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -39,7 +39,7 @@ var errInconsistentCardinality = errors.New("inconsistent label cardinality")
func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
return fmt.Errorf(
- "%s: %q has %d variable labels named %q but %d values %q were provided",
+ "%w: %q has %d variable labels named %q but %d values %q were provided",
errInconsistentCardinality, fqName,
len(labels), labels,
len(labelValues), labelValues,
@@ -49,7 +49,7 @@ func makeInconsistentCardinalityError(fqName string, labels, labelValues []strin
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
if len(labels) != expectedNumberOfValues {
return fmt.Errorf(
- "%s: expected %d label values but got %d in %#v",
+ "%w: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,
len(labels), labels,
)
@@ -67,7 +67,7 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
if len(vals) != expectedNumberOfValues {
return fmt.Errorf(
- "%s: expected %d label values but got %d in %#v",
+ "%w: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,
len(vals), vals,
)
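
The switch from %s to %w in these three hunks is what lets callers test for the sentinel with errors.Is, since %w keeps errInconsistentCardinality in the wrapped error chain instead of flattening it to text. A self-contained demonstration:

package main

import (
	"errors"
	"fmt"
)

var errInconsistentCardinality = errors.New("inconsistent label cardinality")

func main() {
	// %s flattens the sentinel to text; %w keeps it in the error chain.
	plain := fmt.Errorf("%s: 2 values for 1 label", errInconsistentCardinality)
	wrapped := fmt.Errorf("%w: 2 values for 1 label", errInconsistentCardinality)
	fmt.Println(errors.Is(plain, errInconsistentCardinality))   // false
	fmt.Println(errors.Is(wrapped, errInconsistentCardinality)) // true
}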
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index dc121910a5..f0941f6f00 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -14,6 +14,9 @@
package prometheus
import (
+ "errors"
+ "math"
+ "sort"
"strings"
"time"
@@ -115,22 +118,6 @@ func BuildFQName(namespace, subsystem, name string) string {
return name
}
-// labelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers.
-type labelPairSorter []*dto.LabelPair
-
-func (s labelPairSorter) Len() int {
- return len(s)
-}
-
-func (s labelPairSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s labelPairSorter) Less(i, j int) bool {
- return s[i].GetName() < s[j].GetName()
-}
-
type invalidMetric struct {
desc *Desc
err error
@@ -174,3 +161,96 @@ func (m timestampedMetric) Write(pb *dto.Metric) error {
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
return timestampedMetric{Metric: m, t: t}
}
+
+type withExemplarsMetric struct {
+ Metric
+
+ exemplars []*dto.Exemplar
+}
+
+func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
+ if err := m.Metric.Write(pb); err != nil {
+ return err
+ }
+
+ switch {
+ case pb.Counter != nil:
+ pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
+ case pb.Histogram != nil:
+ for _, e := range m.exemplars {
+ // pb.Histogram.Bucket are sorted by UpperBound.
+ i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
+ return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
+ })
+ if i < len(pb.Histogram.Bucket) {
+ pb.Histogram.Bucket[i].Exemplar = e
+ } else {
+ // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
+ b := &dto.Bucket{
+ CumulativeCount: proto.Uint64(pb.Histogram.Bucket[len(pb.Histogram.GetBucket())-1].GetCumulativeCount()),
+ UpperBound: proto.Float64(math.Inf(1)),
+ Exemplar: e,
+ }
+ pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
+ }
+ }
+ default:
+ // TODO(bwplotka): Implement Gauge?
+ return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
+ }
+
+ return nil
+}
+
+// Exemplar is an easier-to-use, user-facing representation of *dto.Exemplar.
+type Exemplar struct {
+ Value float64
+ Labels Labels
+ // Optional.
+ // Default value (time.Time{}) indicates it's empty, which should be
+ // understood as time.Now() at the moment the metric was created.
+ Timestamp time.Time
+}
+
+// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with the
+// given exemplars. Exemplars are validated.
+//
+// Only the last applicable exemplar from the list is injected.
+// For example, for a Counter this means the last exemplar is injected.
+// For a Histogram, it means the last applicable exemplar for each bucket is injected.
+//
+// NewMetricWithExemplars works best with MustNewConstMetric and
+// MustNewConstHistogram, see example.
+func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
+ if len(exemplars) == 0 {
+ return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
+ }
+
+ var (
+ now = time.Now()
+ exs = make([]*dto.Exemplar, len(exemplars))
+ err error
+ )
+ for i, e := range exemplars {
+ ts := e.Timestamp
+ if ts == (time.Time{}) {
+ ts = now
+ }
+ exs[i], err = newExemplar(e.Value, ts, e.Labels)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
+}
+
+// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
+// NewMetricWithExemplars would have returned an error.
+func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
+ ret, err := NewMetricWithExemplars(m, exemplars...)
+ if err != nil {
+ panic(err)
+ }
+ return ret
+}
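A minimal sketch of using the new API with a const metric; the descriptor, value, and trace_id label are illustrative:

    desc := prometheus.NewDesc("http_requests_total", "Total handled requests.", nil, nil)
    m := prometheus.MustNewMetricWithExemplars(
    	prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42),
    	prometheus.Exemplar{
    		Value:  42,
    		Labels: prometheus.Labels{"trace_id": "abc123"}, // hypothetical trace ID
    		// Timestamp left zero: treated as time.Now() at creation.
    	},
    )
    // m would typically be sent on the channel inside a custom Collector's Collect method.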
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
new file mode 100644
index 0000000000..7c12b21087
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "runtime"
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+ n, _ := runtime.ThreadCreateProfile(nil)
+ return float64(n)
+}
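getRuntimeNumThreads is unexported, but the trick it uses is plain runtime API: ThreadCreateProfile reports the number of thread-creation records even when handed a nil slice. A tiny sketch:

    // ok is false for a nil slice, but n still carries the thread count.
    n, _ := runtime.ThreadCreateProfile(nil)
    fmt.Println("OS threads:", n)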
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
new file mode 100644
index 0000000000..7348df01df
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+ return 1
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
index 44128016fd..03773b21f7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -58,7 +58,7 @@ type ObserverVec interface {
// current time as timestamp, and the provided Labels. Empty Labels will lead to
// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
// left in place. ObserveWithExemplar panics if any of the provided labels are
-// invalid or if the provided labels contain more than 64 runes in total.
+// invalid or if the provided labels contain more than 128 runes in total.
type ExemplarObserver interface {
ObserveWithExemplar(value float64, exemplar Labels)
}
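A short sketch of recording an exemplar through this interface; the histogram and label are illustrative, and the type assertion is needed because exemplar support is optional:

    hist := prometheus.NewHistogram(prometheus.HistogramOpts{
    	Name: "request_duration_seconds",
    	Help: "Request duration.",
    })
    if eo, ok := hist.(prometheus.ExemplarObserver); ok {
    	eo.ObserveWithExemplar(0.42, prometheus.Labels{"trace_id": "abc123"})
    }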
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index 5bfe0ff5bb..8548dd18ed 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -16,7 +16,6 @@ package prometheus
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"strconv"
"strings"
@@ -104,8 +103,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
}
if opts.PidFn == nil {
- pid := os.Getpid()
- c.pidFn = func() (int, error) { return pid, nil }
+ c.pidFn = getPIDFn()
} else {
c.pidFn = opts.PidFn
}
@@ -152,13 +150,13 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error)
// It is meant to be used for the PidFn field in ProcessCollectorOpts.
func NewPidFileFn(pidFilePath string) func() (int, error) {
return func() (int, error) {
- content, err := ioutil.ReadFile(pidFilePath)
+ content, err := os.ReadFile(pidFilePath)
if err != nil {
- return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err)
+ return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
}
pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
if err != nil {
- return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err)
+ return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
}
return pid, nil
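A sketch of wiring this into the process collector; the pid-file path is illustrative:

    reg := prometheus.NewRegistry()
    reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
    	PidFn: prometheus.NewPidFileFn("/var/run/myapp.pid"),
    }))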
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
new file mode 100644
index 0000000000..b1e363d6cf
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
@@ -0,0 +1,26 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js
+// +build js
+
+package prometheus
+
+func canCollectProcess() bool {
+ return false
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ // noop on this platform
+ return
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
index 2dc3660da0..c0152cdb61 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build !windows
-// +build !windows
+//go:build !windows && !js
+// +build !windows,!js
package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
index e7c0d05464..9819917b83 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -76,16 +76,19 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
return n, err
}
-type closeNotifierDelegator struct{ *responseWriterDelegator }
-type flusherDelegator struct{ *responseWriterDelegator }
-type hijackerDelegator struct{ *responseWriterDelegator }
-type readerFromDelegator struct{ *responseWriterDelegator }
-type pusherDelegator struct{ *responseWriterDelegator }
+type (
+ closeNotifierDelegator struct{ *responseWriterDelegator }
+ flusherDelegator struct{ *responseWriterDelegator }
+ hijackerDelegator struct{ *responseWriterDelegator }
+ readerFromDelegator struct{ *responseWriterDelegator }
+ pusherDelegator struct{ *responseWriterDelegator }
+)
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
+
func (d flusherDelegator) Flush() {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
@@ -94,9 +97,11 @@ func (d flusherDelegator) Flush() {
}
d.ResponseWriter.(http.Flusher).Flush()
}
+
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return d.ResponseWriter.(http.Hijacker).Hijack()
}
+
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
@@ -107,6 +112,7 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
d.written += n
return n, err
}
+
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
return d.ResponseWriter.(http.Pusher).Push(target, opts)
}
@@ -261,7 +267,7 @@ func init() {
http.Flusher
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
- pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+ pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
return struct {
*responseWriterDelegator
http.Pusher
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index d86d0cf4b0..a4cc9810b0 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -33,6 +33,7 @@ package promhttp
import (
"compress/gzip"
+ "errors"
"fmt"
"io"
"net/http"
@@ -84,6 +85,13 @@ func Handler() http.Handler {
// instrumentation. Use the InstrumentMetricHandler function to apply the same
// kind of instrumentation as it is used by the Handler function.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+ return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts)
+}
+
+// HandlerForTransactional is like HandlerFor, but it uses a transactional gatherer, which
+// can safely change the returned *dto.MetricFamily in place before the call to `Gather` and
+// after the call to that `Gather`'s `done`.
+func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler {
var (
inFlightSem chan struct{}
errCnt = prometheus.NewCounterVec(
@@ -103,7 +111,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
errCnt.WithLabelValues("gathering")
errCnt.WithLabelValues("encoding")
if err := opts.Registry.Register(errCnt); err != nil {
- if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
errCnt = are.ExistingCollector.(*prometheus.CounterVec)
} else {
panic(err)
@@ -123,7 +132,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
return
}
}
- mfs, err := reg.Gather()
+ mfs, done, err := reg.Gather()
+ defer done()
if err != nil {
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error gathering metrics:", err)
@@ -242,7 +252,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht
cnt.WithLabelValues("500")
cnt.WithLabelValues("503")
if err := reg.Register(cnt); err != nil {
- if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
cnt = are.ExistingCollector.(*prometheus.CounterVec)
} else {
panic(err)
@@ -254,7 +265,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht
Help: "Current number of scrapes being served.",
})
if err := reg.Register(gge); err != nil {
- if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
gge = are.ExistingCollector.(prometheus.Gauge)
} else {
panic(err)
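A sketch of the new entry point; for a plain Registry the wrapper's done is a no-op, so this is equivalent to HandlerFor:

    reg := prometheus.NewRegistry()
    http.Handle("/metrics", promhttp.HandlerForTransactional(
    	prometheus.ToTransactionalGatherer(reg),
    	promhttp.HandlerOpts{},
    ))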
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
index 861b4d21ca..097aff2df6 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -38,11 +38,11 @@ func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return func(r *http.Request) (*http.Response, error) {
gauge.Inc()
defer gauge.Dec()
return next.RoundTrip(r)
- })
+ }
}
// InstrumentRoundTripperCounter is a middleware that wraps the provided
@@ -59,22 +59,29 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
// is not incremented.
//
+// Use with WithExemplarFromContext to add exemplars to the request counter.
+//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
- rtOpts := &option{}
+ rtOpts := defaultOptions()
for _, o := range opts {
- o(rtOpts)
+ o.apply(rtOpts)
}
code, method := checkLabels(counter)
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
+ exemplarAdd(
+ counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
+ 1,
+ rtOpts.getExemplarFn(r.Context()),
+ )
counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
}
return resp, err
- })
+ }
}
// InstrumentRoundTripperDuration is a middleware that wraps the provided
@@ -94,24 +101,30 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
// reported.
//
+// Use with WithExemplarFromContext to add exemplars to the duration histograms.
+//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
- rtOpts := &option{}
+ rtOpts := defaultOptions()
for _, o := range opts {
- o(rtOpts)
+ o.apply(rtOpts)
}
code, method := checkLabels(obs)
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
- obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds())
+ exemplarObserve(
+ obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
+ time.Since(start).Seconds(),
+ rtOpts.getExemplarFn(r.Context()),
+ )
}
return resp, err
- })
+ }
}
// InstrumentTrace is used to offer flexibility in instrumenting the available
@@ -149,7 +162,7 @@ type InstrumentTrace struct {
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return func(r *http.Request) (*http.Response, error) {
start := time.Now()
trace := &httptrace.ClientTrace{
@@ -231,5 +244,5 @@ func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) Ro
r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
return next.RoundTrip(r)
- })
+ }
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
index a23f0edc6f..bfe5009877 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -28,6 +28,22 @@ import (
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]string) {
+ if labels == nil {
+ obs.Observe(val)
+ return
+ }
+ obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
+}
+
+func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]string) {
+ if labels == nil {
+ obs.Add(val)
+ return
+ }
+ obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels)
+}
+
// InstrumentHandlerInFlight is a middleware that wraps the provided
// http.Handler. It sets the provided prometheus.Gauge to the number of
// requests currently handled by the wrapped http.Handler.
@@ -48,7 +64,7 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// names are "code" and "method". The function panics otherwise. For the "method"
// label, a predefined set of default label values is used to filter the given values.
// Values outside that set will be counted as the `unknown` method.
-//`WithExtraMethods` can be used to add more methods to the set. The Observe
+// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the request duration
// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
// the respective instance label names are present in the ObserverVec. For
@@ -62,28 +78,37 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
code, method := checkLabels(obs)
if code {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
- })
+ exemplarObserve(
+ obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
+ time.Since(now).Seconds(),
+ hOpts.getExemplarFn(r.Context()),
+ )
+ }
}
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
- obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
- })
+
+ exemplarObserve(
+ obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
+ time.Since(now).Seconds(),
+ hOpts.getExemplarFn(r.Context()),
+ )
+ }
}
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
@@ -104,25 +129,34 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
code, method := checkLabels(counter)
if code {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc()
- })
+
+ exemplarAdd(
+ counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
+ 1,
+ hOpts.getExemplarFn(r.Context()),
+ )
+ }
}
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
- counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc()
- })
+ exemplarAdd(
+ counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
+ 1,
+ hOpts.getExemplarFn(r.Context()),
+ )
+ }
}
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
@@ -148,20 +182,24 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
code, method := checkLabels(obs)
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
- obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
+ exemplarObserve(
+ obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
+ time.Since(now).Seconds(),
+ hOpts.getExemplarFn(r.Context()),
+ )
})
next.ServeHTTP(d, r)
- })
+ }
}
// InstrumentHandlerRequestSize is a middleware that wraps the provided
@@ -184,27 +222,34 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
code, method := checkLabels(obs)
-
if code {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
- obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size))
- })
+ exemplarObserve(
+ obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
+ float64(size),
+ hOpts.getExemplarFn(r.Context()),
+ )
+ }
}
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
- obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size))
- })
+ exemplarObserve(
+ obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
+ float64(size),
+ hOpts.getExemplarFn(r.Context()),
+ )
+ }
}
// InstrumentHandlerResponseSize is a middleware that wraps the provided
@@ -227,9 +272,9 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
code, method := checkLabels(obs)
@@ -237,7 +282,11 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written()))
+ exemplarObserve(
+ obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
+ float64(d.Written()),
+ hOpts.getExemplarFn(r.Context()),
+ )
})
}
@@ -246,7 +295,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
// Collector does not have a Desc or has more than one Desc or its Desc is
// invalid. It also panics if the Collector has any non-const, non-curried
// labels that are not named "code" or "method".
-func checkLabels(c prometheus.Collector) (code bool, method bool) {
+func checkLabels(c prometheus.Collector) (code, method bool) {
// TODO(beorn7): Remove this hacky way to check for instance labels
// once Descriptors can have their dimensionality queried.
var (
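Server-side, the instrumented handlers compose the same way; a sketch with illustrative names:

    duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
    	Name: "http_request_duration_seconds", Help: "Request duration.",
    }, []string{"code", "method"})

    mux := http.NewServeMux()
    log.Fatal(http.ListenAndServe(":8080",
    	promhttp.InstrumentHandlerDuration(duration, mux)))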
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
index 35e41bd1e6..c590d912c9 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
@@ -13,19 +13,46 @@
package promhttp
-// Option are used to configure a middleware or round tripper..
-type Option func(*option)
+import (
+ "context"
-type option struct {
- extraMethods []string
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// Option is used to configure a handler (middleware) or a round tripper.
+type Option interface {
+ apply(*options)
+}
+
+// options stores options for a handler or a round tripper.
+type options struct {
+ extraMethods []string
+ getExemplarFn func(requestCtx context.Context) prometheus.Labels
+}
+
+func defaultOptions() *options {
+ return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }}
}
+type optionApplyFunc func(*options)
+
+func (o optionApplyFunc) apply(opt *options) { o(opt) }
+
// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
//
// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage.
func WithExtraMethods(methods ...string) Option {
- return func(o *option) {
+ return optionApplyFunc(func(o *options) {
o.extraMethods = methods
- }
+ })
+}
+
+// WithExemplarFromContext allows adding a hook to all counter and histogram metrics.
+// If the hook function returns non-nil labels, an exemplar will be added for that request;
+// otherwise the metric will be instrumented without an exemplar.
+func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
+ return optionApplyFunc(func(o *options) {
+ o.getExemplarFn = getExemplarFn
+ })
}
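A sketch of the hook; the context key and trace-ID extraction are hypothetical, standing in for whatever a tracing middleware stores:

    type traceIDKey struct{} // hypothetical key set by tracing middleware

    opt := promhttp.WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
    	if id, ok := ctx.Value(traceIDKey{}).(string); ok {
    		return prometheus.Labels{"trace_id": id}
    	}
    	return nil // nil labels: instrument without an exemplar
    })
    // opt can then be passed to any InstrumentHandler* or InstrumentRoundTripper* middleware.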
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 383a7f5941..325f665ff6 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -15,8 +15,8 @@ package prometheus
import (
"bytes"
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -289,7 +289,7 @@ func (r *Registry) Register(c Collector) error {
// Is the descriptor valid at all?
if desc.err != nil {
- return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err)
}
// Is the descID unique?
@@ -407,6 +407,14 @@ func (r *Registry) MustRegister(cs ...Collector) {
// Gather implements Gatherer.
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+ r.mtx.RLock()
+
+ if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 {
+ // Fast path.
+ r.mtx.RUnlock()
+ return nil, nil
+ }
+
var (
checkedMetricChan = make(chan Metric, capMetricChan)
uncheckedMetricChan = make(chan Metric, capMetricChan)
@@ -416,7 +424,6 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
)
- r.mtx.RLock()
goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
checkedCollectors := make(chan Collector, len(r.collectorsByID))
@@ -556,7 +563,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
// This is intended for use with the textfile collector of the node exporter.
// Note that the node exporter expects the filename to be suffixed with ".prom".
func WriteToTextfile(filename string, g Gatherer) error {
- tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
+ tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
if err != nil {
return err
}
@@ -575,7 +582,7 @@ func WriteToTextfile(filename string, g Gatherer) error {
return err
}
- if err := os.Chmod(tmp.Name(), 0644); err != nil {
+ if err := os.Chmod(tmp.Name(), 0o644); err != nil {
return err
}
return os.Rename(tmp.Name(), filename)
@@ -596,7 +603,7 @@ func processMetric(
}
dtoMetric := &dto.Metric{}
if err := metric.Write(dtoMetric); err != nil {
- return fmt.Errorf("error collecting metric %v: %s", desc, err)
+ return fmt.Errorf("error collecting metric %v: %w", desc, err)
}
metricFamily, ok := metricFamiliesByName[desc.fqName]
if ok { // Existing name.
@@ -718,12 +725,13 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
for i, g := range gs {
mfs, err := g.Gather()
if err != nil {
- if multiErr, ok := err.(MultiError); ok {
+ multiErr := MultiError{}
+ if errors.As(err, &multiErr) {
for _, err := range multiErr {
- errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
}
} else {
- errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
}
}
for _, mf := range mfs {
@@ -884,11 +892,11 @@ func checkMetricConsistency(
h.Write(separatorByteSlice)
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
- if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
+ if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) {
// We cannot sort dtoMetric.Label in place as it is immutable by contract.
copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
copy(copiedLabels, dtoMetric.Label)
- sort.Sort(labelPairSorter(copiedLabels))
+ sort.Sort(internal.LabelPairSorter(copiedLabels))
dtoMetric.Label = copiedLabels
}
for _, lp := range dtoMetric.Label {
@@ -935,7 +943,7 @@ func checkDescConsistency(
metricFamily.GetName(), dtoMetric, desc,
)
}
- sort.Sort(labelPairSorter(lpsFromDesc))
+ sort.Sort(internal.LabelPairSorter(lpsFromDesc))
for i, lpFromDesc := range lpsFromDesc {
lpFromMetric := dtoMetric.Label[i]
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
@@ -948,3 +956,89 @@ func checkDescConsistency(
}
return nil
}
+
+var _ TransactionalGatherer = &MultiTRegistry{}
+
+// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple
+// transactional gatherers.
+//
+// It is the caller's responsibility to ensure the registries have mutually exclusive metric families;
+// no deduplication will happen.
+type MultiTRegistry struct {
+ tGatherers []TransactionalGatherer
+}
+
+// NewMultiTRegistry creates MultiTRegistry.
+func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry {
+ return &MultiTRegistry{
+ tGatherers: tGatherers,
+ }
+}
+
+// Gather implements TransactionalGatherer interface.
+func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) {
+ errs := MultiError{}
+
+ dFns := make([]func(), 0, len(r.tGatherers))
+ // TODO(bwplotka): Implement concurrency for those?
+ for _, g := range r.tGatherers {
+ // TODO(bwplotka): Check for duplicates?
+ m, d, err := g.Gather()
+ errs.Append(err)
+
+ mfs = append(mfs, m...)
+ dFns = append(dFns, d)
+ }
+
+ // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already.
+ sort.Slice(mfs, func(i, j int) bool {
+ return *mfs[i].Name < *mfs[j].Name
+ })
+ return mfs, func() {
+ for _, d := range dFns {
+ d()
+ }
+ }, errs.MaybeUnwrap()
+}
+
+// TransactionalGatherer represents a transactional gatherer that can be notified, via the done
+// function, that the memory used by the returned metric families is no longer needed by the
+// caller. This allows implementations that cache.
+type TransactionalGatherer interface {
+ // Gather returns metrics in a lexicographically sorted slice
+ // of uniquely named MetricFamily protobufs. Gather ensures that the
+ // returned slice is valid and self-consistent so that it can be used
+ // for valid exposition. As an exception to the strict consistency
+ // requirements described for metric.Desc, Gather will tolerate
+ // different sets of label names for metrics of the same metric family.
+ //
+ // Even if an error occurs, Gather attempts to gather as many metrics as
+ // possible. Hence, if a non-nil error is returned, the returned
+ // MetricFamily slice could be nil (in case of a fatal error that
+ // prevented any meaningful metric collection) or contain a number of
+ // MetricFamily protobufs, some of which might be incomplete, and some
+ // might be missing altogether. The returned error (which might be a
+ // MultiError) explains the details. Note that this is mostly useful for
+ // debugging purposes. If the gathered protobufs are to be used for
+ // exposition in actual monitoring, it is almost always better to not
+ // expose an incomplete result and instead disregard the returned
+ // MetricFamily protobufs in case the returned error is non-nil.
+ //
+ // Important: done is expected to be called (even if an error occurs!)
+ // once the caller no longer needs the returned slice of dto.MetricFamily.
+ Gather() (_ []*dto.MetricFamily, done func(), err error)
+}
+
+// ToTransactionalGatherer transforms a Gatherer into a TransactionalGatherer with a no-op done function.
+func ToTransactionalGatherer(g Gatherer) TransactionalGatherer {
+ return &noTransactionGatherer{g: g}
+}
+
+type noTransactionGatherer struct {
+ g Gatherer
+}
+
+// Gather implements TransactionalGatherer interface.
+func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
+ mfs, err := g.g.Gather()
+ return mfs, func() {}, err
+}
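A sketch of joining two registries through the transactional interface; regA and regB stand for separate *prometheus.Registry values with disjoint metric families:

    multi := prometheus.NewMultiTRegistry(
    	prometheus.ToTransactionalGatherer(regA),
    	prometheus.ToTransactionalGatherer(regB),
    )
    mfs, done, err := multi.Gather()
    defer done() // must run even on error, once mfs is no longer needed
    if err != nil {
    	// err may be a MultiError; mfs can still hold partial results
    }
    _ = mfs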
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index b4e0ae11cb..2d3abc1cbd 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -23,6 +23,8 @@ import (
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
+ "github.com/prometheus/client_golang/prometheus/internal"
+
dto "github.com/prometheus/client_model/go"
)
@@ -38,6 +40,23 @@ const (
UntypedValue
)
+var (
+ CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
+ GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
+ UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
+)
+
+func (v ValueType) ToDTO() *dto.MetricType {
+ switch v {
+ case CounterValue:
+ return CounterMetricTypePtr
+ case GaugeValue:
+ return GaugeMetricTypePtr
+ default:
+ return UntypedMetricTypePtr
+ }
+}
+
// valueFunc is a generic metric for simple values retrieved at collect time
// from a function. It implements Metric and Collector. Its effective type is
// determined by ValueType. This is a low-level building block used by the
@@ -91,11 +110,15 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
+
+ metric := &dto.Metric{}
+ if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil {
+ return nil, err
+ }
+
return &constMetric{
- desc: desc,
- valType: valueType,
- val: value,
- labelPairs: MakeLabelPairs(desc, labelValues),
+ desc: desc,
+ metric: metric,
}, nil
}
@@ -110,10 +133,8 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal
}
type constMetric struct {
- desc *Desc
- valType ValueType
- val float64
- labelPairs []*dto.LabelPair
+ desc *Desc
+ metric *dto.Metric
}
func (m *constMetric) Desc() *Desc {
@@ -121,7 +142,11 @@ func (m *constMetric) Desc() *Desc {
}
func (m *constMetric) Write(out *dto.Metric) error {
- return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
+ out.Label = m.metric.Label
+ out.Counter = m.metric.Counter
+ out.Gauge = m.metric.Gauge
+ out.Untyped = m.metric.Untyped
+ return nil
}
func populateMetric(
@@ -170,12 +195,12 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
})
}
labelPairs = append(labelPairs, desc.constLabelPairs...)
- sort.Sort(labelPairSorter(labelPairs))
+ sort.Sort(internal.LabelPairSorter(labelPairs))
return labelPairs
}
// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
-const ExemplarMaxRunes = 64
+const ExemplarMaxRunes = 128
// newExemplar creates a new dto.Exemplar from the provided values. An error is
// returned if any of the label names or values are invalid or if the total
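With this change a const metric is validated and encoded once at construction, and Write only copies the precomputed fields. A sketch, assuming dto is the client_model package:

    desc := prometheus.NewDesc("build_info", "Build information.", []string{"version"}, nil)
    m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, 1, "v1.2.3")
    if err != nil {
    	log.Fatal(err) // e.g. wrong number of label values, caught at construction
    }
    var out dto.Metric
    _ = m.Write(&out) // copies the prebuilt Label and Gauge fields; no re-encoding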
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 4ababe6c98..7ae322590c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -99,6 +99,16 @@ func (m *MetricVec) Delete(labels Labels) bool {
return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
}
+// DeletePartialMatch deletes all metrics where the variable labels contain all of those
+// passed in as labels. The order of the labels does not matter.
+// It returns the number of metrics deleted.
+//
+// Note that curried labels will never be matched if deleting from the curried vector.
+// To match curried labels with DeletePartialMatch, it must be called on the base vector.
+func (m *MetricVec) DeletePartialMatch(labels Labels) int {
+ return m.metricMap.deleteByLabels(labels, m.curry)
+}
+
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
// show up in GoDoc.
@@ -381,6 +391,82 @@ func (m *metricMap) deleteByHashWithLabels(
return true
}
+// deleteByLabels deletes all metrics whose variable labels contain all of the given labels.
+func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ var numDeleted int
+
+ for h, metrics := range m.metrics {
+ i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
+ if i >= len(metrics) {
+ // Didn't find matching labels in this metric slice.
+ continue
+ }
+ delete(m.metrics, h)
+ numDeleted++
+ }
+
+ return numDeleted
+}
+
+// findMetricWithPartialLabels returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithPartialLabels(
+ desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+ for i, metric := range metrics {
+ if matchPartialLabels(desc, metric.values, labels, curry) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// indexOf searches the given slice of strings for the target string and returns
+// the index or len(items) as well as a boolean indicating whether the search succeeded.
+func indexOf(target string, items []string) (int, bool) {
+ for i, l := range items {
+ if l == target {
+ return i, true
+ }
+ }
+ return len(items), false
+}
+
+// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
+// and returns whether it matches either the "base" value or the curried value accordingly.
+// It also indicates whether the match is against a curried or uncurried value.
+func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
+ for _, curriedValue := range curry {
+ if curriedValue.index == index {
+ // This label was curried. See if the curried value matches our target.
+ return curriedValue.value == targetValue, true
+ }
+ }
+ // This label was not curried. See if the current value matches our target label.
+ return values[index] == targetValue, false
+}
+
+// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
+func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+ for l, v := range labels {
+ // Check if the target label exists in our metrics and get the index.
+ varLabelIndex, validLabel := indexOf(l, desc.variableLabels)
+ if validLabel {
+ // Check the value of that label against the target value.
+ // We don't consider curried values in partial matches.
+ matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
+ if matches && !curried {
+ continue
+ }
+ }
+ return false
+ }
+ return true
+}
+
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
@@ -485,7 +571,7 @@ func findMetricWithLabels(
return len(metrics)
}
-func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
if len(values) != len(lvs)+len(curry) {
return false
}
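A sketch of the new partial-match deletion; the series and label values are illustrative:

    requests := prometheus.NewCounterVec(prometheus.CounterOpts{
    	Name: "http_requests_total", Help: "Total requests.",
    }, []string{"code", "method"})
    requests.WithLabelValues("404", "GET").Inc()
    requests.WithLabelValues("404", "POST").Inc()
    requests.WithLabelValues("200", "GET").Inc()

    deleted := requests.DeletePartialMatch(prometheus.Labels{"code": "404"})
    fmt.Println(deleted) // 2: both 404 series removed, the 200 one kept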
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index 74ee93280f..1498ee144c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -21,6 +21,8 @@ import (
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
)
// WrapRegistererWith returns a Registerer wrapping the provided
@@ -182,7 +184,7 @@ func (m *wrappingMetric) Write(out *dto.Metric) error {
Value: proto.String(lv),
})
}
- sort.Sort(labelPairSorter(out.Label))
+ sort.Sort(internal.LabelPairSorter(out.Label))
return nil
}
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
index dc2eedeefc..f819e4f8b5 100644
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -12,6 +12,7 @@
// limitations under the License.
// Build only when actually fuzzing
+//go:build gofuzz
// +build gofuzz
package expfmt
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 8a9313a3be..9d94ae9eff 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -22,7 +22,6 @@ import (
"strconv"
"strings"
- "github.com/golang/protobuf/ptypes"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
@@ -473,10 +472,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
if err != nil {
return written, err
}
- ts, err := ptypes.Timestamp((*e).Timestamp)
+ err = (*e).Timestamp.CheckValid()
if err != nil {
return written, err
}
+ ts := (*e).Timestamp.AsTime()
// TODO(beorn7): Format this directly from components of ts to
// avoid overflow/underflow and precision issues of the float
// conversion.
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index 7f67b16e42..c909b8aa8c 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -193,7 +193,7 @@ func ParseDuration(durationStr string) (Duration, error) {
// Allow 0 without a unit.
return 0, nil
case "":
- return 0, fmt.Errorf("empty duration string")
+ return 0, errors.New("empty duration string")
}
matches := durationRE.FindStringSubmatch(durationStr)
if matches == nil {
diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore
index 25e3659ab2..7cc33ae4a7 100644
--- a/vendor/github.com/prometheus/procfs/.gitignore
+++ b/vendor/github.com/prometheus/procfs/.gitignore
@@ -1 +1,2 @@
-/fixtures/
+/testdata/fixtures/
+/fixtures
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
index 0aa09edacb..a197699a1e 100644
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -1,4 +1,12 @@
---
linters:
enable:
- - golint
+ - godot
+ - revive
+
+linters-settings:
+ godot:
+ capital: true
+ exclude:
+ # Ignore "See: URL"
+ - 'See:'
diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
index 9a1aff4127..d325872bdf 100644
--- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
+++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
@@ -1,3 +1,3 @@
-## Prometheus Community Code of Conduct
+# Prometheus Community Code of Conduct
-Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
index 943de7615e..853eb9d49b 100644
--- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -97,7 +97,7 @@ Many of the files are changing continuously and the data being read can in some
reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls
to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the
full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
-This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of
+This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of
the file.
Note that parsing the file's contents can still be performed one line at a time. This is done by first reading
@@ -113,7 +113,7 @@ the full file, and then using a scanner on the `[]byte` or `string` containing t
```
The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files
-can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does
+can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does
not bother to check the size of the file before reading.
```
data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
index fa2bd5b528..7edfe4d093 100644
--- a/vendor/github.com/prometheus/procfs/Makefile
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -14,18 +14,18 @@
include Makefile.common
%/.unpacked: %.ttar
- @echo ">> extracting fixtures"
+ @echo ">> extracting fixtures $*"
./ttar -C $(dir $*) -x -f $*.ttar
touch $@
-fixtures: fixtures/.unpacked
+fixtures: testdata/fixtures/.unpacked
update_fixtures:
- rm -vf fixtures/.unpacked
- ./ttar -c -f fixtures.ttar fixtures/
+ rm -vf testdata/fixtures/.unpacked
+ ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/
.PHONY: build
build:
.PHONY: test
-test: fixtures/.unpacked common-test
+test: testdata/fixtures/.unpacked common-test
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index a1b1ca40f4..6c8e3e2197 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -36,29 +36,6 @@ GO_VERSION ?= $(shell $(GO) version)
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
-GOVENDOR :=
-GO111MODULE :=
-ifeq (, $(PRE_GO_111))
- ifneq (,$(wildcard go.mod))
- # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
- GO111MODULE := on
-
- ifneq (,$(wildcard vendor))
- # Always use the local vendor/ directory to satisfy the dependencies.
- GOOPTS := $(GOOPTS) -mod=vendor
- endif
- endif
-else
- ifneq (,$(wildcard go.mod))
- ifneq (,$(wildcard vendor))
-$(warning This repository requires Go >= 1.11 because of Go modules)
-$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
- endif
- else
- # This repository isn't using Go modules (yet).
- GOVENDOR := $(FIRST_GOPATH)/bin/govendor
- endif
-endif
PROMU := $(FIRST_GOPATH)/bin/promu
pkgs = ./...
@@ -78,17 +55,23 @@ ifneq ($(shell which gotestsum),)
endif
endif
-PROMU_VERSION ?= 0.12.0
+PROMU_VERSION ?= 0.13.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.39.0
+GOLANGCI_LINT_VERSION ?= v1.45.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
- GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+ # If we're in CI and there is an Actions file, that means the linter
+ # is being run in Actions, so we don't need to run it here.
+ ifeq (,$(CIRCLE_JOB))
+ GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+ else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
+ GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+ endif
endif
endif
@@ -144,32 +127,25 @@ common-check_license:
.PHONY: common-deps
common-deps:
@echo ">> getting dependencies"
-ifdef GO111MODULE
- GO111MODULE=$(GO111MODULE) $(GO) mod download
-else
- $(GO) get $(GOOPTS) -t ./...
-endif
+ $(GO) mod download
.PHONY: update-go-deps
update-go-deps:
@echo ">> updating Go dependencies"
@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
- $(GO) get $$m; \
+ $(GO) get -d $$m; \
done
- GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifneq (,$(wildcard vendor))
- GO111MODULE=$(GO111MODULE) $(GO) mod vendor
-endif
+ $(GO) mod tidy
.PHONY: common-test-short
common-test-short: $(GOTEST_DIR)
@echo ">> running short tests"
- GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs)
+ $(GOTEST) -short $(GOOPTS) $(pkgs)
.PHONY: common-test
common-test: $(GOTEST_DIR)
@echo ">> running all tests"
- GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
+ $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
$(GOTEST_DIR):
@mkdir -p $@
@@ -177,25 +153,21 @@ $(GOTEST_DIR):
.PHONY: common-format
common-format:
@echo ">> formatting code"
- GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
+ $(GO) fmt $(pkgs)
.PHONY: common-vet
common-vet:
@echo ">> vetting code"
- GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
+ $(GO) vet $(GOOPTS) $(pkgs)
.PHONY: common-lint
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
-ifdef GO111MODULE
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
# Otherwise staticcheck might fail randomly for some reason not yet explained.
- GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
- GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
-else
- $(GOLANGCI_LINT) run $(pkgs)
-endif
+ $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
+ $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
.PHONY: common-yamllint
@@ -212,28 +184,15 @@ endif
common-staticcheck: lint
.PHONY: common-unused
-common-unused: $(GOVENDOR)
-ifdef GOVENDOR
- @echo ">> running check for unused packages"
- @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
-else
-ifdef GO111MODULE
+common-unused:
@echo ">> running check for unused/missing packages in go.mod"
- GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifeq (,$(wildcard vendor))
+ $(GO) mod tidy
@git diff --exit-code -- go.sum go.mod
-else
- @echo ">> running check for unused packages in vendor/"
- GO111MODULE=$(GO111MODULE) $(GO) mod vendor
- @git diff --exit-code -- go.sum go.mod vendor/
-endif
-endif
-endif
.PHONY: common-build
common-build: promu
@echo ">> building binaries"
- GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
+ $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
.PHONY: common-tarball
common-tarball: promu
@@ -289,12 +248,6 @@ $(GOLANGCI_LINT):
| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
endif
-ifdef GOVENDOR
-.PHONY: $(GOVENDOR)
-$(GOVENDOR):
- GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
-endif
-
.PHONY: precheck
precheck::
diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md
index 67741f015a..fed02d85c7 100644
--- a/vendor/github.com/prometheus/procfs/SECURITY.md
+++ b/vendor/github.com/prometheus/procfs/SECURITY.md
@@ -3,4 +3,4 @@
The Prometheus security policy, including how to report vulnerabilities, can be
found here:
-https://prometheus.io/docs/operating/security/
+<https://prometheus.io/docs/operating/security/>
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
index 4e47e61720..68f36e888f 100644
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -15,11 +15,28 @@ package procfs
import (
"fmt"
- "io/ioutil"
"net"
+ "os"
+ "strconv"
"strings"
)
+// Learned from include/uapi/linux/if_arp.h.
+const (
+ // Completed entry (ha valid).
+ ATFComplete = 0x02
+ // Permanent entry.
+ ATFPermanent = 0x04
+ // Publish entry.
+ ATFPublish = 0x08
+ // Has requested trailers.
+ ATFUseTrailers = 0x10
+ // Obsoleted: Want to use a netmask (only for proxy entries).
+ ATFNetmask = 0x20
+ // Don't answer these addresses.
+ ATFDontPublish = 0x40
+)
+
// ARPEntry contains a single row of the columnar data represented in
// /proc/net/arp.
type ARPEntry struct {
@@ -29,12 +46,14 @@ type ARPEntry struct {
HWAddr net.HardwareAddr
// Name of the device
Device string
+ // Flags
+ Flags byte
}
// GatherARPEntries retrieves all the ARP entries, parses the relevant columns,
// and then returns a slice of ARPEntry values.
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
+ data, err := os.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
}
@@ -72,14 +91,26 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
}
func parseARPEntry(columns []string) (ARPEntry, error) {
+ entry := ARPEntry{Device: columns[5]}
ip := net.ParseIP(columns[0])
- mac := net.HardwareAddr(columns[3])
+ entry.IPAddr = ip
+
+ if mac, err := net.ParseMAC(columns[3]); err == nil {
+ entry.HWAddr = mac
+ } else {
+ return ARPEntry{}, err
+ }
- entry := ARPEntry{
- IPAddr: ip,
- HWAddr: mac,
- Device: columns[5],
+ if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil {
+ entry.Flags = byte(flags)
+ } else {
+ return ARPEntry{}, err
}
return entry, nil
}
+
+// IsComplete returns true if the ARP entry is marked with the complete flag.
+func (entry *ARPEntry) IsComplete() bool {
+ return entry.Flags&ATFComplete != 0
+}
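
The new ATF* constants and the Flags field let callers of the vendored package filter out incomplete ARP entries. A short usage sketch, assuming the package is imported as github.com/prometheus/procfs (the main function and output format are illustrative only, not part of this change):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/prometheus/procfs"
    )

    func main() {
    	fs, err := procfs.NewFS("/proc")
    	if err != nil {
    		log.Fatal(err)
    	}
    	entries, err := fs.GatherARPEntries()
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, e := range entries {
    		// IsComplete checks the ATFComplete bit parsed from column 2.
    		fmt.Printf("%s %s complete=%v\n", e.IPAddr, e.HWAddr, e.IsComplete())
    	}
    }

Note that parseARPEntry above calls strconv.ParseUint with base 0: /proc/net/arp prints flags as hex strings such as 0x2, and base 0 honors the 0x prefix.
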
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
index 5623b24a16..ff6b927da1 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux
// +build linux
package procfs
@@ -27,7 +28,7 @@ import (
"github.com/prometheus/procfs/internal/util"
)
-// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
+// CPUInfo contains general information about a system CPU found in /proc/cpuinfo.
type CPUInfo struct {
Processor uint
VendorID string
@@ -469,7 +470,7 @@ func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
}
// firstNonEmptyLine advances the scanner to the first non-empty line
-// and returns the contents of that line
+// and returns the contents of that line.
func firstNonEmptyLine(scanner *bufio.Scanner) string {
for scanner.Scan() {
line := scanner.Text()
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
index 44b590ed38..64cfd534c1 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux && (arm || arm64)
// +build linux
// +build arm arm64
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
index 91e272573a..c11207f3ab 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux && (mips || mipsle || mips64 || mips64le)
// +build linux
// +build mips mipsle mips64 mips64le
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
index 95b5b4ec44..ea41bf2ca1 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build linux
-// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
package procfs
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
index 6068bd571c..003bc2ad4a 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux && (ppc64 || ppc64le)
// +build linux
// +build ppc64 ppc64le
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
index e83c2e207c..1c9b7313b6 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux && (riscv || riscv64)
// +build linux
// +build riscv riscv64
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
index 26814eebaa..fa3686bc00 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux
// +build linux
package procfs
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
index d5bedf97f3..a0ef55562e 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux && (386 || amd64)
// +build linux
// +build 386 amd64
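
All of the cpuinfo hunks above make the same mechanical change: Go 1.17 introduced the //go:build constraint syntax, and gofmt inserts it above the legacy // +build lines, which are kept so pre-1.17 toolchains still honor the constraint. A minimal sketch of the resulting dual-form header (file contents hypothetical):

    //go:build linux && (386 || amd64)
    // +build linux
    // +build 386 amd64

    // The two forms must express the same constraint; the go vet
    // buildtag check reports any mismatch between them.
    package procfs

Once the module's minimum supported Go version reaches 1.17, the // +build lines can be dropped entirely.
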
diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar
deleted file mode 100644
index 5e7eeef4a5..0000000000
--- a/vendor/github.com/prometheus/procfs/fixtures.ttar
+++ /dev/null
@@ -1,7673 +0,0 @@
-# Archive created by ttar -c -f fixtures.ttar fixtures/
-Directory: fixtures
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/cmdline
-Lines: 1
-vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/comm
-Lines: 1
-vim
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/cwd
-SymlinkTo: /usr/bin
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/environ
-Lines: 1
-PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/exe
-SymlinkTo: /usr/bin/vim
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/fd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/0
-SymlinkTo: ../../symlinktargets/abc
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/1
-SymlinkTo: ../../symlinktargets/def
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/10
-SymlinkTo: ../../symlinktargets/xyz
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/2
-SymlinkTo: ../../symlinktargets/ghi
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/3
-SymlinkTo: ../../symlinktargets/uvw
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/fdinfo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/0
-Lines: 6
-pos: 0
-flags: 02004000
-mnt_id: 13
-inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000
-inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a
-inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/1
-Lines: 4
-pos: 0
-flags: 02004002
-mnt_id: 13
-eventfd-count: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/10
-Lines: 3
-pos: 0
-flags: 02004002
-mnt_id: 9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/2
-Lines: 3
-pos: 0
-flags: 02004002
-mnt_id: 9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/3
-Lines: 3
-pos: 0
-flags: 02004002
-mnt_id: 9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/io
-Lines: 7
-rchar: 750339
-wchar: 818609
-syscr: 7405
-syscw: 5245
-read_bytes: 1024
-write_bytes: 2048
-cancelled_write_bytes: -1024
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/limits
-Lines: 17
-Limit Soft Limit Hard Limit Units
-Max cpu time unlimited unlimited seconds
-Max file size unlimited unlimited bytes
-Max data size unlimited unlimited bytes
-Max stack size 8388608 unlimited bytes
-Max core file size 0 unlimited bytes
-Max resident set unlimited unlimited bytes
-Max processes 62898 62898 processes
-Max open files 2048 4096 files
-Max locked memory 18446744073708503040 18446744073708503040 bytes
-Max address space 8589934592 unlimited bytes
-Max file locks unlimited unlimited locks
-Max pending signals 62898 62898 signals
-Max msgqueue size 819200 819200 bytes
-Max nice priority 0 0
-Max realtime priority 0 0
-Max realtime timeout unlimited unlimited us
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/mountstats
-Lines: 20
-device rootfs mounted on / with fstype rootfs
-device sysfs mounted on /sys with fstype sysfs
-device proc mounted on /proc with fstype proc
-device /dev/sda1 mounted on / with fstype ext4
-device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
- opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none
- age: 13968
- caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
- nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
- sec: flavor=1,pseudoflavor=1
- events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
- bytes: 1207640230 0 0 0 1210214218 0 295483 0
- RPC iostats version: 1.0 p/v: 100003/4 (nfs)
- xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
- per-op statistics
- NULL: 0 0 0 0 0 0 0 0
- READ: 1298 1298 0 207680 1210292152 6 79386 79407
- WRITE: 0 0 0 0 0 0 0 0
- ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717
-
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/net
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/net/dev
-Lines: 4
-Inter-| Receive | Transmit
- face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
- lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/ns
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/ns/mnt
-SymlinkTo: mnt:[4026531840]
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/ns/net
-SymlinkTo: net:[4026531993]
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/root
-SymlinkTo: /
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/schedstat
-Lines: 1
-411605849 93680043 79
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/smaps
-Lines: 252
-00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager
-Size: 8900 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 2952 kB
-Pss: 2952 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 2952 kB
-Private_Dirty: 0 kB
-Referenced: 2864 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd ex mr mw me dw sd
-00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager
-Size: 10236 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 6152 kB
-Pss: 6152 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 6152 kB
-Private_Dirty: 0 kB
-Referenced: 5308 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd mr mw me dw sd
-016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager
-Size: 424 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 176 kB
-Pss: 176 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 84 kB
-Private_Dirty: 92 kB
-Referenced: 176 kB
-Anonymous: 92 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 12 kB
-SwapPss: 12 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me dw ac sd
-0171a000-0173f000 rw-p 00000000 00:00 0
-Size: 148 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 76 kB
-Pss: 76 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 76 kB
-Referenced: 76 kB
-Anonymous: 76 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd
-c000000000-c000400000 rw-p 00000000 00:00 0
-Size: 4096 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 2564 kB
-Pss: 2564 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 20 kB
-Private_Dirty: 2544 kB
-Referenced: 2544 kB
-Anonymous: 2564 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 1100 kB
-SwapPss: 1100 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd
-c000400000-c001600000 rw-p 00000000 00:00 0
-Size: 18432 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 16024 kB
-Pss: 16024 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 5864 kB
-Private_Dirty: 10160 kB
-Referenced: 11944 kB
-Anonymous: 16024 kB
-LazyFree: 5848 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 440 kB
-SwapPss: 440 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd nh
-c001600000-c004000000 rw-p 00000000 00:00 0
-Size: 43008 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 0 kB
-Pss: 0 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 0 kB
-Referenced: 0 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd
-7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0
-Size: 38596 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 1992 kB
-Pss: 1992 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 476 kB
-Private_Dirty: 1516 kB
-Referenced: 1828 kB
-Anonymous: 1992 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 384 kB
-SwapPss: 384 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd
-7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack]
-Size: 132 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 8 kB
-Pss: 8 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 8 kB
-Referenced: 8 kB
-Anonymous: 8 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 4 kB
-SwapPss: 4 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me gd ac
-7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar]
-Size: 12 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 0 kB
-Pss: 0 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 0 kB
-Referenced: 0 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd mr pf io de dd sd
-7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso]
-Size: 8 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 4 kB
-Pss: 0 kB
-Shared_Clean: 4 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 0 kB
-Referenced: 4 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd ex mr mw me de sd
-ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
-Size: 4 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 0 kB
-Pss: 0 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 0 kB
-Referenced: 0 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd ex
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/smaps_rollup
-Lines: 17
-00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup]
-Rss: 29948 kB
-Pss: 29944 kB
-Shared_Clean: 4 kB
-Shared_Dirty: 0 kB
-Private_Clean: 15548 kB
-Private_Dirty: 14396 kB
-Referenced: 24752 kB
-Anonymous: 20756 kB
-LazyFree: 5848 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 1940 kB
-SwapPss: 1940 kB
-Locked: 0 kB
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/stat
-Lines: 1
-26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/status
-Lines: 53
-
-Name: prometheus
-Umask: 0022
-State: S (sleeping)
-Tgid: 26231
-Ngid: 0
-Pid: 26231
-PPid: 1
-TracerPid: 0
-Uid: 1000 1000 1000 0
-Gid: 1001 1001 1001 0
-FDSize: 128
-Groups:
-NStgid: 1
-NSpid: 1
-NSpgid: 1
-NSsid: 1
-VmPeak: 58472 kB
-VmSize: 58440 kB
-VmLck: 0 kB
-VmPin: 0 kB
-VmHWM: 8028 kB
-VmRSS: 6716 kB
-RssAnon: 2092 kB
-RssFile: 4624 kB
-RssShmem: 0 kB
-VmData: 2580 kB
-VmStk: 136 kB
-VmExe: 948 kB
-VmLib: 6816 kB
-VmPTE: 128 kB
-VmPMD: 12 kB
-VmSwap: 660 kB
-HugetlbPages: 0 kB
-Threads: 1
-SigQ: 8/63965
-SigPnd: 0000000000000000
-ShdPnd: 0000000000000000
-SigBlk: 7be3c0fe28014a03
-SigIgn: 0000000000001000
-SigCgt: 00000001800004ec
-CapInh: 0000000000000000
-CapPrm: 0000003fffffffff
-CapEff: 0000003fffffffff
-CapBnd: 0000003fffffffff
-CapAmb: 0000000000000000
-Seccomp: 0
-Cpus_allowed: ff
-Cpus_allowed_list: 0-7
-Mems_allowed: 00000000,00000001
-Mems_allowed_list: 0
-voluntary_ctxt_switches: 4742839
-nonvoluntary_ctxt_switches: 1727500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/wchan
-Lines: 1
-poll_schedule_timeoutEOF
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26232
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/cmdline
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/comm
-Lines: 1
-ata_sff
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/cwd
-SymlinkTo: /does/not/exist
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26232/fd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/0
-SymlinkTo: ../../symlinktargets/abc
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/1
-SymlinkTo: ../../symlinktargets/def
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/2
-SymlinkTo: ../../symlinktargets/ghi
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/3
-SymlinkTo: ../../symlinktargets/uvw
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/4
-SymlinkTo: ../../symlinktargets/xyz
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/limits
-Lines: 17
-Limit Soft Limit Hard Limit Units
-Max cpu time unlimited unlimited seconds
-Max file size unlimited unlimited bytes
-Max data size unlimited unlimited bytes
-Max stack size 8388608 unlimited bytes
-Max core file size 0 unlimited bytes
-Max resident set unlimited unlimited bytes
-Max processes 29436 29436 processes
-Max open files 1024 4096 files
-Max locked memory 65536 65536 bytes
-Max address space unlimited unlimited bytes
-Max file locks unlimited unlimited locks
-Max pending signals 29436 29436 signals
-Max msgqueue size 819200 819200 bytes
-Max nice priority 0 0
-Max realtime priority 0 0
-Max realtime timeout unlimited unlimited us
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/maps
-Lines: 9
-55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat
-55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat
-55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap]
-7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive
-7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so
-7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack]
-7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar]
-7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso]
-ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/root
-SymlinkTo: /does/not/exist
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/stat
-Lines: 1
-33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/wchan
-Lines: 1
-0EOF
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26233
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26233/cmdline
-Lines: 1
-com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26233/schedstat
-Lines: 8
- ____________________________________
-< this is a malformed schedstat file >
- ------------------------------------
- \ ^__^
- \ (oo)\_______
- (__)\ )\/\
- ||----w |
- || ||
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26234
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26234/maps
-Lines: 4
-08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh
-08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh
-0808c000-08146000 rwxp 00000000 00:00 0
-40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/584
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/584/stat
-Lines: 2
-1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
-#!/bin/cat /proc/self/stat
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/buddyinfo
-Lines: 3
-Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
-Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0
-Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/cmdline
-Lines: 1
-BOOT_IMAGE=/vmlinuz-5.11.0-22-generic root=UUID=456a0345-450d-4f7b-b7c9-43e3241d99ad ro quiet splash vt.handoff=7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/cpuinfo
-Lines: 216
-processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 799.998
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 0
-cpu cores : 4
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 1
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.037
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 1
-cpu cores : 4
-apicid : 2
-initial apicid : 2
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 2
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.010
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 2
-cpu cores : 4
-apicid : 4
-initial apicid : 4
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 3
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.028
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 3
-cpu cores : 4
-apicid : 6
-initial apicid : 6
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 4
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 799.989
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 0
-cpu cores : 4
-apicid : 1
-initial apicid : 1
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 5
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.083
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 1
-cpu cores : 4
-apicid : 3
-initial apicid : 3
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 6
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.017
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 2
-cpu cores : 4
-apicid : 5
-initial apicid : 5
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 7
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.030
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 3
-cpu cores : 4
-apicid : 7
-initial apicid : 7
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/crypto
-Lines: 972
-name : ccm(aes)
-driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni))
-module : ccm
-priority : 300
-refcnt : 4
-selftest : passed
-internal : no
-type : aead
-async : no
-blocksize : 1
-ivsize : 16
-maxauthsize : 16
-geniv : <none>
-
-name : cbcmac(aes)
-driver : cbcmac(aes-aesni)
-module : ccm
-priority : 300
-refcnt : 7
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 16
-
-name : ecdh
-driver : ecdh-generic
-module : ecdh_generic
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : kpp
-async : yes
-
-name : ecb(arc4)
-driver : ecb(arc4)-generic
-module : arc4
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 1
-max keysize : 256
-ivsize : 0
-chunksize : 1
-walksize : 1
-
-name : arc4
-driver : arc4-generic
-module : arc4
-priority : 0
-refcnt : 3
-selftest : passed
-internal : no
-type : cipher
-blocksize : 1
-min keysize : 1
-max keysize : 256
-
-name : crct10dif
-driver : crct10dif-pclmul
-module : crct10dif_pclmul
-priority : 200
-refcnt : 2
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 2
-
-name : crc32
-driver : crc32-pclmul
-module : crc32_pclmul
-priority : 200
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 4
-
-name : __ghash
-driver : cryptd(__ghash-pclmulqdqni)
-module : kernel
-priority : 50
-refcnt : 1
-selftest : passed
-internal : yes
-type : ahash
-async : yes
-blocksize : 16
-digestsize : 16
-
-name : ghash
-driver : ghash-clmulni
-module : ghash_clmulni_intel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : ahash
-async : yes
-blocksize : 16
-digestsize : 16
-
-name : __ghash
-driver : __ghash-pclmulqdqni
-module : ghash_clmulni_intel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : yes
-type : shash
-blocksize : 16
-digestsize : 16
-
-name : crc32c
-driver : crc32c-intel
-module : crc32c_intel
-priority : 200
-refcnt : 5
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 4
-
-name : cbc(aes)
-driver : cbc(aes-aesni)
-module : kernel
-priority : 300
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : ctr(aes)
-driver : ctr(aes-aesni)
-module : kernel
-priority : 300
-refcnt : 5
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : pkcs1pad(rsa,sha256)
-driver : pkcs1pad(rsa-generic,sha256)
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : akcipher
-
-name : __xts(aes)
-driver : cryptd(__xts-aes-aesni)
-module : kernel
-priority : 451
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 32
-max keysize : 64
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : xts(aes)
-driver : xts-aes-aesni
-module : kernel
-priority : 401
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 32
-max keysize : 64
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ctr(aes)
-driver : cryptd(__ctr-aes-aesni)
-module : kernel
-priority : 450
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 1
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : ctr(aes)
-driver : ctr-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __cbc(aes)
-driver : cryptd(__cbc-aes-aesni)
-module : kernel
-priority : 450
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : cbc(aes)
-driver : cbc-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ecb(aes)
-driver : cryptd(__ecb-aes-aesni)
-module : kernel
-priority : 450
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 0
-chunksize : 16
-walksize : 16
-
-name : ecb(aes)
-driver : ecb-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 0
-chunksize : 16
-walksize : 16
-
-name : __generic-gcm-aes-aesni
-driver : cryptd(__driver-generic-gcm-aes-aesni)
-module : kernel
-priority : 50
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : yes
-blocksize : 1
-ivsize : 12
-maxauthsize : 16
-geniv : <none>
-
-name : gcm(aes)
-driver : generic-gcm-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : aead
-async : yes
-blocksize : 1
-ivsize : 12
-maxauthsize : 16
-geniv : <none>
-
-name : __generic-gcm-aes-aesni
-driver : __driver-generic-gcm-aes-aesni
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : no
-blocksize : 1
-ivsize : 12
-maxauthsize : 16
-geniv : <none>
-
-name : __gcm-aes-aesni
-driver : cryptd(__driver-gcm-aes-aesni)
-module : kernel
-priority : 50
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : yes
-blocksize : 1
-ivsize : 8
-maxauthsize : 16
-geniv : <none>
-
-name : rfc4106(gcm(aes))
-driver : rfc4106-gcm-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : aead
-async : yes
-blocksize : 1
-ivsize : 8
-maxauthsize : 16
-geniv : <none>
-
-name : __gcm-aes-aesni
-driver : __driver-gcm-aes-aesni
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : no
-blocksize : 1
-ivsize : 8
-maxauthsize : 16
-geniv : <none>
-
-name : __xts(aes)
-driver : __xts-aes-aesni
-module : kernel
-priority : 401
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 32
-max keysize : 64
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ctr(aes)
-driver : __ctr-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __cbc(aes)
-driver : __cbc-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ecb(aes)
-driver : __ecb-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 0
-chunksize : 16
-walksize : 16
-
-name : __aes
-driver : __aes-aesni
-module : kernel
-priority : 300
-refcnt : 1
-selftest : passed
-internal : yes
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-name : aes
-driver : aes-aesni
-module : kernel
-priority : 300
-refcnt : 8
-selftest : passed
-internal : no
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-name : hmac(sha1)
-driver : hmac(sha1-generic)
-module : kernel
-priority : 100
-refcnt : 9
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 20
-
-name : ghash
-driver : ghash-generic
-module : kernel
-priority : 100
-refcnt : 3
-selftest : passed
-internal : no
-type : shash
-blocksize : 16
-digestsize : 16
-
-name : jitterentropy_rng
-driver : jitterentropy_rng
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha256
-module : kernel
-priority : 221
-refcnt : 2
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha512
-module : kernel
-priority : 220
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha384
-module : kernel
-priority : 219
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha1
-module : kernel
-priority : 218
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha256
-module : kernel
-priority : 217
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha512
-module : kernel
-priority : 216
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha384
-module : kernel
-priority : 215
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha1
-module : kernel
-priority : 214
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_ctr_aes256
-module : kernel
-priority : 213
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_ctr_aes192
-module : kernel
-priority : 212
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_ctr_aes128
-module : kernel
-priority : 211
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : hmac(sha256)
-driver : hmac(sha256-generic)
-module : kernel
-priority : 100
-refcnt : 10
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 32
-
-name : stdrng
-driver : drbg_pr_hmac_sha256
-module : kernel
-priority : 210
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_hmac_sha512
-module : kernel
-priority : 209
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_hmac_sha384
-module : kernel
-priority : 208
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_hmac_sha1
-module : kernel
-priority : 207
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha256
-module : kernel
-priority : 206
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha512
-module : kernel
-priority : 205
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha384
-module : kernel
-priority : 204
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha1
-module : kernel
-priority : 203
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_ctr_aes256
-module : kernel
-priority : 202
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_ctr_aes192
-module : kernel
-priority : 201
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_ctr_aes128
-module : kernel
-priority : 200
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : 842
-driver : 842-scomp
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : 842
-driver : 842-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : lzo-rle
-driver : lzo-rle-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : lzo-rle
-driver : lzo-rle-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : lzo
-driver : lzo-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : lzo
-driver : lzo-generic
-module : kernel
-priority : 0
-refcnt : 9
-selftest : passed
-internal : no
-type : compression
-
-name : crct10dif
-driver : crct10dif-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 2
-
-name : crc32c
-driver : crc32c-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 4
-
-name : zlib-deflate
-driver : zlib-deflate-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : deflate
-driver : deflate-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : deflate
-driver : deflate-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : aes
-driver : aes-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-name : sha224
-driver : sha224-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 28
-
-name : sha256
-driver : sha256-generic
-module : kernel
-priority : 100
-refcnt : 11
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 32
-
-name : sha1
-driver : sha1-generic
-module : kernel
-priority : 100
-refcnt : 11
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 20
-
-name : md5
-driver : md5-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 16
-
-name : ecb(cipher_null)
-driver : ecb-cipher_null
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 0
-max keysize : 0
-ivsize : 0
-chunksize : 1
-walksize : 1
-
-name : digest_null
-driver : digest_null-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 0
-
-name : compress_null
-driver : compress_null-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : cipher_null
-driver : cipher_null-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : cipher
-blocksize : 1
-min keysize : 0
-max keysize : 0
-
-name : rsa
-driver : rsa-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : akcipher
-
-name : dh
-driver : dh-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : kpp
-
-name : aes
-driver : aes-asm
-module : kernel
-priority : 200
-refcnt : 1
-selftest : passed
-internal : no
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-Mode: 444
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/diskstats
-Lines: 52
- 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0
- 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0
- 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0
- 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0
- 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0
- 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0
- 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0
- 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0
- 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0
- 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0
- 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0
- 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0
- 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0
- 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0
- 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0
- 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0
- 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0
- 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0
- 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0
- 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0
- 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0
- 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0
- 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0
- 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0
- 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804
- 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36
- 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32
- 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60
- 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428
- 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256
- 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84
- 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416
- 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104
- 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44
- 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632
- 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156
- 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24
- 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68
- 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80
- 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228
- 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720
- 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992
- 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0
- 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546
- 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16
- 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970
- 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130
- 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0
- 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130
- 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182
- 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0
- 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/fs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/fs/fscache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/fs/fscache/stats
-Lines: 24
-FS-Cache statistics
-Cookies: idx=3 dat=67877 spc=0
-Objects: alc=67473 nal=0 avl=67473 ded=388
-ChkAux : non=12 ok=33 upd=44 obs=55
-Pages : mrk=547164 unc=364577
-Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26
-Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85
-Invals : n=14 run=13
-Updates: n=7 nul=3 run=8
-Relinqs: n=394 nul=1 wcr=2 rtr=3
-AttrChg: n=6 ok=5 nbf=4 oom=3 run=2
-Allocs : n=20 ok=19 wt=18 nbf=17 int=16
-Allocs : ops=15 owt=14 abt=13
-Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43
-Retrvls: ops=151959 owt=42747 abt=44
-Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14
-Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43
-VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66
-Ops : pend=42753 run=221129 enq=628798 can=11 rej=88
-Ops : ini=377538 dfr=27 rel=377538 gc=37
-CacheOp: alo=1 luo=2 luc=3 gro=4
-CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10
-CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17
-CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/fs/xfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/fs/xfs/stat
-Lines: 23
-extent_alloc 92447 97589 92448 93751
-abt 0 0 0 0
-blk_map 1767055 188820 184891 92447 92448 2140766 0
-bmbt 0 0 0 0
-dir 185039 92447 92444 136422
-trans 706 944304 0
-ig 185045 58807 0 126238 0 33637 22
-log 2883 113448 9 17360 739
-push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
-xstrat 92447 0
-rw 107739 94045
-attr 4 0 0 0
-icluster 8677 7849 135802
-vnodes 92601 0 0 0 92444 92444 92444 0
-buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
-abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
-abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
-bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
-fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-qm 0 0 0 0 0 0 0 0
-xpc 399724544 92823103 86219234
-debug 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/loadavg
-Lines: 1
-0.02 0.04 0.05 1/497 11947
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/mdstat
-Lines: 60
-Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
-
-md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S)
- 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
-
-md127 : active raid1 sdi2[0] sdj2[1]
- 312319552 blocks [2/2] [UU]
-
-md0 : active raid1 sdi1[0] sdj1[1]
- 248896 blocks [2/2] [UU]
-
-md4 : inactive raid1 sda3[0](F) sdb3[1](S)
- 4883648 blocks [2/2] [UU]
-
-md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0]
- 195310144 blocks [2/1] [U_]
- [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
-
-md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S)
- 195310144 blocks [2/2] [UU]
- [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
-
-md201 : active raid1 sda3[0] sdb3[1]
- 1993728 blocks super 1.2 [2/2] [UU]
- [=>...................] check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec
-
-md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F)
- 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
- bitmap: 0/30 pages [0KB], 65536KB chunk
-
-md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S)
- 523968 blocks super 1.2 [4/4] [UUUU]
- resync=DELAYED
-
-md10 : active raid0 sda1[0] sdb1[1]
- 314159265 blocks 64k chunks
-
-md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S)
- 4190208 blocks super 1.2 [2/2] [UU]
- resync=PENDING
-
-md12 : active raid0 sdc2[0] sdd2[1]
- 3886394368 blocks super 1.2 512k chunks
-
-md126 : active raid0 sdb[1] sdc[0]
- 1855870976 blocks super external:/md127/0 128k chunks
-
-md219 : inactive sdb[2](S) sdc[1](S) sda[0](S)
- 7932 blocks super external:imsm
-
-md00 : active raid0 xvdb[0]
- 4186624 blocks super 1.2 256k chunks
-
-md120 : active linear sda1[1] sdb1[0]
- 2095104 blocks super 1.2 0k rounding
-
-md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0]
- 322560 blocks super 1.2 512k chunks
-
-unused devices: <none>
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/meminfo
-Lines: 42
-MemTotal: 15666184 kB
-MemFree: 440324 kB
-Buffers: 1020128 kB
-Cached: 12007640 kB
-SwapCached: 0 kB
-Active: 6761276 kB
-Inactive: 6532708 kB
-Active(anon): 267256 kB
-Inactive(anon): 268 kB
-Active(file): 6494020 kB
-Inactive(file): 6532440 kB
-Unevictable: 0 kB
-Mlocked: 0 kB
-SwapTotal: 0 kB
-SwapFree: 0 kB
-Dirty: 768 kB
-Writeback: 0 kB
-AnonPages: 266216 kB
-Mapped: 44204 kB
-Shmem: 1308 kB
-Slab: 1807264 kB
-SReclaimable: 1738124 kB
-SUnreclaim: 69140 kB
-KernelStack: 1616 kB
-PageTables: 5288 kB
-NFS_Unstable: 0 kB
-Bounce: 0 kB
-WritebackTmp: 0 kB
-CommitLimit: 7833092 kB
-Committed_AS: 530844 kB
-VmallocTotal: 34359738367 kB
-VmallocUsed: 36596 kB
-VmallocChunk: 34359637840 kB
-HardwareCorrupted: 0 kB
-AnonHugePages: 12288 kB
-HugePages_Total: 0
-HugePages_Free: 0
-HugePages_Rsvd: 0
-HugePages_Surp: 0
-Hugepagesize: 2048 kB
-DirectMap4k: 91136 kB
-DirectMap2M: 16039936 kB
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/net
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/arp
-Lines: 2
-IP address HW type Flags HW address Mask Device
-192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/dev
-Lines: 6
-Inter-| Receive | Transmit
- face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
-vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0
- lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0
-docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0
- eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/ip_vs
-Lines: 21
-IP Virtual Server version 1.2.1 (size=4096)
-Prot LocalAddress:Port Scheduler Flags
- -> RemoteAddress:Port Forward Weight ActiveConn InActConn
-TCP C0A80016:0CEA wlc
- -> C0A85216:0CEA Tunnel 100 248 2
- -> C0A85318:0CEA Tunnel 100 248 2
- -> C0A85315:0CEA Tunnel 100 248 1
-TCP C0A80039:0CEA wlc
- -> C0A85416:0CEA Tunnel 0 0 0
- -> C0A85215:0CEA Tunnel 100 1499 0
- -> C0A83215:0CEA Tunnel 100 1498 0
-TCP C0A80037:0CEA wlc
- -> C0A8321A:0CEA Tunnel 0 0 0
- -> C0A83120:0CEA Tunnel 100 0 0
-TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh
- -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0
- -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0
- -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1
-FWM 10001000 wlc
- -> C0A8321A:0CEA Route 0 0 1
- -> C0A83215:0CEA Route 0 0 2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/ip_vs_stats
-Lines: 6
- Total Incoming Outgoing Incoming Outgoing
- Conns Packets Packets Bytes Bytes
- 16AA370 E33656E5 0 51D8C8883AB3 0
-
- Conns/s Pkts/s Pkts/s Bytes/s Bytes/s
- 4 1FB3C 0 1282A8F 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/protocols
-Lines: 14
-protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em
-PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n
-PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n
-RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n
-UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n
-UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n
-TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y
-UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n
-UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n
-PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n
-RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n
-UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n
-TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y
-NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/net/rpc
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/rpc/nfs
-Lines: 5
-net 18628 0 18628 6
-rpc 4329785 0 4338291
-proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
-proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
-proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/rpc/nfsd
-Lines: 11
-rc 0 6 18622
-fh 0 0 0 0 0
-io 157286400 0
-th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
-ra 32 0 0 0 0 0 0 0 0 0 0 0
-net 18628 0 18628 6
-rpc 18628 0 0 0 0
-proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
-proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
-proc4 2 2 10853
-proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/sockstat
-Lines: 6
-sockets: used 1602
-TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22
-UDP: inuse 12 mem 62
-UDPLITE: inuse 0
-RAW: inuse 0
-FRAG: inuse 0 memory 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/sockstat6
-Lines: 5
-TCP6: inuse 17
-UDP6: inuse 9
-UDPLITE6: inuse 0
-RAW6: inuse 1
-FRAG6: inuse 0 memory 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/softnet_stat
-Lines: 2
-00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
-01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/softnet_stat.broken
-Lines: 1
-00015c73 00020e76 F0000769 00000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/net/stat
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/stat/arp_cache
-Lines: 3
-entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls
-00000014 00000001 00000002 00000003 00000004 00000005 00000006 00000007 00000008 00000009 0000000a 0000000b 0000000c
-00000014 0000000d 0000000e 0000000f 00000010 00000011 00000012 00000013 00000014 00000015 00000016 00000017 00000018
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/stat/ndisc_cache
-Lines: 3
-entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls
-00000024 000000f0 000000f1 000000f2 000000f3 000000f4 000000f5 000000f6 000000f7 000000f8 000000f9 000000fa 000000fb
-00000024 000000fc 000000fd 000000fe 000000ff 00000100 00000101 00000102 00000103 00000104 00000105 00000106 00000107
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/tcp
-Lines: 4
- sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
- 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
- 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
- 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/tcp6
-Lines: 3
- sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
- 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0
- 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/udp
-Lines: 4
- sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
- 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
- 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
- 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/udp6
-Lines: 3
- sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
- 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0
- 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/udp_broken
-Lines: 2
- sl local_address rem_address st
- 1: 00000000:0016 00000000:0000 0A
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/unix
-Lines: 6
-Num RefCount Protocol Flags Type St Inode Path
-0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control
-0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log
-0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 00000003 00000000 00000000 0001 03 5091797
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/unix_without_inode
-Lines: 6
-Num RefCount Protocol Flags Type St Path
-0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control
-0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log
-0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 00000003 00000000 00000000 0001 03
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/xfrm_stat
-Lines: 28
-XfrmInError 1
-XfrmInBufferError 2
-XfrmInHdrError 4
-XfrmInNoStates 3
-XfrmInStateProtoError 40
-XfrmInStateModeError 100
-XfrmInStateSeqError 6000
-XfrmInStateExpired 4
-XfrmInStateMismatch 23451
-XfrmInStateInvalid 55555
-XfrmInTmplMismatch 51
-XfrmInNoPols 65432
-XfrmInPolBlock 100
-XfrmInPolError 10000
-XfrmOutError 1000000
-XfrmOutBundleGenError 43321
-XfrmOutBundleCheckError 555
-XfrmOutNoStates 869
-XfrmOutStateProtoError 4542
-XfrmOutStateModeError 4
-XfrmOutStateSeqError 543
-XfrmOutStateExpired 565
-XfrmOutPolBlock 43456
-XfrmOutPolDead 7656
-XfrmOutPolError 1454
-XfrmFwdHdrError 6654
-XfrmOutStateInvalid 28765
-XfrmAcquireError 24532
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/pressure
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/pressure/cpu
-Lines: 1
-some avg10=0.10 avg60=2.00 avg300=3.85 total=15
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/pressure/io
-Lines: 2
-some avg10=0.10 avg60=2.00 avg300=3.85 total=15
-full avg10=0.20 avg60=3.00 avg300=4.95 total=25
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/pressure/memory
-Lines: 2
-some avg10=0.10 avg60=2.00 avg300=3.85 total=15
-full avg10=0.20 avg60=3.00 avg300=4.95 total=25
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/schedstat
-Lines: 6
-version 15
-timestamp 15819019232
-cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306
-domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0
-cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945
-domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/self
-SymlinkTo: 26231
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/slabinfo
-Lines: 302
-slabinfo - version: 2.1
-# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> : tunables <limit> <batchcount> <sharedfactor> : slabdata <active_slabs> <num_slabs> <sharedavail>
-pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0
-pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0
-nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0
-kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
-kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0
-kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0
-pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0
-x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0
-iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
-ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0
-bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
-bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0
-bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0
-fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0
-fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0
-squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
-fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
-fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0
-xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_ifork 0 0 376 21 2 : tunables 0 0 0 : slabdata 0 0 0
-xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
-nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
-nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
-nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0
-nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0
-nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
-nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
-rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
-rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
-fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
-jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
-jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0
-reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
-btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0
-ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0
-ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
-ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0
-ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0
-ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0
-ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0
-ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0
-ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0
-ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0
-jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0
-jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0
-jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
-jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0
-jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0
-jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
-ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0
-mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
-dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0
-dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
-dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
-dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0
-kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0
-io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
-dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0
-dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
-aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
-asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
-qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
-sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0
-scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0
-virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
-L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
-L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
-ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
-fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
-ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
-ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0
-RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0
-UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0
-UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0
-tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0
-TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0
-uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
-sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0
-sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
-sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0
-sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0
-sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
-btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0
-bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0
-mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0
-isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0
-io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
-kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0
-aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
-fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0
-dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
-bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
-fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
-audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0
-posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0
-iommu_devinfo 24 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
-iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0
-iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0
-UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0
-ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0
-ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
-tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
-inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
-xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
-ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0
-ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0
-ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0
-PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
-RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0
-UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0
-tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0
-request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0
-TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0
-hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0
-dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
-bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
-eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0
-eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0
-inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0
-scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
-bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0
-request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0
-blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0
-bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0
-biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0
-biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0
-biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0
-biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
-bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0
-ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
-ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0
-uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
-dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0
-dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0
-audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0
-sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0
-skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0
-skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0
-configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0
-file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0
-file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0
-fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0
-net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0
-task_delay_info 784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0
-taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0
-proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0
-pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0
-proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0
-seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0
-sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
-bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
-shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0
-kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0
-kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0
-mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
-filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0
-inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0
-dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0
-names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0
-hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
-ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
-avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
-avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
-avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
-iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
-lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0
-lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0
-key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
-buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0
-uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
-nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
-vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0
-mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0
-fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0
-files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0
-signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0
-sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0
-task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0
-cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0
-anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0
-anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0
-pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0
-Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0
-Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0
-Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
-Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0
-Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0
-trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0
-ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0
-pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0
-radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0
-task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
-vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0
-dma-kmalloc-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
-kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0
-kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0
-kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0
-kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0
-kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0
-kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0
-kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0
-kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0
-kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0
-kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0
-kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0
-kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0
-kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0
-kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0
-kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0
-kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0
-kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/stat
-Lines: 16
-cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
-cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
-cpu1 47869 23 16474 1110787 591 0 46 0 0 0
-cpu2 46504 36 15916 1112321 441 0 326 0 0 0
-cpu3 47054 102 15683 1113230 533 0 60 0 0 0
-cpu4 28413 25 10776 1140321 217 0 8 0 0 0
-cpu5 29271 101 11586 1136270 672 0 30 0 0 0
-cpu6 29152 36 10276 1139721 319 0 29 0 0 0
-cpu7 29098 268 10164 1139282 555 0 31 0 0 0
-intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-ctxt 38014093
-btime 1418183276
-processes 26442
-procs_running 2
-procs_blocked 1
-softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/swaps
-Lines: 2
-Filename Type Size Used Priority
-/dev/dm-2 partition 131068 176 -2
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/symlinktargets
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/README
-Lines: 2
-This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
-They are otherwise ignored by the tests
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/abc
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/def
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/ghi
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/uvw
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/xyz
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/kernel
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/kernel/random
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/entropy_avail
-Lines: 1
-3943
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/poolsize
-Lines: 1
-4096
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs
-Lines: 1
-60
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold
-Lines: 1
-3072
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/vm
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/admin_reserve_kbytes
-Lines: 1
-8192
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/block_dump
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/compact_unevictable_allowed
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_background_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_background_ratio
-Lines: 1
-10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_expire_centisecs
-Lines: 1
-3000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_ratio
-Lines: 1
-20
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_writeback_centisecs
-Lines: 1
-500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirtytime_expire_seconds
-Lines: 1
-43200
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/drop_caches
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/extfrag_threshold
-Lines: 1
-500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/hugetlb_shm_group
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/laptop_mode
-Lines: 1
-5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/legacy_va_layout
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/lowmem_reserve_ratio
-Lines: 1
-256 256 32 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/max_map_count
-Lines: 1
-65530
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/memory_failure_early_kill
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/memory_failure_recovery
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_free_kbytes
-Lines: 1
-67584
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_slab_ratio
-Lines: 1
-5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_unmapped_ratio
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/mmap_min_addr
-Lines: 1
-65536
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_hugepages
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_hugepages_mempolicy
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_overcommit_hugepages
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/numa_stat
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/numa_zonelist_order
-Lines: 1
-Node
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/oom_dump_tasks
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/oom_kill_allocating_task
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_kbytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_memory
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_ratio
-Lines: 1
-50
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/page-cluster
-Lines: 1
-3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/panic_on_oom
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/percpu_pagelist_fraction
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/stat_interval
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/swappiness
-Lines: 1
-60
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/user_reserve_kbytes
-Lines: 1
-131072
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/vfs_cache_pressure
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/watermark_boost_factor
-Lines: 1
-15000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/watermark_scale_factor
-Lines: 1
-10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/zone_reclaim_mode
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/zoneinfo
-Lines: 262
-Node 0, zone DMA
- per-node stats
- nr_inactive_anon 230981
- nr_active_anon 547580
- nr_inactive_file 316904
- nr_active_file 346282
- nr_unevictable 115467
- nr_slab_reclaimable 131220
- nr_slab_unreclaimable 47320
- nr_isolated_anon 0
- nr_isolated_file 0
- workingset_nodes 11627
- workingset_refault 466886
- workingset_activate 276925
- workingset_restore 84055
- workingset_nodereclaim 487
- nr_anon_pages 795576
- nr_mapped 215483
- nr_file_pages 761874
- nr_dirty 908
- nr_writeback 0
- nr_writeback_temp 0
- nr_shmem 224925
- nr_shmem_hugepages 0
- nr_shmem_pmdmapped 0
- nr_anon_transparent_hugepages 0
- nr_unstable 0
- nr_vmscan_write 12950
- nr_vmscan_immediate_reclaim 3033
- nr_dirtied 8007423
- nr_written 7752121
- nr_kernel_misc_reclaimable 0
- pages free 3952
- min 33
- low 41
- high 49
- spanned 4095
- present 3975
- managed 3956
- protection: (0, 2877, 7826, 7826, 7826)
- nr_free_pages 3952
- nr_zone_inactive_anon 0
- nr_zone_active_anon 0
- nr_zone_inactive_file 0
- nr_zone_active_file 0
- nr_zone_unevictable 0
- nr_zone_write_pending 0
- nr_mlock 0
- nr_page_table_pages 0
- nr_kernel_stack 0
- nr_bounce 0
- nr_zspages 0
- nr_free_cma 0
- numa_hit 1
- numa_miss 0
- numa_foreign 0
- numa_interleave 0
- numa_local 1
- numa_other 0
- pagesets
- cpu: 0
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 1
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 2
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 3
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 4
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 5
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 6
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 7
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- node_unreclaimable: 0
- start_pfn: 1
-Node 0, zone DMA32
- pages free 204252
- min 19510
- low 21059
- high 22608
- spanned 1044480
- present 759231
- managed 742806
- protection: (0, 0, 4949, 4949, 4949)
- nr_free_pages 204252
- nr_zone_inactive_anon 118558
- nr_zone_active_anon 106598
- nr_zone_inactive_file 75475
- nr_zone_active_file 70293
- nr_zone_unevictable 66195
- nr_zone_write_pending 64
- nr_mlock 4
- nr_page_table_pages 1756
- nr_kernel_stack 2208
- nr_bounce 0
- nr_zspages 0
- nr_free_cma 0
- numa_hit 113952967
- numa_miss 0
- numa_foreign 0
- numa_interleave 0
- numa_local 113952967
- numa_other 0
- pagesets
- cpu: 0
- count: 345
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 1
- count: 356
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 2
- count: 325
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 3
- count: 346
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 4
- count: 321
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 5
- count: 316
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 6
- count: 373
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 7
- count: 339
- high: 378
- batch: 63
- vm stats threshold: 48
- node_unreclaimable: 0
- start_pfn: 4096
-Node 0, zone Normal
- pages free 18553
- min 11176
- low 13842
- high 16508
- spanned 1308160
- present 1308160
- managed 1268711
- protection: (0, 0, 0, 0, 0)
- nr_free_pages 18553
- nr_zone_inactive_anon 112423
- nr_zone_active_anon 440982
- nr_zone_inactive_file 241429
- nr_zone_active_file 275989
- nr_zone_unevictable 49272
- nr_zone_write_pending 844
- nr_mlock 154
- nr_page_table_pages 9750
- nr_kernel_stack 15136
- nr_bounce 0
- nr_zspages 0
- nr_free_cma 0
- numa_hit 162718019
- numa_miss 0
- numa_foreign 0
- numa_interleave 26812
- numa_local 162718019
- numa_other 0
- pagesets
- cpu: 0
- count: 316
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 1
- count: 366
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 2
- count: 60
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 3
- count: 256
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 4
- count: 253
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 5
- count: 159
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 6
- count: 311
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 7
- count: 264
- high: 378
- batch: 63
- vm stats threshold: 56
- node_unreclaimable: 0
- start_pfn: 1048576
-Node 0, zone Movable
- pages free 0
- min 0
- low 0
- high 0
- spanned 0
- present 0
- managed 0
- protection: (0, 0, 0, 0, 0)
-Node 0, zone Device
- pages free 0
- min 0
- low 0
- high 0
- spanned 0
- present 0
- managed 0
- protection: (0, 0, 0, 0, 0)
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/dm-0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/dm-0/stat
-Lines: 1
-6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/sda
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/sda/queue
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/add_random
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/chunk_sectors
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/dax
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_granularity
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_max_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_zeroes_data
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/fua
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/hw_sector_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/io_poll
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/io_poll_delay
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/io_timeout
-Lines: 1
-30000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/sda/queue/iosched
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/back_seek_max
-Lines: 1
-16384
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async
-Lines: 1
-250
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync
-Lines: 1
-125
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/low_latency
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/max_budget
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/slice_idle
-Lines: 1
-8
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us
-Lines: 1
-8000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/strict_guarantees
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/timeout_sync
-Lines: 1
-125
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iostats
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/logical_block_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_discard_segments
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb
-Lines: 1
-32767
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_integrity_segments
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_sectors_kb
-Lines: 1
-1280
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_segment_size
-Lines: 1
-65536
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_segments
-Lines: 1
-168
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/minimum_io_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/nomerges
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/nr_requests
-Lines: 1
-64
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/nr_zones
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/optimal_io_size
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/physical_block_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/read_ahead_kb
-Lines: 1
-128
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/rotational
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/rq_affinity
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/scheduler
-Lines: 1
-mq-deadline kyber [bfq] none
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/wbt_lat_usec
-Lines: 1
-75000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/write_cache
-Lines: 1
-write back
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/write_same_max_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/zoned
-Lines: 1
-none
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/stat
-Lines: 1
-9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/drm
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/drm/card0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/drm/card0/device
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/aer_dev_correctable
-Lines: 9
-RxErr 0
-BadTLP 0
-BadDLLP 0
-Rollover 0
-Timeout 0
-NonFatalErr 0
-CorrIntErr 0
-HeaderOF 0
-TOTAL_ERR_COR 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/aer_dev_fatal
-Lines: 19
-Undefined 0
-DLP 0
-SDES 0
-TLP 0
-FCP 0
-CmpltTO 0
-CmpltAbrt 0
-UnxCmplt 0
-RxOF 0
-MalfTLP 0
-ECRC 0
-UnsupReq 0
-ACSViol 0
-UncorrIntErr 0
-BlockedTLP 0
-AtomicOpBlocked 0
-TLPBlockedErr 0
-PoisonTLPBlocked 0
-TOTAL_ERR_FATAL 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/aer_dev_nonfatal
-Lines: 19
-Undefined 0
-DLP 0
-SDES 0
-TLP 0
-FCP 0
-CmpltTO 0
-CmpltAbrt 0
-UnxCmplt 0
-RxOF 0
-MalfTLP 0
-ECRC 0
-UnsupReq 0
-ACSViol 0
-UncorrIntErr 0
-BlockedTLP 0
-AtomicOpBlocked 0
-TLPBlockedErr 0
-PoisonTLPBlocked 0
-TOTAL_ERR_NONFATAL 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/ari_enabled
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/boot_vga
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/broken_parity_status
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/class
-Lines: 1
-0x030000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/consistent_dma_mask_bits
-Lines: 1
-44
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/current_link_speed
-Lines: 1
-8.0 GT/s PCIe
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/current_link_width
-Lines: 1
-16
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/d3cold_allowed
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/device
-Lines: 1
-0x687f
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/dma_mask_bits
-Lines: 1
-44
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/driver_override
-Lines: 1
-(null)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/gpu_busy_percent
-Lines: 1
-4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/irq
-Lines: 1
-95
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/local_cpulist
-Lines: 1
-0-15
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/local_cpus
-Lines: 1
-0000ffff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/max_link_speed
-Lines: 1
-8.0 GT/s PCIe
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/max_link_width
-Lines: 1
-16
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/mem_info_gtt_total
-Lines: 1
-8573157376
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/mem_info_gtt_used
-Lines: 1
-144560128
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/mem_info_vis_vram_total
-Lines: 1
-8573157376
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/mem_info_vis_vram_used
-Lines: 1
-1490378752
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/mem_info_vram_total
-Lines: 1
-8573157376
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/mem_info_vram_used
-Lines: 1
-1490378752
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/mem_info_vram_vendor
-Lines: 1
-samsung
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/modalias
-Lines: 1
-pci:v00001002d0000687Fsv00001043sd000004C4bc03sc00i00
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/msi_bus
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/numa_node
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pcie_bw
-Lines: 1
-6641 815 256
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pcie_replay_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/power_dpm_force_performance_level
-Lines: 1
-manual
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/power_dpm_state
-Lines: 1
-performance
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/power_state
-Lines: 1
-D0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_cur_state
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_dpm_dcefclk
-Lines: 5
-0: 600Mhz *
-1: 720Mhz
-2: 800Mhz
-3: 847Mhz
-4: 900Mhz
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_dpm_mclk
-Lines: 4
-0: 167Mhz *
-1: 500Mhz
-2: 800Mhz
-3: 945Mhz
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_dpm_pcie
-Lines: 2
-0: 8.0GT/s, x16
-1: 8.0GT/s, x16 *
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_dpm_sclk
-Lines: 8
-0: 852Mhz *
-1: 991Mhz
-2: 1084Mhz
-3: 1138Mhz
-4: 1200Mhz
-5: 1401Mhz
-6: 1536Mhz
-7: 1630Mhz
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_dpm_socclk
-Lines: 8
-0: 600Mhz
-1: 720Mhz *
-2: 800Mhz
-3: 847Mhz
-4: 900Mhz
-5: 960Mhz
-6: 1028Mhz
-7: 1107Mhz
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_features
-Lines: 32
-Current ppfeatures: 0x0000000019a1ff4f
-FEATURES BITMASK ENABLEMENT
-DPM_PREFETCHER 0x0000000000000001 Y
-GFXCLK_DPM 0x0000000000000002 Y
-UCLK_DPM 0x0000000000000004 Y
-SOCCLK_DPM 0x0000000000000008 Y
-UVD_DPM 0x0000000000000010 N
-VCE_DPM 0x0000000000000020 N
-ULV 0x0000000000000040 Y
-MP0CLK_DPM 0x0000000000000080 N
-LINK_DPM 0x0000000000000100 Y
-DCEFCLK_DPM 0x0000000000000200 Y
-AVFS 0x0000000000000400 Y
-GFXCLK_DS 0x0000000000000800 Y
-SOCCLK_DS 0x0000000000001000 Y
-LCLK_DS 0x0000000000002000 Y
-PPT 0x0000000000004000 Y
-TDC 0x0000000000008000 Y
-THERMAL 0x0000000000010000 Y
-GFX_PER_CU_CG 0x0000000000020000 N
-RM 0x0000000000040000 N
-DCEFCLK_DS 0x0000000000080000 N
-ACDC 0x0000000000100000 N
-VR0HOT 0x0000000000200000 Y
-VR1HOT 0x0000000000400000 N
-FW_CTF 0x0000000000800000 Y
-LED_DISPLAY 0x0000000001000000 Y
-FAN_CONTROL 0x0000000002000000 N
-FAST_PPT 0x0000000004000000 N
-DIDT 0x0000000008000000 Y
-ACG 0x0000000010000000 Y
-PCC_LIMIT 0x0000000020000000 N
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_force_state
-Lines: 1
-
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_mclk_od
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_num_states
-Lines: 3
-states: 2
-0 boot
-1 performance
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_od_clk_voltage
-Lines: 18
-OD_SCLK:
-0: 852Mhz 800mV
-1: 991Mhz 900mV
-2: 1084Mhz 950mV
-3: 1138Mhz 1000mV
-4: 1200Mhz 1050mV
-5: 1401Mhz 1100mV
-6: 1536Mhz 1150mV
-7: 1630Mhz 1200mV
-OD_MCLK:
-0: 167Mhz 800mV
-1: 500Mhz 800mV
-2: 800Mhz 950mV
-3: 945Mhz 1100mV
-OD_RANGE:
-SCLK: 852MHz 2400MHz
-MCLK: 167MHz 1500MHz
-VDDC: 800mV 1200mV
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_power_profile_mode
-Lines: 8
-NUM MODE_NAME BUSY_SET_POINT FPS USE_RLC_BUSY MIN_ACTIVE_LEVEL
- 0 BOOTUP_DEFAULT : 70 60 0 0
- 1 3D_FULL_SCREEN*: 70 60 1 3
- 2 POWER_SAVING : 90 60 0 0
- 3 VIDEO : 70 60 0 0
- 4 VR : 70 90 0 0
- 5 COMPUTE : 30 60 0 6
- 6 CUSTOM : 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/pp_sclk_od
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/product_name
-Lines: 1
-
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/product_number
-Lines: 1
-
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/resource
-Lines: 13
-0x0000007c00000000 0x0000007dffffffff 0x000000000014220c
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000007e00000000 0x0000007e0fffffff 0x000000000014220c
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x000000000000d000 0x000000000000d0ff 0x0000000000040101
-0x00000000fcd00000 0x00000000fcd7ffff 0x0000000000040200
-0x00000000fcd80000 0x00000000fcd9ffff 0x0000000000046200
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/revision
-Lines: 1
-0xc1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/serial_number
-Lines: 1
-
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/subsystem_device
-Lines: 1
-0x04c4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/subsystem_vendor
-Lines: 1
-0x1043
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/thermal_throttling_logging
-Lines: 1
-0000:09:00.0: thermal throttling logging enabled, with interval 60 seconds
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/uevent
-Lines: 6
-DRIVER=amdgpu
-PCI_CLASS=30000
-PCI_ID=1002:687F
-PCI_SUBSYS_ID=1043:04C4
-PCI_SLOT_NAME=0000:09:00.0
-MODALIAS=pci:v00001002d0000687Fsv00001043sd000004C4bc03sc00i00
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/unique_id
-Lines: 1
-0123456789abcdef
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/vbios_version
-Lines: 1
-115-D050PIL-100
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/drm/card0/device/vendor
-Lines: 1
-0x1002
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/fc_host
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/fc_host/host0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo
-Lines: 1
-30
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/fabric_name
-Lines: 1
-0x0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/node_name
-Lines: 1
-0x2000e0071bce95f2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_id
-Lines: 1
-0x000002
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_name
-Lines: 1
-0x1000e0071bce95f2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_state
-Lines: 1
-Online
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_type
-Lines: 1
-Point-To-Point (direct nport connection)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/speed
-Lines: 1
-16 Gbit
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/fc_host/host0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames
-Lines: 1
-0xffffffffffffffff
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/error_frames
-Lines: 1
-0x0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts
-Lines: 1
-0x13
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count
-Lines: 1
-0x2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count
-Lines: 1
-0x8
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count
-Lines: 1
-0x9
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count
-Lines: 1
-0x11
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count
-Lines: 1
-0x10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/nos_count
-Lines: 1
-0x12
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames
-Lines: 1
-0x3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/rx_words
-Lines: 1
-0x4
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset
-Lines: 1
-0x7
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames
-Lines: 1
-0x5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/tx_words
-Lines: 1
-0x6
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/supported_classes
-Lines: 1
-Class 3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/supported_speeds
-Lines: 1
-4 Gbit, 8 Gbit, 16 Gbit
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/symbolic_name
-Lines: 1
-Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/hfi1_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/board_id
-Lines: 1
-HPE 100Gb 1-port OP101 QSFP28 x16 PCIe Gen3 with Intel Omni-Path Adapter
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/fw_ver
-Lines: 1
-1.27.0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/hfi1_0/ports
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/hfi1_0/ports/1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/VL15_dropped
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/excessive_buffer_overrun_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/link_downed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/link_error_recovery
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/local_link_integrity_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_data
-Lines: 1
-345091702026
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_packets
-Lines: 1
-638036947
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_remote_physical_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_switch_relay_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_data
-Lines: 1
-273558326543
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_discards
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_packets
-Lines: 1
-568318856
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_wait
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/symbol_error
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/phys_state
-Lines: 1
-5: LinkUp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/rate
-Lines: 1
-100 Gb/sec (4X EDR)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/state
-Lines: 1
-4: ACTIVE
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/board_id
-Lines: 1
-SM_1141000001000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver
-Lines: 1
-2.31.5050
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/hca_type
-Lines: 1
-MT4099
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data
-Lines: 1
-2221223609
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets
-Lines: 1
-87169372
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data
-Lines: 1
-26509113295
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets
-Lines: 1
-85734114
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait
-Lines: 1
-3599
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state
-Lines: 1
-5: LinkUp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate
-Lines: 1
-40 Gb/sec (4X QDR)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/state
-Lines: 1
-4: ACTIVE
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data
-Lines: 1
-2460436784
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets
-Lines: 1
-89332064
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data
-Lines: 1
-26540356890
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets
-Lines: 1
-88622850
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait
-Lines: 1
-3846
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state
-Lines: 1
-5: LinkUp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate
-Lines: 1
-40 Gb/sec (4X QDR)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/state
-Lines: 1
-4: ACTIVE
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/net
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/net/eth0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/addr_assign_type
-Lines: 1
-3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/addr_len
-Lines: 1
-6
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/address
-Lines: 1
-01:01:01:01:01:01
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/broadcast
-Lines: 1
-ff:ff:ff:ff:ff:ff
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier_changes
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier_down_count
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier_up_count
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/dev_id
-Lines: 1
-0x20
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/device
-SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/dormant
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/duplex
-Lines: 1
-full
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/flags
-Lines: 1
-0x1303
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/ifalias
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/ifindex
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/iflink
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/link_mode
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/mtu
-Lines: 1
-1500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/name_assign_type
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/netdev_group
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/operstate
-Lines: 1
-up
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/phys_port_id
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/phys_port_name
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/phys_switch_id
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/speed
-Lines: 1
-1000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/tx_queue_len
-Lines: 1
-1000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/type
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/nvme
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/nvme/nvme0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/nvme/nvme0/firmware_rev
-Lines: 1
-1B2QEXP7
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/nvme/nvme0/model
-Lines: 1
-Samsung SSD 970 PRO 512GB
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/nvme/nvme0/serial
-Lines: 1
-S680HF8N190894I
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/nvme/nvme0/state
-Lines: 1
-live
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/power_supply/AC
-SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/power_supply/BAT0
-SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl/enabled
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw
-Lines: 1
-95000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_name
-Lines: 1
-long_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_time_window_us
-Lines: 1
-999424
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_name
-Lines: 1
-short_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_time_window_us
-Lines: 1
-2440
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/enabled
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/energy_uj
-Lines: 1
-240422366267
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/max_energy_range_uj
-Lines: 1
-262143328850
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/name
-Lines: 1
-package-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw
-Lines: 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_name
-Lines: 1
-long_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us
-Lines: 1
-976
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/enabled
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/energy_uj
-Lines: 1
-118821284256
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/max_energy_range_uj
-Lines: 1
-262143328850
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/name
-Lines: 1
-core
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl:a
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw
-Lines: 1
-95000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name
-Lines: 1
-long_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us
-Lines: 1
-999424
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name
-Lines: 1
-short_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us
-Lines: 1
-2440
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/enabled
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj
-Lines: 1
-240422366267
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj
-Lines: 1
-262143328850
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/name
-Lines: 1
-package-10
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/scsi_tape
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/scsi_tape/nst0
-SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/scsi_tape/nst0a
-SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/scsi_tape/nst0l
-SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/scsi_tape/nst0m
-SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/scsi_tape/st0
-SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/scsi_tape/st0a
-SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/scsi_tape/st0l
-SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/scsi_tape/st0m
-SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/cooling_device0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/cur_state
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/max_state
-Lines: 1
-50
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/type
-Lines: 1
-Processor
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/cooling_device1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/cur_state
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/max_state
-Lines: 1
-27
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/type
-Lines: 1
-intel_powerclamp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/thermal_zone0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/policy
-Lines: 1
-step_wise
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/temp
-Lines: 1
-49925
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/type
-Lines: 1
-bcm2835_thermal
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/thermal_zone1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/mode
-Lines: 1
-enabled
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/passive
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/policy
-Lines: 1
-step_wise
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/temp
-Lines: 1
--44000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/type
-Lines: 1
-acpitz
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device
-SymlinkTo: ../../../ACPI0003:00
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async
-Lines: 1
-disabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control
-Lines: 1
-auto
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled
-Lines: 1
-disabled
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status
-Lines: 1
-unsupported
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup
-Lines: 1
-enabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms
-Lines: 1
-10598
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem
-SymlinkTo: ../../../../../../../../../class/power_supply
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type
-Lines: 1
-Mains
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent
-Lines: 2
-POWER_SUPPLY_NAME=AC
-POWER_SUPPLY_ONLINE=0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm
-Lines: 1
-2369000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity
-Lines: 1
-98
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level
-Lines: 1
-Normal
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold
-Lines: 1
-95
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device
-SymlinkTo: ../../../PNP0C0A:00
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full
-Lines: 1
-50060000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design
-Lines: 1
-47520000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now
-Lines: 1
-49450000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer
-Lines: 1
-LGC
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name
-Lines: 1
-LNV-45N1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async
-Lines: 1
-disabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control
-Lines: 1
-auto
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled
-Lines: 1
-disabled
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status
-Lines: 1
-unsupported
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now
-Lines: 1
-4830000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number
-Lines: 1
-38109
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status
-Lines: 1
-Discharging
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem
-SymlinkTo: ../../../../../../../../../class/power_supply
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology
-Lines: 1
-Li-ion
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type
-Lines: 1
-Battery
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent
-Lines: 16
-POWER_SUPPLY_NAME=BAT0
-POWER_SUPPLY_STATUS=Discharging
-POWER_SUPPLY_PRESENT=1
-POWER_SUPPLY_TECHNOLOGY=Li-ion
-POWER_SUPPLY_CYCLE_COUNT=0
-POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000
-POWER_SUPPLY_VOLTAGE_NOW=11750000
-POWER_SUPPLY_POWER_NOW=5064000
-POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000
-POWER_SUPPLY_ENERGY_FULL=47390000
-POWER_SUPPLY_ENERGY_NOW=40730000
-POWER_SUPPLY_CAPACITY=85
-POWER_SUPPLY_CAPACITY_LEVEL=Normal
-POWER_SUPPLY_MODEL_NAME=LNV-45N1
-POWER_SUPPLY_MANUFACTURER=LGC
-POWER_SUPPLY_SERIAL_NUMBER=38109
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design
-Lines: 1
-10800000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now
-Lines: 1
-12229000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/in_flight
-Lines: 1
-1EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/io_ns
-Lines: 1
-9247011087720EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/other_cnt
-Lines: 1
-1409EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_byte_cnt
-Lines: 1
-979383912EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_cnt
-Lines: 1
-3741EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_ns
-Lines: 1
-33788355744EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/resid_cnt
-Lines: 1
-19EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_byte_cnt
-Lines: 1
-1496246784000EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_cnt
-Lines: 1
-53772916EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_ns
-Lines: 1
-5233597394395EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/in_flight
-Lines: 1
-1EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/io_ns
-Lines: 1
-9247011087720EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/other_cnt
-Lines: 1
-1409EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_byte_cnt
-Lines: 1
-979383912EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_cnt
-Lines: 1
-3741EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_ns
-Lines: 1
-33788355744EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/resid_cnt
-Lines: 1
-19EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_byte_cnt
-Lines: 1
-1496246784000EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_cnt
-Lines: 1
-53772916EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_ns
-Lines: 1
-5233597394395EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/in_flight
-Lines: 1
-1EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/io_ns
-Lines: 1
-9247011087720EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/other_cnt
-Lines: 1
-1409EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_byte_cnt
-Lines: 1
-979383912EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_cnt
-Lines: 1
-3741EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_ns
-Lines: 1
-33788355744EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/resid_cnt
-Lines: 1
-19EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_byte_cnt
-Lines: 1
-1496246784000EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_cnt
-Lines: 1
-53772916EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_ns
-Lines: 1
-5233597394395EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/in_flight
-Lines: 1
-1EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/io_ns
-Lines: 1
-9247011087720EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/other_cnt
-Lines: 1
-1409EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_byte_cnt
-Lines: 1
-979383912EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_cnt
-Lines: 1
-3741EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_ns
-Lines: 1
-33788355744EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/resid_cnt
-Lines: 1
-19EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_byte_cnt
-Lines: 1
-1496246784000EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_cnt
-Lines: 1
-53772916EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_ns
-Lines: 1
-5233597394395EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/in_flight
-Lines: 1
-1EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/io_ns
-Lines: 1
-9247011087720EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/other_cnt
-Lines: 1
-1409EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_byte_cnt
-Lines: 1
-979383912EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_cnt
-Lines: 1
-3741EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_ns
-Lines: 1
-33788355744EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/resid_cnt
-Lines: 1
-19EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_byte_cnt
-Lines: 1
-1496246784000EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_cnt
-Lines: 1
-53772916EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_ns
-Lines: 1
-5233597394395EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/in_flight
-Lines: 1
-1EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/io_ns
-Lines: 1
-9247011087720EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/other_cnt
-Lines: 1
-1409EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_byte_cnt
-Lines: 1
-979383912EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_cnt
-Lines: 1
-3741EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_ns
-Lines: 1
-33788355744EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/resid_cnt
-Lines: 1
-19EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_byte_cnt
-Lines: 1
-1496246784000EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_cnt
-Lines: 1
-53772916EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_ns
-Lines: 1
-5233597394395EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/in_flight
-Lines: 1
-1EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/io_ns
-Lines: 1
-9247011087720EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/other_cnt
-Lines: 1
-1409EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_byte_cnt
-Lines: 1
-979383912EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_cnt
-Lines: 1
-3741EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_ns
-Lines: 1
-33788355744EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/resid_cnt
-Lines: 1
-19EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_byte_cnt
-Lines: 1
-1496246784000EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_cnt
-Lines: 1
-53772916EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_ns
-Lines: 1
-5233597394395EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/in_flight
-Lines: 1
-1EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/io_ns
-Lines: 1
-9247011087720EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/other_cnt
-Lines: 1
-1409EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_byte_cnt
-Lines: 1
-979383912EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_cnt
-Lines: 1
-3741EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_ns
-Lines: 1
-33788355744EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/resid_cnt
-Lines: 1
-19EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_byte_cnt
-Lines: 1
-1496246784000EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_cnt
-Lines: 1
-53772916EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_ns
-Lines: 1
-5233597394395EOF
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits
-Lines: 1
-289
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits
-Lines: 1
-546
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written
-Lines: 1
-512
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats
-Lines: 5
-Unused: 99%
-Metadata: 0%
-Average: 10473
-Sectors per Q: 64
-Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946]
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:1f.6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/ari_enabled
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/broken_parity_status
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/class
-Lines: 1
-0x020000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/consistent_dma_mask_bits
-Lines: 1
-64
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/d3cold_allowed
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/device
-Lines: 1
-0x15d7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/dma_mask_bits
-Lines: 1
-64
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/driver_override
-Lines: 1
-(null)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/irq
-Lines: 1
-140
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpulist
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpus
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/modalias
-Lines: 1
-pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/msi_bus
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/numa_node
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/resource
-Lines: 13
-0x00000000ec200000 0x00000000ec21ffff 0x0000000000040200
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/revision
-Lines: 1
-0x21
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_device
-Lines: 1
-0x225a
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_vendor
-Lines: 1
-0x17aa
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/uevent
-Lines: 6
-DRIVER=e1000e
-PCI_CLASS=20000
-PCI_ID=8086:15D7
-PCI_SUBSYS_ID=17AA:225A
-PCI_SLOT_NAME=0000:00:1f.6
-MODALIAS=pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/vendor
-Lines: 1
-0x8086
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/rbd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/rbd/0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/0/name
-Lines: 1
-demo
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/0/pool
-Lines: 1
-iscsi-images
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/rbd/1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/1/name
-Lines: 1
-wrong
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/1/pool
-Lines: 1
-wrong-images
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/clocksource
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/clocksource/clocksource0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource
-Lines: 1
-tsc hpet acpi_pm
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource
-Lines: 1
-tsc
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq
-SymlinkTo: ../cpufreq/policy0
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
-Lines: 1
-10084
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count
-Lines: 1
-34818
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0/topology
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings
-Lines: 1
-11
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list
-Lines: 1
-0,4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq
-Lines: 1
-1200195
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq
-Lines: 1
-3300000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq
-Lines: 1
-1200000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency
-Lines: 1
-4294967295
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus
-Lines: 1
-1
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors
-Lines: 1
-performance powersave
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver
-Lines: 1
-intel_pstate
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
-Lines: 1
-powersave
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq
-Lines: 1
-3300000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
-Lines: 1
-1200000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed
-Lines: 1
-<unsupported>
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count
-Lines: 1
-523
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count
-Lines: 1
-34818
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/topology
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings
-Lines: 1
-22
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list
-Lines: 1
-1,5
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpufreq
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq
-Lines: 1
-2400000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq
-Lines: 1
-800000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors
-Lines: 1
-performance powersave
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq
-Lines: 1
-1219917
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver
-Lines: 1
-intel_pstate
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor
-Lines: 1
-powersave
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq
-Lines: 1
-2400000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq
-Lines: 1
-800000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed
-Lines: 1
-<unsupported>
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/node
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/node/node1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/node/node1/vmstat
-Lines: 6
-nr_free_pages 1
-nr_zone_inactive_anon 2
-nr_zone_active_anon 3
-nr_zone_inactive_file 4
-nr_zone_active_file 5
-nr_zone_unevictable 6
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/node/node2
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/node/node2/vmstat
-Lines: 6
-nr_free_pages 7
-nr_zone_inactive_anon 8
-nr_zone_active_anon 9
-nr_zone_inactive_file 10
-nr_zone_active_file 11
-nr_zone_unevictable 12
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0
-Mode: 777
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits
-Lines: 1
-289
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits
-Lines: 1
-546
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug
-Lines: 7
-rate: 1.1M/sec
-dirty: 20.4G
-target: 20.4G
-proportional: 427.5k
-integral: 790.0k
-change: 321.5k/sec
-next io: 17ms
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0
-Mode: 777
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written
-Lines: 1
-512
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats
-Lines: 5
-Unused: 99%
-Metadata: 0%
-Average: 10473
-Sectors per Q: 64
-Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946]
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us
-Lines: 1
-1305
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits
-Lines: 1
-289
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits
-Lines: 1
-546
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used
-Lines: 1
-808189952
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used
-Lines: 1
-808189952
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes
-Lines: 1
-808189952
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly
-Lines: 1
-131072
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used
-Lines: 1
-933888
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used
-Lines: 1
-1867776
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags
-Lines: 1
-4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes
-Lines: 1
-1073741824
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes
-Lines: 1
-933888
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes
-Lines: 1
-1073741824
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used
-Lines: 1
-32768
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags
-Lines: 1
-2
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes
-Lines: 1
-8388608
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes
-Lines: 1
-8388608
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size
-Lines: 1
-20971520
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size
-Lines: 1
-20971520
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label
-Lines: 1
-fixture
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid
-Lines: 1
-0abb23a9-579b-43e6-ad30-227ef47fcb9d
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total
-Lines: 1
-644087808
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes
-Lines: 1
-644087808
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes
-Lines: 1
-644087808
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly
-Lines: 1
-262144
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used
-Lines: 1
-114688
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total
-Lines: 1
-429391872
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used
-Lines: 1
-114688
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags
-Lines: 1
-4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes
-Lines: 1
-429391872
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes
-Lines: 1
-114688
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes
-Lines: 1
-429391872
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags
-Lines: 1
-2
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22
-SymlinkTo: ../../../../devices/virtual/block/loop22
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23
-SymlinkTo: ../../../../devices/virtual/block/loop23
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24
-SymlinkTo: ../../../../devices/virtual/block/loop24
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25
-SymlinkTo: ../../../../devices/virtual/block/loop25
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid
-Lines: 1
-7f07c59f-6136-449c-ab87-e1cf2328731b
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sda1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sda1/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/xfs/sda1/stats/stats
-Lines: 1
-extent_alloc 1 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sdb1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sdb1/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/xfs/sdb1/stats/stats
-Lines: 1
-extent_alloc 2 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/fileio_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/fileio_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path
-Lines: 1
-/home/iscsi/file_back_1G
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/iblock_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path
-Lines: 1
-/dev/rbd1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rbd_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path
-Lines: 1
-/dev/rbd/iscsi-images/demo
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d
-SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-204950
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-10325
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-40325
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026
-SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-104950
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-20095
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-71235
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686
-SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-301950
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-10195
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-30195
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893
-SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-1234
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-1504
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-4733
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
index 0040753b1c..3c18c7610e 100644
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -26,7 +26,7 @@ const (
// DefaultSysMountPoint is the common mount point of the sys filesystem.
DefaultSysMountPoint = "/sys"
- // DefaultConfigfsMountPoint is the common mount point of the configfs
+ // DefaultConfigfsMountPoint is the common mount point of the configfs.
DefaultConfigfsMountPoint = "/sys/kernel/config"
)
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 22cb07a6bb..b030951faf 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -14,7 +14,7 @@
package util
import (
- "io/ioutil"
+ "os"
"strconv"
"strings"
)
@@ -66,7 +66,7 @@ func ParsePInt64s(ss []string) ([]*int64, error) {
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
- data, err := ioutil.ReadFile(path)
+ data, err := os.ReadFile(path)
if err != nil {
return 0, err
}
@@ -75,7 +75,7 @@ func ReadUintFromFile(path string) (uint64, error) {
// ReadIntFromFile reads a file and attempts to parse a int64 from it.
func ReadIntFromFile(path string) (int64, error) {
- data, err := ioutil.ReadFile(path)
+ data, err := os.ReadFile(path)
if err != nil {
return 0, err
}
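The two hunks above are part of procfs dropping the deprecated io/ioutil package in favor of the Go 1.16+ equivalents os.ReadFile and io.ReadAll. A minimal standalone sketch of the same read-then-parse pattern (readUintFromFile and the pid_max path are illustrative, not the vendored code):

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readUintFromFile mirrors the vendored helper: read the whole file with
// os.ReadFile (the modern replacement for ioutil.ReadFile), trim whitespace,
// and parse the contents as an unsigned integer.
func readUintFromFile(path string) (uint64, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}

func main() {
	v, err := readUintFromFile("/proc/sys/kernel/pid_max")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println("pid_max =", v)
}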
diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
index 8051161b2a..71b7a70ebd 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
@@ -15,17 +15,16 @@ package util
import (
"io"
- "io/ioutil"
"os"
)
-// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file.
-// This is similar to ioutil.ReadFile but without the call to os.Stat, because
+// ReadFileNoStat uses io.ReadAll to read contents of entire file.
+// This is similar to os.ReadFile but without the call to os.Stat, because
// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
-// Reads a max file size of 512kB. For files larger than this, a scanner
+// Reads a max file size of 1024kB. For files larger than this, a scanner
// should be used.
func ReadFileNoStat(filename string) ([]byte, error) {
- const maxBufferSize = 1024 * 512
+ const maxBufferSize = 1024 * 1024
f, err := os.Open(filename)
if err != nil {
@@ -34,5 +33,5 @@ func ReadFileNoStat(filename string) ([]byte, error) {
defer f.Close()
reader := io.LimitReader(f, maxBufferSize)
- return ioutil.ReadAll(reader)
+ return io.ReadAll(reader)
}
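Besides the ioutil removal, this hunk raises the read cap from 512 KiB to 1 MiB. The design point is that an os.Stat size hint is useless here: many /proc and /sys files advertise a size of 0 or 4096, so the helper bounds the read explicitly with io.LimitReader instead. A standalone sketch of the pattern (readFileNoStat is illustrative):

package main

import (
	"fmt"
	"io"
	"os"
)

// readFileNoStat mirrors the vendored ReadFileNoStat: skip the stat-based
// size hint and cap the read at a fixed bound, here the new 1 MiB limit.
func readFileNoStat(filename string) ([]byte, error) {
	const maxBufferSize = 1024 * 1024 // 1 MiB, raised from 512 KiB

	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return io.ReadAll(io.LimitReader(f, maxBufferSize))
}

func main() {
	b, err := readFileNoStat("/proc/loadavg")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("%s", b)
}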
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
index c07de0b6c9..1ab875ceec 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
@@ -11,7 +11,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build linux,!appengine
+//go:build (linux || darwin) && !appengine
+// +build linux darwin
+// +build !appengine
package util
@@ -21,7 +23,7 @@ import (
"syscall"
)
-// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
//
// Note that this function will not read files larger than 128 bytes.
@@ -33,7 +35,7 @@ func SysReadFile(file string) (string, error) {
defer f.Close()
// On some machines, hwmon drivers are broken and return EAGAIN. This causes
- // Go's ioutil.ReadFile implementation to poll forever.
+ // Go's os.ReadFile implementation to poll forever.
//
// Since we either want to read data or bail immediately, do the simplest
// possible read using syscall directly.
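As the comments above explain, SysReadFile sidesteps Go's poller entirely because some broken hwmon drivers return EAGAIN forever. A standalone Linux-only sketch of the same single-read approach (sysReadFile and the thermal_zone path are illustrative assumptions):

package main

import (
	"bytes"
	"fmt"
	"os"
	"syscall"
)

// sysReadFile issues one raw syscall.Read into a small buffer, so a driver
// that keeps returning EAGAIN fails immediately instead of making Go's
// runtime poller retry indefinitely.
func sysReadFile(file string) (string, error) {
	f, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer f.Close()

	b := make([]byte, 128)
	n, err := syscall.Read(int(f.Fd()), b)
	if err != nil {
		return "", err
	}
	return string(bytes.TrimSpace(b[:n])), nil
}

func main() {
	s, err := sysReadFile("/sys/class/thermal/thermal_zone0/temp")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println("raw value:", s)
}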
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
index bd55b45377..1d86f5e63f 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
@@ -11,7 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build linux,appengine !linux
+//go:build (linux && appengine) || (!linux && !darwin)
+// +build linux,appengine !linux,!darwin
package util
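The constraint rewrites in these two files pair the Go 1.17 //go:build expression with the legacy // +build lines that pre-1.17 toolchains still read; the two forms must state the same condition, and gofmt keeps them in sync. A sketch of such a file header (a hypothetical file, not part of this diff):

//go:build (linux || darwin) && !appengine
// +build linux darwin
// +build !appengine

// A file guarded this way builds on Linux and Darwin, except under App
// Engine. Go 1.17+ reads only the //go:build line; older toolchains
// evaluate the // +build lines, where each line ANDs with the others.
package util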
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
index 89e447746c..391c07957e 100644
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"net"
"os"
"strconv"
@@ -84,7 +83,7 @@ func parseIPVSStats(r io.Reader) (IPVSStats, error) {
stats IPVSStats
)
- statContent, err := ioutil.ReadAll(r)
+ statContent, err := io.ReadAll(r)
if err != nil {
return IPVSStats{}, err
}
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
index da3a941d60..db88566bdf 100644
--- a/vendor/github.com/prometheus/procfs/kernel_random.go
+++ b/vendor/github.com/prometheus/procfs/kernel_random.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !windows
// +build !windows
package procfs
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
index 0cce190ec2..0096cafbdf 100644
--- a/vendor/github.com/prometheus/procfs/loadavg.go
+++ b/vendor/github.com/prometheus/procfs/loadavg.go
@@ -21,7 +21,7 @@ import (
"github.com/prometheus/procfs/internal/util"
)
-// LoadAvg represents an entry in /proc/loadavg
+// LoadAvg represents an entry in /proc/loadavg.
type LoadAvg struct {
Load1 float64
Load5 float64
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index f0b9e5f75a..a95c889cb9 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -15,7 +15,7 @@ package procfs
import (
"fmt"
- "io/ioutil"
+ "os"
"regexp"
"strconv"
"strings"
@@ -64,7 +64,7 @@ type MDStat struct {
// structs containing the relevant info. More information available here:
// https://raid.wiki.kernel.org/index.php/Mdstat
func (fs FS) MDStat() ([]MDStat, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
+ data, err := os.ReadFile(fs.proc.Path("mdstat"))
if err != nil {
return nil, err
}
@@ -166,8 +166,12 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
}
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
+ statusFields := strings.Fields(statusLine)
+ if len(statusFields) < 1 {
+ return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine)
+ }
- sizeStr := strings.Fields(statusLine)[0]
+ sizeStr := statusFields[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
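The evalStatusLine change guards the strings.Fields result before indexing it, turning a panic on a blank or malformed mdstat status line into an error. The same defensive pattern in isolation (firstFieldAsInt is illustrative):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// firstFieldAsInt checks that strings.Fields returned at least one element
// before indexing, so an empty status line yields an error, not a panic.
func firstFieldAsInt(statusLine string) (int64, error) {
	statusFields := strings.Fields(statusLine)
	if len(statusFields) < 1 {
		return 0, fmt.Errorf("unexpected statusLine %q", statusLine)
	}
	return strconv.ParseInt(statusFields[0], 10, 64)
}

func main() {
	for _, line := range []string{"244198528 blocks super 1.2", "   "} {
		if n, err := firstFieldAsInt(line); err != nil {
			fmt.Println("error:", err)
		} else {
			fmt.Println("size:", n)
		}
	}
}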
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
index 9964a3600b..8300daca05 100644
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -25,7 +25,7 @@ import (
)
// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
-// and contains netfilter conntrack statistics at one CPU core
+// and contains netfilter conntrack statistics at one CPU core.
type ConntrackStatEntry struct {
Entries uint64
Found uint64
@@ -38,12 +38,12 @@ type ConntrackStatEntry struct {
SearchRestart uint64
}
-// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
+// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores.
func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
}
-// Parses a slice of ConntrackStatEntries from the given filepath
+// Parses a slice of ConntrackStatEntries from the given filepath.
func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
// This file is small and can be read with one syscall.
b, err := util.ReadFileNoStat(path)
@@ -61,7 +61,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
return stat, nil
}
-// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
+// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries.
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
var entries []ConntrackStatEntry
@@ -79,7 +79,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
return entries, nil
}
-// Parses a ConntrackStatEntry from given array of fields
+// Parses a ConntrackStatEntry from given array of fields.
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
if len(fields) != 17 {
return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
@@ -143,7 +143,7 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
return entry, nil
}
-// Parses a uint64 from given hex in string
+// Parses a uint64 from given hex in string.
func parseConntrackStatField(field string) (uint64, error) {
val, err := strconv.ParseUint(field, 16, 64)
if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
index 47a710befb..e66208aa05 100644
--- a/vendor/github.com/prometheus/procfs/net_dev.go
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -87,17 +87,17 @@ func newNetDev(file string) (NetDev, error) {
// parseLine parses a single line from the /proc/net/dev file. Header lines
// must be filtered prior to calling this method.
func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
- parts := strings.SplitN(rawLine, ":", 2)
- if len(parts) != 2 {
+ idx := strings.LastIndex(rawLine, ":")
+ if idx == -1 {
return nil, errors.New("invalid net/dev line, missing colon")
}
- fields := strings.Fields(strings.TrimSpace(parts[1]))
+ fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
var err error
line := &NetDevLine{}
// Interface Name
- line.Name = strings.TrimSpace(parts[0])
+ line.Name = strings.TrimSpace(rawLine[:idx])
if line.Name == "" {
return nil, errors.New("invalid net/dev line, empty interface name")
}
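parseLine now splits on the last colon rather than the first. Each /proc/net/dev counter row is prefixed with "name:", and interface aliases such as eth0:1 contain a colon themselves, so SplitN on the first colon would truncate the name and corrupt the first counter field. A standalone sketch of the fixed split (splitNetDevLine is illustrative):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// splitNetDevLine splits a /proc/net/dev row at the LAST colon, keeping
// aliased interface names like "eth0:1" intact.
func splitNetDevLine(rawLine string) (name, fields string, err error) {
	idx := strings.LastIndex(rawLine, ":")
	if idx == -1 {
		return "", "", errors.New("invalid net/dev line, missing colon")
	}
	return strings.TrimSpace(rawLine[:idx]), strings.TrimSpace(rawLine[idx+1:]), nil
}

func main() {
	name, rest, err := splitNetDevLine("  eth0:1: 1234 10 0 0 0 0 0 0 5678 20 0 0 0 0 0 0")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("iface=%q counters=%q\n", name, rest)
}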
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
index 8c9ee3de87..7fd57d7f46 100644
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -34,7 +34,7 @@ const (
readLimit = 4294967296 // Byte -> 4 GiB
)
-// this contains generic data structures for both udp and tcp sockets
+// This contains generic data structures for both udp and tcp sockets.
type (
// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
NetIPSocket []*netIPSocketLine
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
index 8c6de3791b..374b6f73f8 100644
--- a/vendor/github.com/prometheus/procfs/net_protocols.go
+++ b/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -23,7 +23,7 @@ import (
"github.com/prometheus/procfs/internal/util"
)
-// NetProtocolStats stores the contents from /proc/net/protocols
+// NetProtocolStats stores the contents from /proc/net/protocols.
type NetProtocolStats map[string]NetProtocolStatLine
// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
@@ -41,7 +41,7 @@ type NetProtocolStatLine struct {
Capabilities NetProtocolCapabilities
}
-// NetProtocolCapabilities contains a list of capabilities for each protocol
+// NetProtocolCapabilities contains a list of capabilities for each protocol.
type NetProtocolCapabilities struct {
Close bool // 8
Connect bool // 9
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
index 46f12c61d3..a94f86dc4a 100644
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -30,13 +30,13 @@ import (
// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
-// SoftnetStat contains a single row of data from /proc/net/softnet_stat
+// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
type SoftnetStat struct {
- // Number of processed packets
+ // Number of processed packets.
Processed uint32
- // Number of dropped packets
+ // Number of dropped packets.
Dropped uint32
- // Number of times processing packets ran out of quota
+ // Number of times processing packets ran out of quota.
TimeSqueezed uint32
}
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go
index eed07c7d77..f9d9d243db 100644
--- a/vendor/github.com/prometheus/procfs/xfrm.go
+++ b/vendor/github.com/prometheus/procfs/net_xfrm.go
@@ -79,10 +79,13 @@ type XfrmStat struct {
// Policy is dead
XfrmOutPolDead int
// Policy Error
- XfrmOutPolError int
- XfrmFwdHdrError int
+ XfrmOutPolError int
+ // Forward routing of a packet is not allowed
+ XfrmFwdHdrError int
+ // State is invalid, perhaps expired
XfrmOutStateInvalid int
- XfrmAcquireError int
+ // State hasn’t been fully acquired before use
+ XfrmAcquireError int
}
// NewXfrmStat reads the xfrm_stat statistics.
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
index 94d892f113..dcea9c5a67 100644
--- a/vendor/github.com/prometheus/procfs/netstat.go
+++ b/vendor/github.com/prometheus/procfs/netstat.go
@@ -21,13 +21,13 @@ import (
"strings"
)
-// NetStat contains statistics for all the counters from one file
+// NetStat contains statistics for all the counters from one file.
type NetStat struct {
- Filename string
Stats map[string][]uint64
+ Filename string
}
-// NetStat retrieves stats from /proc/net/stat/
+// NetStat retrieves stats from `/proc/net/stat/`.
func (fs FS) NetStat() ([]NetStat, error) {
statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
if err != nil {
@@ -55,7 +55,7 @@ func (fs FS) NetStat() ([]NetStat, error) {
// Other strings represent per-CPU counters
for scanner.Scan() {
for num, counter := range strings.Fields(scanner.Text()) {
- value, err := strconv.ParseUint(counter, 16, 32)
+ value, err := strconv.ParseUint(counter, 16, 64)
if err != nil {
return nil, err
}
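The netstat.go change widens the hex parse from 32 to 64 bits: counters under /proc/net/stat/ are cumulative, and once one passes 2^32-1 the old parse fails with a range error. A small demonstration of the difference:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	const counter = "1a2b3c4d5e" // a 40-bit value, too wide for bitSize 32

	if _, err := strconv.ParseUint(counter, 16, 32); err != nil {
		fmt.Println("32-bit parse fails:", err)
	}
	v, err := strconv.ParseUint(counter, 16, 64)
	if err != nil {
		fmt.Println("64-bit parse fails:", err)
		return
	}
	fmt.Println("64-bit parse:", v)
}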
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index 28f696803f..c30223af72 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -16,7 +16,7 @@ package procfs
import (
"bytes"
"fmt"
- "io/ioutil"
+ "io"
"os"
"strconv"
"strings"
@@ -82,7 +82,7 @@ func (fs FS) Self() (Proc, error) {
// NewProc returns a process for the given pid.
//
-// Deprecated: use fs.Proc() instead
+// Deprecated: Use fs.Proc() instead.
func (fs FS) NewProc(pid int) (Proc, error) {
return fs.Proc(pid)
}
@@ -142,7 +142,7 @@ func (p Proc) Wchan() (string, error) {
}
defer f.Close()
- data, err := ioutil.ReadAll(f)
+ data, err := io.ReadAll(f)
if err != nil {
return "", err
}
@@ -185,7 +185,7 @@ func (p Proc) Cwd() (string, error) {
return wd, err
}
-// RootDir returns the absolute path to the process's root directory (as set by chroot)
+// RootDir returns the absolute path to the process's root directory (as set by chroot).
func (p Proc) RootDir() (string, error) {
rdir, err := os.Readlink(p.path("root"))
if os.IsNotExist(err) {
@@ -311,7 +311,7 @@ func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
// Schedstat returns task scheduling information for the process.
func (p Proc) Schedstat() (ProcSchedstat, error) {
- contents, err := ioutil.ReadFile(p.path("schedstat"))
+ contents, err := os.ReadFile(p.path("schedstat"))
if err != nil {
return ProcSchedstat{}, err
}
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index be45b79873..cca03327c3 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -45,7 +45,7 @@ type Cgroup struct {
}
// parseCgroupString parses each line of the /proc/[pid]/cgroup file
-// Line format is hierarchyID:[controller1,controller2]:path
+// Line format is hierarchyID:[controller1,controller2]:path.
func parseCgroupString(cgroupStr string) (*Cgroup, error) {
var err error
@@ -69,7 +69,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
return cgroup, nil
}
-// parseCgroups reads each line of the /proc/[pid]/cgroup file
+// parseCgroups reads each line of the /proc/[pid]/cgroup file.
func parseCgroups(data []byte) ([]Cgroup, error) {
var cgroups []Cgroup
scanner := bufio.NewScanner(bytes.NewReader(data))
@@ -88,7 +88,7 @@ func parseCgroups(data []byte) ([]Cgroup, error) {
// Cgroups reads from /proc/<pid>/cgroups and returns a []*Cgroup struct locating this PID in each process
// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
-// so the len of the returned struct is equal to the number of active hierarchies on this system
+// so the len of the returned struct is equal to the number of active hierarchies on this system.
func (p Proc) Cgroups() ([]Cgroup, error) {
data, err := util.ReadFileNoStat(p.path("cgroup"))
if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go
new file mode 100644
index 0000000000..24d4dce9cf
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go
@@ -0,0 +1,98 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// CgroupSummary models one line from /proc/cgroups.
+// This file contains information about the controllers that are compiled into the kernel.
+//
+// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
+type CgroupSummary struct {
+ // The name of the controller. controller is also known as subsystem.
+ SubsysName string
+ // The unique ID of the cgroup hierarchy on which this controller is mounted.
+ Hierarchy int
+ // The number of control groups in this hierarchy using this controller.
+ Cgroups int
+ // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled.
+ Enabled int
+}
+
+// parseCgroupSummaryString parses each line of the /proc/cgroups file.
+// Line format is `subsys_name hierarchy num_cgroups enabled`.
+func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
+ var err error
+
+ fields := strings.Fields(CgroupSummaryStr)
+ // require at least 4 fields
+ if len(fields) < 4 {
+ return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr)
+ }
+
+ CgroupSummary := &CgroupSummary{
+ SubsysName: fields[0],
+ }
+ CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse hierarchy ID")
+ }
+ CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Cgroup Num")
+ }
+ CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Enabled")
+ }
+ return CgroupSummary, nil
+}
+
+// parseCgroupSummary reads each line of the /proc/cgroup file.
+func parseCgroupSummary(data []byte) ([]CgroupSummary, error) {
+ var CgroupSummarys []CgroupSummary
+ scanner := bufio.NewScanner(bytes.NewReader(data))
+ for scanner.Scan() {
+ CgroupSummaryString := scanner.Text()
+ // ignore comment lines
+ if strings.HasPrefix(CgroupSummaryString, "#") {
+ continue
+ }
+ CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString)
+ if err != nil {
+ return nil, err
+ }
+ CgroupSummarys = append(CgroupSummarys, *CgroupSummary)
+ }
+
+ err := scanner.Err()
+ return CgroupSummarys, err
+}
+
+// CgroupSummarys returns information about current /proc/cgroups.
+func (fs FS) CgroupSummarys() ([]CgroupSummary, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("cgroups"))
+ if err != nil {
+ return nil, err
+ }
+ return parseCgroupSummary(data)
+}
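proc_cgroups.go is a new file adding a CgroupSummary model for /proc/cgroups, the kernel's list of compiled-in cgroup controllers. A minimal consumer of the new API might look like this (assuming procfs is mounted at /proc):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// List every cgroup controller with its hierarchy ID, cgroup count, and
// enablement flag, using the CgroupSummarys method added above.
func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		fmt.Println("procfs:", err)
		return
	}
	summaries, err := fs.CgroupSummarys()
	if err != nil {
		fmt.Println("cgroups:", err)
		return
	}
	for _, s := range summaries {
		fmt.Printf("%-12s hierarchy=%d cgroups=%d enabled=%d\n",
			s.SubsysName, s.Hierarchy, s.Cgroups, s.Enabled)
	}
}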
diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go
index 6134b3580c..57a89895d6 100644
--- a/vendor/github.com/prometheus/procfs/proc_environ.go
+++ b/vendor/github.com/prometheus/procfs/proc_environ.go
@@ -19,7 +19,7 @@ import (
"github.com/prometheus/procfs/internal/util"
)
-// Environ reads process environments from /proc/<pid>/environ
+// Environ reads process environments from `/proc/<pid>/environ`.
func (p Proc) Environ() ([]string, error) {
environments := make([]string, 0)
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
index cf63227f06..1bbdd4a8e9 100644
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -22,7 +22,6 @@ import (
"github.com/prometheus/procfs/internal/util"
)
-// Regexp variables
var (
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
@@ -122,7 +121,7 @@ func (p ProcFDInfos) Len() int { return len(p) }
func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
-// InotifyWatchLen returns the total number of inotify watches
+// InotifyWatchLen returns the total number of inotify watches.
func (p ProcFDInfos) InotifyWatchLen() (int, error) {
length := 0
for _, f := range p {
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
index dd20f198a3..7a1388185a 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -79,7 +79,7 @@ var (
// NewLimits returns the current soft limits of the process.
//
-// Deprecated: use p.Limits() instead
+// Deprecated: Use p.Limits() instead.
func (p Proc) NewLimits() (ProcLimits, error) {
return p.Limits()
}
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
index 1d7772d516..f1bcbf32bb 100644
--- a/vendor/github.com/prometheus/procfs/proc_maps.go
+++ b/vendor/github.com/prometheus/procfs/proc_maps.go
@@ -11,7 +11,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build !js
package procfs
@@ -25,7 +27,7 @@ import (
"golang.org/x/sys/unix"
)
-// ProcMapPermissions contains permission settings read from /proc/[pid]/maps
+// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`.
type ProcMapPermissions struct {
// mapping has the [R]ead flag set
Read bool
@@ -39,8 +41,8 @@ type ProcMapPermissions struct {
Private bool
}
-// ProcMap contains the process memory-mappings of the process,
-// read from /proc/[pid]/maps
+// ProcMap contains the process memory-mappings of the process
+// read from `/proc/[pid]/maps`.
type ProcMap struct {
// The start address of current mapping.
StartAddr uintptr
@@ -79,7 +81,7 @@ func parseDevice(s string) (uint64, error) {
return unix.Mkdev(uint32(major), uint32(minor)), nil
}
-// parseAddress just converts a hex-string to a uintptr
+// parseAddress converts a hex-string to a uintptr.
func parseAddress(s string) (uintptr, error) {
a, err := strconv.ParseUint(s, 16, 0)
if err != nil {
@@ -89,7 +91,7 @@ func parseAddress(s string) (uintptr, error) {
return uintptr(a), nil
}
-// parseAddresses parses the start-end address
+// parseAddresses parses the start-end address.
func parseAddresses(s string) (uintptr, uintptr, error) {
toks := strings.Split(s, "-")
if len(toks) < 2 {
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
new file mode 100644
index 0000000000..48b5238194
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -0,0 +1,440 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ProcNetstat models the content of /proc/<pid>/net/netstat.
+type ProcNetstat struct {
+ // The process ID.
+ PID int
+ TcpExt
+ IpExt
+}
+
+type TcpExt struct { // nolint:revive
+ SyncookiesSent float64
+ SyncookiesRecv float64
+ SyncookiesFailed float64
+ EmbryonicRsts float64
+ PruneCalled float64
+ RcvPruned float64
+ OfoPruned float64
+ OutOfWindowIcmps float64
+ LockDroppedIcmps float64
+ ArpFilter float64
+ TW float64
+ TWRecycled float64
+ TWKilled float64
+ PAWSActive float64
+ PAWSEstab float64
+ DelayedACKs float64
+ DelayedACKLocked float64
+ DelayedACKLost float64
+ ListenOverflows float64
+ ListenDrops float64
+ TCPHPHits float64
+ TCPPureAcks float64
+ TCPHPAcks float64
+ TCPRenoRecovery float64
+ TCPSackRecovery float64
+ TCPSACKReneging float64
+ TCPSACKReorder float64
+ TCPRenoReorder float64
+ TCPTSReorder float64
+ TCPFullUndo float64
+ TCPPartialUndo float64
+ TCPDSACKUndo float64
+ TCPLossUndo float64
+ TCPLostRetransmit float64
+ TCPRenoFailures float64
+ TCPSackFailures float64
+ TCPLossFailures float64
+ TCPFastRetrans float64
+ TCPSlowStartRetrans float64
+ TCPTimeouts float64
+ TCPLossProbes float64
+ TCPLossProbeRecovery float64
+ TCPRenoRecoveryFail float64
+ TCPSackRecoveryFail float64
+ TCPRcvCollapsed float64
+ TCPDSACKOldSent float64
+ TCPDSACKOfoSent float64
+ TCPDSACKRecv float64
+ TCPDSACKOfoRecv float64
+ TCPAbortOnData float64
+ TCPAbortOnClose float64
+ TCPAbortOnMemory float64
+ TCPAbortOnTimeout float64
+ TCPAbortOnLinger float64
+ TCPAbortFailed float64
+ TCPMemoryPressures float64
+ TCPMemoryPressuresChrono float64
+ TCPSACKDiscard float64
+ TCPDSACKIgnoredOld float64
+ TCPDSACKIgnoredNoUndo float64
+ TCPSpuriousRTOs float64
+ TCPMD5NotFound float64
+ TCPMD5Unexpected float64
+ TCPMD5Failure float64
+ TCPSackShifted float64
+ TCPSackMerged float64
+ TCPSackShiftFallback float64
+ TCPBacklogDrop float64
+ PFMemallocDrop float64
+ TCPMinTTLDrop float64
+ TCPDeferAcceptDrop float64
+ IPReversePathFilter float64
+ TCPTimeWaitOverflow float64
+ TCPReqQFullDoCookies float64
+ TCPReqQFullDrop float64
+ TCPRetransFail float64
+ TCPRcvCoalesce float64
+ TCPOFOQueue float64
+ TCPOFODrop float64
+ TCPOFOMerge float64
+ TCPChallengeACK float64
+ TCPSYNChallenge float64
+ TCPFastOpenActive float64
+ TCPFastOpenActiveFail float64
+ TCPFastOpenPassive float64
+ TCPFastOpenPassiveFail float64
+ TCPFastOpenListenOverflow float64
+ TCPFastOpenCookieReqd float64
+ TCPFastOpenBlackhole float64
+ TCPSpuriousRtxHostQueues float64
+ BusyPollRxPackets float64
+ TCPAutoCorking float64
+ TCPFromZeroWindowAdv float64
+ TCPToZeroWindowAdv float64
+ TCPWantZeroWindowAdv float64
+ TCPSynRetrans float64
+ TCPOrigDataSent float64
+ TCPHystartTrainDetect float64
+ TCPHystartTrainCwnd float64
+ TCPHystartDelayDetect float64
+ TCPHystartDelayCwnd float64
+ TCPACKSkippedSynRecv float64
+ TCPACKSkippedPAWS float64
+ TCPACKSkippedSeq float64
+ TCPACKSkippedFinWait2 float64
+ TCPACKSkippedTimeWait float64
+ TCPACKSkippedChallenge float64
+ TCPWinProbe float64
+ TCPKeepAlive float64
+ TCPMTUPFail float64
+ TCPMTUPSuccess float64
+ TCPWqueueTooBig float64
+}
+
+type IpExt struct { // nolint:revive
+ InNoRoutes float64
+ InTruncatedPkts float64
+ InMcastPkts float64
+ OutMcastPkts float64
+ InBcastPkts float64
+ OutBcastPkts float64
+ InOctets float64
+ OutOctets float64
+ InMcastOctets float64
+ OutMcastOctets float64
+ InBcastOctets float64
+ OutBcastOctets float64
+ InCsumErrors float64
+ InNoECTPkts float64
+ InECT1Pkts float64
+ InECT0Pkts float64
+ InCEPkts float64
+ ReasmOverlaps float64
+}
+
+func (p Proc) Netstat() (ProcNetstat, error) {
+ filename := p.path("net/netstat")
+ data, err := util.ReadFileNoStat(filename)
+ if err != nil {
+ return ProcNetstat{PID: p.PID}, err
+ }
+ procNetstat, err := parseNetstat(bytes.NewReader(data), filename)
+ procNetstat.PID = p.PID
+ return procNetstat, err
+}
+
+// parseNetstat parses the metrics from the /proc/<pid>/net/netstat file
+// and returns a ProcNetstat structure.
+func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ procNetstat = ProcNetstat{}
+ )
+
+ for scanner.Scan() {
+ nameParts := strings.Split(scanner.Text(), " ")
+ scanner.Scan()
+ valueParts := strings.Split(scanner.Text(), " ")
+ // Remove trailing :.
+ protocol := strings.TrimSuffix(nameParts[0], ":")
+ if len(nameParts) != len(valueParts) {
+ return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s",
+ fileName, protocol)
+ }
+ for i := 1; i < len(nameParts); i++ {
+ value, err := strconv.ParseFloat(valueParts[i], 64)
+ if err != nil {
+ return procNetstat, err
+ }
+ key := nameParts[i]
+
+ switch protocol {
+ case "TcpExt":
+ switch key {
+ case "SyncookiesSent":
+ procNetstat.TcpExt.SyncookiesSent = value
+ case "SyncookiesRecv":
+ procNetstat.TcpExt.SyncookiesRecv = value
+ case "SyncookiesFailed":
+ procNetstat.TcpExt.SyncookiesFailed = value
+ case "EmbryonicRsts":
+ procNetstat.TcpExt.EmbryonicRsts = value
+ case "PruneCalled":
+ procNetstat.TcpExt.PruneCalled = value
+ case "RcvPruned":
+ procNetstat.TcpExt.RcvPruned = value
+ case "OfoPruned":
+ procNetstat.TcpExt.OfoPruned = value
+ case "OutOfWindowIcmps":
+ procNetstat.TcpExt.OutOfWindowIcmps = value
+ case "LockDroppedIcmps":
+ procNetstat.TcpExt.LockDroppedIcmps = value
+ case "ArpFilter":
+ procNetstat.TcpExt.ArpFilter = value
+ case "TW":
+ procNetstat.TcpExt.TW = value
+ case "TWRecycled":
+ procNetstat.TcpExt.TWRecycled = value
+ case "TWKilled":
+ procNetstat.TcpExt.TWKilled = value
+ case "PAWSActive":
+ procNetstat.TcpExt.PAWSActive = value
+ case "PAWSEstab":
+ procNetstat.TcpExt.PAWSEstab = value
+ case "DelayedACKs":
+ procNetstat.TcpExt.DelayedACKs = value
+ case "DelayedACKLocked":
+ procNetstat.TcpExt.DelayedACKLocked = value
+ case "DelayedACKLost":
+ procNetstat.TcpExt.DelayedACKLost = value
+ case "ListenOverflows":
+ procNetstat.TcpExt.ListenOverflows = value
+ case "ListenDrops":
+ procNetstat.TcpExt.ListenDrops = value
+ case "TCPHPHits":
+ procNetstat.TcpExt.TCPHPHits = value
+ case "TCPPureAcks":
+ procNetstat.TcpExt.TCPPureAcks = value
+ case "TCPHPAcks":
+ procNetstat.TcpExt.TCPHPAcks = value
+ case "TCPRenoRecovery":
+ procNetstat.TcpExt.TCPRenoRecovery = value
+ case "TCPSackRecovery":
+ procNetstat.TcpExt.TCPSackRecovery = value
+ case "TCPSACKReneging":
+ procNetstat.TcpExt.TCPSACKReneging = value
+ case "TCPSACKReorder":
+ procNetstat.TcpExt.TCPSACKReorder = value
+ case "TCPRenoReorder":
+ procNetstat.TcpExt.TCPRenoReorder = value
+ case "TCPTSReorder":
+ procNetstat.TcpExt.TCPTSReorder = value
+ case "TCPFullUndo":
+ procNetstat.TcpExt.TCPFullUndo = value
+ case "TCPPartialUndo":
+ procNetstat.TcpExt.TCPPartialUndo = value
+ case "TCPDSACKUndo":
+ procNetstat.TcpExt.TCPDSACKUndo = value
+ case "TCPLossUndo":
+ procNetstat.TcpExt.TCPLossUndo = value
+ case "TCPLostRetransmit":
+ procNetstat.TcpExt.TCPLostRetransmit = value
+ case "TCPRenoFailures":
+ procNetstat.TcpExt.TCPRenoFailures = value
+ case "TCPSackFailures":
+ procNetstat.TcpExt.TCPSackFailures = value
+ case "TCPLossFailures":
+ procNetstat.TcpExt.TCPLossFailures = value
+ case "TCPFastRetrans":
+ procNetstat.TcpExt.TCPFastRetrans = value
+ case "TCPSlowStartRetrans":
+ procNetstat.TcpExt.TCPSlowStartRetrans = value
+ case "TCPTimeouts":
+ procNetstat.TcpExt.TCPTimeouts = value
+ case "TCPLossProbes":
+ procNetstat.TcpExt.TCPLossProbes = value
+ case "TCPLossProbeRecovery":
+ procNetstat.TcpExt.TCPLossProbeRecovery = value
+ case "TCPRenoRecoveryFail":
+ procNetstat.TcpExt.TCPRenoRecoveryFail = value
+ case "TCPSackRecoveryFail":
+ procNetstat.TcpExt.TCPSackRecoveryFail = value
+ case "TCPRcvCollapsed":
+ procNetstat.TcpExt.TCPRcvCollapsed = value
+ case "TCPDSACKOldSent":
+ procNetstat.TcpExt.TCPDSACKOldSent = value
+ case "TCPDSACKOfoSent":
+ procNetstat.TcpExt.TCPDSACKOfoSent = value
+ case "TCPDSACKRecv":
+ procNetstat.TcpExt.TCPDSACKRecv = value
+ case "TCPDSACKOfoRecv":
+ procNetstat.TcpExt.TCPDSACKOfoRecv = value
+ case "TCPAbortOnData":
+ procNetstat.TcpExt.TCPAbortOnData = value
+ case "TCPAbortOnClose":
+ procNetstat.TcpExt.TCPAbortOnClose = value
+ case "TCPDeferAcceptDrop":
+ procNetstat.TcpExt.TCPDeferAcceptDrop = value
+ case "IPReversePathFilter":
+ procNetstat.TcpExt.IPReversePathFilter = value
+ case "TCPTimeWaitOverflow":
+ procNetstat.TcpExt.TCPTimeWaitOverflow = value
+ case "TCPReqQFullDoCookies":
+ procNetstat.TcpExt.TCPReqQFullDoCookies = value
+ case "TCPReqQFullDrop":
+ procNetstat.TcpExt.TCPReqQFullDrop = value
+ case "TCPRetransFail":
+ procNetstat.TcpExt.TCPRetransFail = value
+ case "TCPRcvCoalesce":
+ procNetstat.TcpExt.TCPRcvCoalesce = value
+ case "TCPOFOQueue":
+ procNetstat.TcpExt.TCPOFOQueue = value
+ case "TCPOFODrop":
+ procNetstat.TcpExt.TCPOFODrop = value
+ case "TCPOFOMerge":
+ procNetstat.TcpExt.TCPOFOMerge = value
+ case "TCPChallengeACK":
+ procNetstat.TcpExt.TCPChallengeACK = value
+ case "TCPSYNChallenge":
+ procNetstat.TcpExt.TCPSYNChallenge = value
+ case "TCPFastOpenActive":
+ procNetstat.TcpExt.TCPFastOpenActive = value
+ case "TCPFastOpenActiveFail":
+ procNetstat.TcpExt.TCPFastOpenActiveFail = value
+ case "TCPFastOpenPassive":
+ procNetstat.TcpExt.TCPFastOpenPassive = value
+ case "TCPFastOpenPassiveFail":
+ procNetstat.TcpExt.TCPFastOpenPassiveFail = value
+ case "TCPFastOpenListenOverflow":
+ procNetstat.TcpExt.TCPFastOpenListenOverflow = value
+ case "TCPFastOpenCookieReqd":
+ procNetstat.TcpExt.TCPFastOpenCookieReqd = value
+ case "TCPFastOpenBlackhole":
+ procNetstat.TcpExt.TCPFastOpenBlackhole = value
+ case "TCPSpuriousRtxHostQueues":
+ procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value
+ case "BusyPollRxPackets":
+ procNetstat.TcpExt.BusyPollRxPackets = value
+ case "TCPAutoCorking":
+ procNetstat.TcpExt.TCPAutoCorking = value
+ case "TCPFromZeroWindowAdv":
+ procNetstat.TcpExt.TCPFromZeroWindowAdv = value
+ case "TCPToZeroWindowAdv":
+ procNetstat.TcpExt.TCPToZeroWindowAdv = value
+ case "TCPWantZeroWindowAdv":
+ procNetstat.TcpExt.TCPWantZeroWindowAdv = value
+ case "TCPSynRetrans":
+ procNetstat.TcpExt.TCPSynRetrans = value
+ case "TCPOrigDataSent":
+ procNetstat.TcpExt.TCPOrigDataSent = value
+ case "TCPHystartTrainDetect":
+ procNetstat.TcpExt.TCPHystartTrainDetect = value
+ case "TCPHystartTrainCwnd":
+ procNetstat.TcpExt.TCPHystartTrainCwnd = value
+ case "TCPHystartDelayDetect":
+ procNetstat.TcpExt.TCPHystartDelayDetect = value
+ case "TCPHystartDelayCwnd":
+ procNetstat.TcpExt.TCPHystartDelayCwnd = value
+ case "TCPACKSkippedSynRecv":
+ procNetstat.TcpExt.TCPACKSkippedSynRecv = value
+ case "TCPACKSkippedPAWS":
+ procNetstat.TcpExt.TCPACKSkippedPAWS = value
+ case "TCPACKSkippedSeq":
+ procNetstat.TcpExt.TCPACKSkippedSeq = value
+ case "TCPACKSkippedFinWait2":
+ procNetstat.TcpExt.TCPACKSkippedFinWait2 = value
+ case "TCPACKSkippedTimeWait":
+ procNetstat.TcpExt.TCPACKSkippedTimeWait = value
+ case "TCPACKSkippedChallenge":
+ procNetstat.TcpExt.TCPACKSkippedChallenge = value
+ case "TCPWinProbe":
+ procNetstat.TcpExt.TCPWinProbe = value
+ case "TCPKeepAlive":
+ procNetstat.TcpExt.TCPKeepAlive = value
+ case "TCPMTUPFail":
+ procNetstat.TcpExt.TCPMTUPFail = value
+ case "TCPMTUPSuccess":
+ procNetstat.TcpExt.TCPMTUPSuccess = value
+ case "TCPWqueueTooBig":
+ procNetstat.TcpExt.TCPWqueueTooBig = value
+ }
+ case "IpExt":
+ switch key {
+ case "InNoRoutes":
+ procNetstat.IpExt.InNoRoutes = value
+ case "InTruncatedPkts":
+ procNetstat.IpExt.InTruncatedPkts = value
+ case "InMcastPkts":
+ procNetstat.IpExt.InMcastPkts = value
+ case "OutMcastPkts":
+ procNetstat.IpExt.OutMcastPkts = value
+ case "InBcastPkts":
+ procNetstat.IpExt.InBcastPkts = value
+ case "OutBcastPkts":
+ procNetstat.IpExt.OutBcastPkts = value
+ case "InOctets":
+ procNetstat.IpExt.InOctets = value
+ case "OutOctets":
+ procNetstat.IpExt.OutOctets = value
+ case "InMcastOctets":
+ procNetstat.IpExt.InMcastOctets = value
+ case "OutMcastOctets":
+ procNetstat.IpExt.OutMcastOctets = value
+ case "InBcastOctets":
+ procNetstat.IpExt.InBcastOctets = value
+ case "OutBcastOctets":
+ procNetstat.IpExt.OutBcastOctets = value
+ case "InCsumErrors":
+ procNetstat.IpExt.InCsumErrors = value
+ case "InNoECTPkts":
+ procNetstat.IpExt.InNoECTPkts = value
+ case "InECT1Pkts":
+ procNetstat.IpExt.InECT1Pkts = value
+ case "InECT0Pkts":
+ procNetstat.IpExt.InECT0Pkts = value
+ case "InCEPkts":
+ procNetstat.IpExt.InCEPkts = value
+ case "ReasmOverlaps":
+ procNetstat.IpExt.ReasmOverlaps = value
+ }
+ }
+ }
+ }
+ return procNetstat, scanner.Err()
+}
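proc_netstat.go adds per-process parsing of /proc/<pid>/net/netstat, exposing the TcpExt and IpExt counter groups as typed fields. A minimal consumer of the new Proc.Netstat API (assuming /proc is available):

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

// Read the current process's net/netstat and print two of the counters
// populated by the parser added above.
func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		fmt.Println("procfs:", err)
		return
	}
	p, err := fs.Proc(os.Getpid())
	if err != nil {
		fmt.Println("proc:", err)
		return
	}
	stats, err := p.Netstat()
	if err != nil {
		fmt.Println("netstat:", err)
		return
	}
	fmt.Println("TCP timeouts:", stats.TcpExt.TCPTimeouts)
	fmt.Println("IP octets in:", stats.IpExt.InOctets)
}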
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
index dc6c14f0a4..a68fe15290 100644
--- a/vendor/github.com/prometheus/procfs/proc_psi.go
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -35,9 +35,10 @@ import (
const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
-// PSILine is a single line of values as returned by /proc/pressure/*
-// The Avg entries are averages over n seconds, as a percentage
-// The Total line is in microseconds
+// PSILine is a single line of values as returned by `/proc/pressure/*`.
+//
+// The Avg entries are averages over n seconds, as a percentage.
+// The Total line is in microseconds.
type PSILine struct {
Avg10 float64
Avg60 float64
@@ -46,8 +47,9 @@ type PSILine struct {
}
// PSIStats represent pressure stall information from /proc/pressure/*
-// Some indicates the share of time in which at least some tasks are stalled
-// Full indicates the share of time in which all non-idle tasks are stalled simultaneously
+//
+// "Some" indicates the share of time in which at least some tasks are stalled.
+// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously.
type PSIStats struct {
Some *PSILine
Full *PSILine
@@ -65,7 +67,7 @@ func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
return parsePSIStats(resource, bytes.NewReader(data))
}
-// parsePSIStats parses the specified file for pressure stall information
+// parsePSIStats parses the specified file for pressure stall information.
func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
psiStats := PSIStats{}
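The reworded PSILine docs describe lines such as those in /proc/pressure/cpu; the package's lineFormat constant, unchanged above, parses them with fmt.Sscanf. A standalone sketch against a sample "some" line:

package main

import "fmt"

// Parse one pressure-stall line: the averages are percentages over 10, 60,
// and 300 seconds, and total is cumulative stall time in microseconds.
func main() {
	const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
	line := "some avg10=1.53 avg60=0.87 avg300=0.34 total=12345678"

	var avg10, avg60, avg300 float64
	var total int64
	if _, err := fmt.Sscanf(line[len("some "):], lineFormat,
		&avg10, &avg60, &avg300, &total); err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Printf("avg10=%.2f%% avg60=%.2f%% avg300=%.2f%% total=%dus\n",
		avg10, avg60, avg300, total)
}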
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
index a576a720a4..0e97d99575 100644
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !windows
// +build !windows
package procfs
@@ -28,30 +29,30 @@ import (
)
var (
- // match the header line before each mapped zone in /proc/pid/smaps
+ // match the header line before each mapped zone in `/proc/pid/smaps`.
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
)
type ProcSMapsRollup struct {
- // Amount of the mapping that is currently resident in RAM
+ // Amount of the mapping that is currently resident in RAM.
Rss uint64
- // Process's proportional share of this mapping
+ // Process's proportional share of this mapping.
Pss uint64
- // Size in bytes of clean shared pages
+ // Size in bytes of clean shared pages.
SharedClean uint64
- // Size in bytes of dirty shared pages
+ // Size in bytes of dirty shared pages.
SharedDirty uint64
- // Size in bytes of clean private pages
+ // Size in bytes of clean private pages.
PrivateClean uint64
- // Size in bytes of dirty private pages
+ // Size in bytes of dirty private pages.
PrivateDirty uint64
- // Amount of memory currently marked as referenced or accessed
+ // Amount of memory currently marked as referenced or accessed.
Referenced uint64
- // Amount of memory that does not belong to any file
+ // Amount of memory that does not belong to any file.
Anonymous uint64
- // Amount would-be-anonymous memory currently on swap
+ // Amount would-be-anonymous memory currently on swap.
Swap uint64
- // Process's proportional memory on swap
+ // Process's proportional memory on swap.
SwapPss uint64
}
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
new file mode 100644
index 0000000000..ae191896cb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -0,0 +1,353 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ProcSnmp models the content of /proc/<pid>/net/snmp.
+type ProcSnmp struct {
+ // The process ID.
+ PID int
+ Ip
+ Icmp
+ IcmpMsg
+ Tcp
+ Udp
+ UdpLite
+}
+
+type Ip struct { // nolint:revive
+ Forwarding float64
+ DefaultTTL float64
+ InReceives float64
+ InHdrErrors float64
+ InAddrErrors float64
+ ForwDatagrams float64
+ InUnknownProtos float64
+ InDiscards float64
+ InDelivers float64
+ OutRequests float64
+ OutDiscards float64
+ OutNoRoutes float64
+ ReasmTimeout float64
+ ReasmReqds float64
+ ReasmOKs float64
+ ReasmFails float64
+ FragOKs float64
+ FragFails float64
+ FragCreates float64
+}
+
+type Icmp struct {
+ InMsgs float64
+ InErrors float64
+ InCsumErrors float64
+ InDestUnreachs float64
+ InTimeExcds float64
+ InParmProbs float64
+ InSrcQuenchs float64
+ InRedirects float64
+ InEchos float64
+ InEchoReps float64
+ InTimestamps float64
+ InTimestampReps float64
+ InAddrMasks float64
+ InAddrMaskReps float64
+ OutMsgs float64
+ OutErrors float64
+ OutDestUnreachs float64
+ OutTimeExcds float64
+ OutParmProbs float64
+ OutSrcQuenchs float64
+ OutRedirects float64
+ OutEchos float64
+ OutEchoReps float64
+ OutTimestamps float64
+ OutTimestampReps float64
+ OutAddrMasks float64
+ OutAddrMaskReps float64
+}
+
+type IcmpMsg struct {
+ InType3 float64
+ OutType3 float64
+}
+
+type Tcp struct { // nolint:revive
+ RtoAlgorithm float64
+ RtoMin float64
+ RtoMax float64
+ MaxConn float64
+ ActiveOpens float64
+ PassiveOpens float64
+ AttemptFails float64
+ EstabResets float64
+ CurrEstab float64
+ InSegs float64
+ OutSegs float64
+ RetransSegs float64
+ InErrs float64
+ OutRsts float64
+ InCsumErrors float64
+}
+
+type Udp struct { // nolint:revive
+ InDatagrams float64
+ NoPorts float64
+ InErrors float64
+ OutDatagrams float64
+ RcvbufErrors float64
+ SndbufErrors float64
+ InCsumErrors float64
+ IgnoredMulti float64
+}
+
+type UdpLite struct { // nolint:revive
+ InDatagrams float64
+ NoPorts float64
+ InErrors float64
+ OutDatagrams float64
+ RcvbufErrors float64
+ SndbufErrors float64
+ InCsumErrors float64
+ IgnoredMulti float64
+}
+
+func (p Proc) Snmp() (ProcSnmp, error) {
+ filename := p.path("net/snmp")
+ data, err := util.ReadFileNoStat(filename)
+ if err != nil {
+ return ProcSnmp{PID: p.PID}, err
+ }
+ procSnmp, err := parseSnmp(bytes.NewReader(data), filename)
+ procSnmp.PID = p.PID
+ return procSnmp, err
+}
+
+// parseSnmp parses the metrics from the /proc/<pid>/net/snmp file
+// and returns a ProcSnmp structure containing those metrics (e.g. Ip.Forwarding = 2).
+func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ procSnmp = ProcSnmp{}
+ )
+
+ for scanner.Scan() {
+ nameParts := strings.Split(scanner.Text(), " ")
+ scanner.Scan()
+ valueParts := strings.Split(scanner.Text(), " ")
+ // Remove trailing :.
+ protocol := strings.TrimSuffix(nameParts[0], ":")
+ if len(nameParts) != len(valueParts) {
+ return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s",
+ fileName, protocol)
+ }
+ for i := 1; i < len(nameParts); i++ {
+ value, err := strconv.ParseFloat(valueParts[i], 64)
+ if err != nil {
+ return procSnmp, err
+ }
+ key := nameParts[i]
+
+ switch protocol {
+ case "Ip":
+ switch key {
+ case "Forwarding":
+ procSnmp.Ip.Forwarding = value
+ case "DefaultTTL":
+ procSnmp.Ip.DefaultTTL = value
+ case "InReceives":
+ procSnmp.Ip.InReceives = value
+ case "InHdrErrors":
+ procSnmp.Ip.InHdrErrors = value
+ case "InAddrErrors":
+ procSnmp.Ip.InAddrErrors = value
+ case "ForwDatagrams":
+ procSnmp.Ip.ForwDatagrams = value
+ case "InUnknownProtos":
+ procSnmp.Ip.InUnknownProtos = value
+ case "InDiscards":
+ procSnmp.Ip.InDiscards = value
+ case "InDelivers":
+ procSnmp.Ip.InDelivers = value
+ case "OutRequests":
+ procSnmp.Ip.OutRequests = value
+ case "OutDiscards":
+ procSnmp.Ip.OutDiscards = value
+ case "OutNoRoutes":
+ procSnmp.Ip.OutNoRoutes = value
+ case "ReasmTimeout":
+ procSnmp.Ip.ReasmTimeout = value
+ case "ReasmReqds":
+ procSnmp.Ip.ReasmReqds = value
+ case "ReasmOKs":
+ procSnmp.Ip.ReasmOKs = value
+ case "ReasmFails":
+ procSnmp.Ip.ReasmFails = value
+ case "FragOKs":
+ procSnmp.Ip.FragOKs = value
+ case "FragFails":
+ procSnmp.Ip.FragFails = value
+ case "FragCreates":
+ procSnmp.Ip.FragCreates = value
+ }
+ case "Icmp":
+ switch key {
+ case "InMsgs":
+ procSnmp.Icmp.InMsgs = value
+ case "InErrors":
+ procSnmp.Icmp.InErrors = value
+ case "InCsumErrors":
+ procSnmp.Icmp.InCsumErrors = value
+ case "InDestUnreachs":
+ procSnmp.Icmp.InDestUnreachs = value
+ case "InTimeExcds":
+ procSnmp.Icmp.InTimeExcds = value
+ case "InParmProbs":
+ procSnmp.Icmp.InParmProbs = value
+ case "InSrcQuenchs":
+ procSnmp.Icmp.InSrcQuenchs = value
+ case "InRedirects":
+ procSnmp.Icmp.InRedirects = value
+ case "InEchos":
+ procSnmp.Icmp.InEchos = value
+ case "InEchoReps":
+ procSnmp.Icmp.InEchoReps = value
+ case "InTimestamps":
+ procSnmp.Icmp.InTimestamps = value
+ case "InTimestampReps":
+ procSnmp.Icmp.InTimestampReps = value
+ case "InAddrMasks":
+ procSnmp.Icmp.InAddrMasks = value
+ case "InAddrMaskReps":
+ procSnmp.Icmp.InAddrMaskReps = value
+ case "OutMsgs":
+ procSnmp.Icmp.OutMsgs = value
+ case "OutErrors":
+ procSnmp.Icmp.OutErrors = value
+ case "OutDestUnreachs":
+ procSnmp.Icmp.OutDestUnreachs = value
+ case "OutTimeExcds":
+ procSnmp.Icmp.OutTimeExcds = value
+ case "OutParmProbs":
+ procSnmp.Icmp.OutParmProbs = value
+ case "OutSrcQuenchs":
+ procSnmp.Icmp.OutSrcQuenchs = value
+ case "OutRedirects":
+ procSnmp.Icmp.OutRedirects = value
+ case "OutEchos":
+ procSnmp.Icmp.OutEchos = value
+ case "OutEchoReps":
+ procSnmp.Icmp.OutEchoReps = value
+ case "OutTimestamps":
+ procSnmp.Icmp.OutTimestamps = value
+ case "OutTimestampReps":
+ procSnmp.Icmp.OutTimestampReps = value
+ case "OutAddrMasks":
+ procSnmp.Icmp.OutAddrMasks = value
+ case "OutAddrMaskReps":
+ procSnmp.Icmp.OutAddrMaskReps = value
+ }
+ case "IcmpMsg":
+ switch key {
+ case "InType3":
+ procSnmp.IcmpMsg.InType3 = value
+ case "OutType3":
+ procSnmp.IcmpMsg.OutType3 = value
+ }
+ case "Tcp":
+ switch key {
+ case "RtoAlgorithm":
+ procSnmp.Tcp.RtoAlgorithm = value
+ case "RtoMin":
+ procSnmp.Tcp.RtoMin = value
+ case "RtoMax":
+ procSnmp.Tcp.RtoMax = value
+ case "MaxConn":
+ procSnmp.Tcp.MaxConn = value
+ case "ActiveOpens":
+ procSnmp.Tcp.ActiveOpens = value
+ case "PassiveOpens":
+ procSnmp.Tcp.PassiveOpens = value
+ case "AttemptFails":
+ procSnmp.Tcp.AttemptFails = value
+ case "EstabResets":
+ procSnmp.Tcp.EstabResets = value
+ case "CurrEstab":
+ procSnmp.Tcp.CurrEstab = value
+ case "InSegs":
+ procSnmp.Tcp.InSegs = value
+ case "OutSegs":
+ procSnmp.Tcp.OutSegs = value
+ case "RetransSegs":
+ procSnmp.Tcp.RetransSegs = value
+ case "InErrs":
+ procSnmp.Tcp.InErrs = value
+ case "OutRsts":
+ procSnmp.Tcp.OutRsts = value
+ case "InCsumErrors":
+ procSnmp.Tcp.InCsumErrors = value
+ }
+ case "Udp":
+ switch key {
+ case "InDatagrams":
+ procSnmp.Udp.InDatagrams = value
+ case "NoPorts":
+ procSnmp.Udp.NoPorts = value
+ case "InErrors":
+ procSnmp.Udp.InErrors = value
+ case "OutDatagrams":
+ procSnmp.Udp.OutDatagrams = value
+ case "RcvbufErrors":
+ procSnmp.Udp.RcvbufErrors = value
+ case "SndbufErrors":
+ procSnmp.Udp.SndbufErrors = value
+ case "InCsumErrors":
+ procSnmp.Udp.InCsumErrors = value
+ case "IgnoredMulti":
+ procSnmp.Udp.IgnoredMulti = value
+ }
+ case "UdpLite":
+ switch key {
+ case "InDatagrams":
+ procSnmp.UdpLite.InDatagrams = value
+ case "NoPorts":
+ procSnmp.UdpLite.NoPorts = value
+ case "InErrors":
+ procSnmp.UdpLite.InErrors = value
+ case "OutDatagrams":
+ procSnmp.UdpLite.OutDatagrams = value
+ case "RcvbufErrors":
+ procSnmp.UdpLite.RcvbufErrors = value
+ case "SndbufErrors":
+ procSnmp.UdpLite.SndbufErrors = value
+ case "InCsumErrors":
+ procSnmp.UdpLite.InCsumErrors = value
+ case "IgnoredMulti":
+ procSnmp.UdpLite.IgnoredMulti = value
+ }
+ }
+ }
+ }
+ return procSnmp, scanner.Err()
+}
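For callers, a minimal sketch of consuming the new `Proc.Snmp()` accessor (assuming a standard `/proc` mount):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Self reads the calling process's own entry via /proc/self.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	snmp, err := p.Snmp()
	if err != nil {
		log.Fatal(err)
	}
	// Ip, Tcp, etc. are embedded structs, so their fields are promoted
	// and reachable directly on ProcSnmp.
	fmt.Printf("IP datagrams received: %.0f\n", snmp.Ip.InReceives)
	fmt.Printf("TCP segments sent: %.0f\n", snmp.Tcp.OutSegs)
}
```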
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
new file mode 100644
index 0000000000..f611992d52
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -0,0 +1,381 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ProcSnmp6 models the content of /proc/<pid>/net/snmp6.
+type ProcSnmp6 struct {
+ // The process ID.
+ PID int
+ Ip6
+ Icmp6
+ Udp6
+ UdpLite6
+}
+
+type Ip6 struct { // nolint:revive
+ InReceives float64
+ InHdrErrors float64
+ InTooBigErrors float64
+ InNoRoutes float64
+ InAddrErrors float64
+ InUnknownProtos float64
+ InTruncatedPkts float64
+ InDiscards float64
+ InDelivers float64
+ OutForwDatagrams float64
+ OutRequests float64
+ OutDiscards float64
+ OutNoRoutes float64
+ ReasmTimeout float64
+ ReasmReqds float64
+ ReasmOKs float64
+ ReasmFails float64
+ FragOKs float64
+ FragFails float64
+ FragCreates float64
+ InMcastPkts float64
+ OutMcastPkts float64
+ InOctets float64
+ OutOctets float64
+ InMcastOctets float64
+ OutMcastOctets float64
+ InBcastOctets float64
+ OutBcastOctets float64
+ InNoECTPkts float64
+ InECT1Pkts float64
+ InECT0Pkts float64
+ InCEPkts float64
+}
+
+type Icmp6 struct {
+ InMsgs float64
+ InErrors float64
+ OutMsgs float64
+ OutErrors float64
+ InCsumErrors float64
+ InDestUnreachs float64
+ InPktTooBigs float64
+ InTimeExcds float64
+ InParmProblems float64
+ InEchos float64
+ InEchoReplies float64
+ InGroupMembQueries float64
+ InGroupMembResponses float64
+ InGroupMembReductions float64
+ InRouterSolicits float64
+ InRouterAdvertisements float64
+ InNeighborSolicits float64
+ InNeighborAdvertisements float64
+ InRedirects float64
+ InMLDv2Reports float64
+ OutDestUnreachs float64
+ OutPktTooBigs float64
+ OutTimeExcds float64
+ OutParmProblems float64
+ OutEchos float64
+ OutEchoReplies float64
+ OutGroupMembQueries float64
+ OutGroupMembResponses float64
+ OutGroupMembReductions float64
+ OutRouterSolicits float64
+ OutRouterAdvertisements float64
+ OutNeighborSolicits float64
+ OutNeighborAdvertisements float64
+ OutRedirects float64
+ OutMLDv2Reports float64
+ InType1 float64
+ InType134 float64
+ InType135 float64
+ InType136 float64
+ InType143 float64
+ OutType133 float64
+ OutType135 float64
+ OutType136 float64
+ OutType143 float64
+}
+
+type Udp6 struct { // nolint:revive
+ InDatagrams float64
+ NoPorts float64
+ InErrors float64
+ OutDatagrams float64
+ RcvbufErrors float64
+ SndbufErrors float64
+ InCsumErrors float64
+ IgnoredMulti float64
+}
+
+type UdpLite6 struct { // nolint:revive
+ InDatagrams float64
+ NoPorts float64
+ InErrors float64
+ OutDatagrams float64
+ RcvbufErrors float64
+ SndbufErrors float64
+ InCsumErrors float64
+}
+
+func (p Proc) Snmp6() (ProcSnmp6, error) {
+ filename := p.path("net/snmp6")
+ data, err := util.ReadFileNoStat(filename)
+ if err != nil {
+ // On systems with IPv6 disabled, this file won't exist;
+ // return an empty result rather than an error.
+ if errors.Is(err, os.ErrNotExist) {
+ return ProcSnmp6{PID: p.PID}, nil
+ }
+
+ return ProcSnmp6{PID: p.PID}, err
+ }
+
+ procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data))
+ procSnmp6.PID = p.PID
+ return procSnmp6, err
+}
+
+// parseSNMP6Stats parses the metrics from the /proc/<pid>/net/snmp6 file
+// and returns a ProcSnmp6 structure containing those metrics.
+func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ procSnmp6 = ProcSnmp6{}
+ )
+
+ for scanner.Scan() {
+ stat := strings.Fields(scanner.Text())
+ if len(stat) < 2 {
+ continue
+ }
+ // Expect to have "6" in the metric name; skip the line otherwise.
+ if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 {
+ protocol := stat[0][:sixIndex+1]
+ key := stat[0][sixIndex+1:]
+ value, err := strconv.ParseFloat(stat[1], 64)
+ if err != nil {
+ return procSnmp6, err
+ }
+
+ switch protocol {
+ case "Ip6":
+ switch key {
+ case "InReceives":
+ procSnmp6.Ip6.InReceives = value
+ case "InHdrErrors":
+ procSnmp6.Ip6.InHdrErrors = value
+ case "InTooBigErrors":
+ procSnmp6.Ip6.InTooBigErrors = value
+ case "InNoRoutes":
+ procSnmp6.Ip6.InNoRoutes = value
+ case "InAddrErrors":
+ procSnmp6.Ip6.InAddrErrors = value
+ case "InUnknownProtos":
+ procSnmp6.Ip6.InUnknownProtos = value
+ case "InTruncatedPkts":
+ procSnmp6.Ip6.InTruncatedPkts = value
+ case "InDiscards":
+ procSnmp6.Ip6.InDiscards = value
+ case "InDelivers":
+ procSnmp6.Ip6.InDelivers = value
+ case "OutForwDatagrams":
+ procSnmp6.Ip6.OutForwDatagrams = value
+ case "OutRequests":
+ procSnmp6.Ip6.OutRequests = value
+ case "OutDiscards":
+ procSnmp6.Ip6.OutDiscards = value
+ case "OutNoRoutes":
+ procSnmp6.Ip6.OutNoRoutes = value
+ case "ReasmTimeout":
+ procSnmp6.Ip6.ReasmTimeout = value
+ case "ReasmReqds":
+ procSnmp6.Ip6.ReasmReqds = value
+ case "ReasmOKs":
+ procSnmp6.Ip6.ReasmOKs = value
+ case "ReasmFails":
+ procSnmp6.Ip6.ReasmFails = value
+ case "FragOKs":
+ procSnmp6.Ip6.FragOKs = value
+ case "FragFails":
+ procSnmp6.Ip6.FragFails = value
+ case "FragCreates":
+ procSnmp6.Ip6.FragCreates = value
+ case "InMcastPkts":
+ procSnmp6.Ip6.InMcastPkts = value
+ case "OutMcastPkts":
+ procSnmp6.Ip6.OutMcastPkts = value
+ case "InOctets":
+ procSnmp6.Ip6.InOctets = value
+ case "OutOctets":
+ procSnmp6.Ip6.OutOctets = value
+ case "InMcastOctets":
+ procSnmp6.Ip6.InMcastOctets = value
+ case "OutMcastOctets":
+ procSnmp6.Ip6.OutMcastOctets = value
+ case "InBcastOctets":
+ procSnmp6.Ip6.InBcastOctets = value
+ case "OutBcastOctets":
+ procSnmp6.Ip6.OutBcastOctets = value
+ case "InNoECTPkts":
+ procSnmp6.Ip6.InNoECTPkts = value
+ case "InECT1Pkts":
+ procSnmp6.Ip6.InECT1Pkts = value
+ case "InECT0Pkts":
+ procSnmp6.Ip6.InECT0Pkts = value
+ case "InCEPkts":
+ procSnmp6.Ip6.InCEPkts = value
+ }
+ case "Icmp6":
+ switch key {
+ case "InMsgs":
+ procSnmp6.Icmp6.InMsgs = value
+ case "InErrors":
+ procSnmp6.Icmp6.InErrors = value
+ case "OutMsgs":
+ procSnmp6.Icmp6.OutMsgs = value
+ case "OutErrors":
+ procSnmp6.Icmp6.OutErrors = value
+ case "InCsumErrors":
+ procSnmp6.Icmp6.InCsumErrors = value
+ case "InDestUnreachs":
+ procSnmp6.Icmp6.InDestUnreachs = value
+ case "InPktTooBigs":
+ procSnmp6.Icmp6.InPktTooBigs = value
+ case "InTimeExcds":
+ procSnmp6.Icmp6.InTimeExcds = value
+ case "InParmProblems":
+ procSnmp6.Icmp6.InParmProblems = value
+ case "InEchos":
+ procSnmp6.Icmp6.InEchos = value
+ case "InEchoReplies":
+ procSnmp6.Icmp6.InEchoReplies = value
+ case "InGroupMembQueries":
+ procSnmp6.Icmp6.InGroupMembQueries = value
+ case "InGroupMembResponses":
+ procSnmp6.Icmp6.InGroupMembResponses = value
+ case "InGroupMembReductions":
+ procSnmp6.Icmp6.InGroupMembReductions = value
+ case "InRouterSolicits":
+ procSnmp6.Icmp6.InRouterSolicits = value
+ case "InRouterAdvertisements":
+ procSnmp6.Icmp6.InRouterAdvertisements = value
+ case "InNeighborSolicits":
+ procSnmp6.Icmp6.InNeighborSolicits = value
+ case "InNeighborAdvertisements":
+ procSnmp6.Icmp6.InNeighborAdvertisements = value
+ case "InRedirects":
+ procSnmp6.Icmp6.InRedirects = value
+ case "InMLDv2Reports":
+ procSnmp6.Icmp6.InMLDv2Reports = value
+ case "OutDestUnreachs":
+ procSnmp6.Icmp6.OutDestUnreachs = value
+ case "OutPktTooBigs":
+ procSnmp6.Icmp6.OutPktTooBigs = value
+ case "OutTimeExcds":
+ procSnmp6.Icmp6.OutTimeExcds = value
+ case "OutParmProblems":
+ procSnmp6.Icmp6.OutParmProblems = value
+ case "OutEchos":
+ procSnmp6.Icmp6.OutEchos = value
+ case "OutEchoReplies":
+ procSnmp6.Icmp6.OutEchoReplies = value
+ case "OutGroupMembQueries":
+ procSnmp6.Icmp6.OutGroupMembQueries = value
+ case "OutGroupMembResponses":
+ procSnmp6.Icmp6.OutGroupMembResponses = value
+ case "OutGroupMembReductions":
+ procSnmp6.Icmp6.OutGroupMembReductions = value
+ case "OutRouterSolicits":
+ procSnmp6.Icmp6.OutRouterSolicits = value
+ case "OutRouterAdvertisements":
+ procSnmp6.Icmp6.OutRouterAdvertisements = value
+ case "OutNeighborSolicits":
+ procSnmp6.Icmp6.OutNeighborSolicits = value
+ case "OutNeighborAdvertisements":
+ procSnmp6.Icmp6.OutNeighborAdvertisements = value
+ case "OutRedirects":
+ procSnmp6.Icmp6.OutRedirects = value
+ case "OutMLDv2Reports":
+ procSnmp6.Icmp6.OutMLDv2Reports = value
+ case "InType1":
+ procSnmp6.Icmp6.InType1 = value
+ case "InType134":
+ procSnmp6.Icmp6.InType134 = value
+ case "InType135":
+ procSnmp6.Icmp6.InType135 = value
+ case "InType136":
+ procSnmp6.Icmp6.InType136 = value
+ case "InType143":
+ procSnmp6.Icmp6.InType143 = value
+ case "OutType133":
+ procSnmp6.Icmp6.OutType133 = value
+ case "OutType135":
+ procSnmp6.Icmp6.OutType135 = value
+ case "OutType136":
+ procSnmp6.Icmp6.OutType136 = value
+ case "OutType143":
+ procSnmp6.Icmp6.OutType143 = value
+ }
+ case "Udp6":
+ switch key {
+ case "InDatagrams":
+ procSnmp6.Udp6.InDatagrams = value
+ case "NoPorts":
+ procSnmp6.Udp6.NoPorts = value
+ case "InErrors":
+ procSnmp6.Udp6.InErrors = value
+ case "OutDatagrams":
+ procSnmp6.Udp6.OutDatagrams = value
+ case "RcvbufErrors":
+ procSnmp6.Udp6.RcvbufErrors = value
+ case "SndbufErrors":
+ procSnmp6.Udp6.SndbufErrors = value
+ case "InCsumErrors":
+ procSnmp6.Udp6.InCsumErrors = value
+ case "IgnoredMulti":
+ procSnmp6.Udp6.IgnoredMulti = value
+ }
+ case "UdpLite6":
+ switch key {
+ case "InDatagrams":
+ procSnmp6.UdpLite6.InDatagrams = value
+ case "NoPorts":
+ procSnmp6.UdpLite6.NoPorts = value
+ case "InErrors":
+ procSnmp6.UdpLite6.InErrors = value
+ case "OutDatagrams":
+ procSnmp6.UdpLite6.OutDatagrams = value
+ case "RcvbufErrors":
+ procSnmp6.UdpLite6.RcvbufErrors = value
+ case "SndbufErrors":
+ procSnmp6.UdpLite6.SndbufErrors = value
+ case "InCsumErrors":
+ procSnmp6.UdpLite6.InCsumErrors = value
+ }
+ }
+ }
+ }
+ return procSnmp6, scanner.Err()
+}
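`Proc.Snmp6()` mirrors `Snmp()`, with one behavioral difference: on kernels with IPv6 disabled, `net/snmp6` is absent and the method deliberately returns a zero-valued struct with a nil error. A minimal sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	snmp6, err := p.Snmp6()
	if err != nil {
		log.Fatal(err)
	}
	// A zero value can mean either "no IPv6 traffic" or "IPv6 disabled".
	fmt.Printf("IPv6 datagrams received: %.0f\n", snmp6.Ip6.InReceives)
}
```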
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 8c7b6e80a3..06c556ef96 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -81,10 +81,10 @@ type ProcStat struct {
STime uint
// Amount of time that this process's waited-for children have been
// scheduled in user mode, measured in clock ticks.
- CUTime uint
+ CUTime int
// Amount of time that this process's waited-for children have been
// scheduled in kernel mode, measured in clock ticks.
- CSTime uint
+ CSTime int
// For processes running a real-time scheduling policy, this is the negated
// scheduling priority, minus one.
Priority int
@@ -115,7 +115,7 @@ type ProcStat struct {
// NewStat returns the current status information of the process.
//
-// Deprecated: use p.Stat() instead
+// Deprecated: Use p.Stat() instead.
func (p Proc) NewStat() (ProcStat, error) {
return p.Stat()
}
@@ -141,6 +141,11 @@ func (p Proc) Stat() (ProcStat, error) {
}
s.Comm = string(data[l+1 : r])
+
+ // Check the following resources for the details about the particular stat
+ // fields and their data types:
+ // * https://man7.org/linux/man-pages/man5/proc.5.html
+ // * https://man7.org/linux/man-pages/man3/scanf.3.html
_, err = fmt.Fscan(
bytes.NewBuffer(data[r+2:]),
&s.State,
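The `CUTime`/`CSTime` change from `uint` to `int` follows proc(5), which documents `cutime` and `cstime` with the signed `%ld` format; `fmt.Fscan` errors out when asked to scan a negative value into an unsigned field. A sketch of reading the now-signed fields (assuming a standard `/proc` mount):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := p.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// CUTime and CSTime are plain ints after this change.
	fmt.Printf("children utime=%d stime=%d (clock ticks)\n", stat.CUTime, stat.CSTime)
}
```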
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index 6edd8333b3..594022ded4 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -33,37 +33,37 @@ type ProcStatus struct {
TGID int
// Peak virtual memory size.
- VmPeak uint64 // nolint:golint
+ VmPeak uint64 // nolint:revive
// Virtual memory size.
- VmSize uint64 // nolint:golint
+ VmSize uint64 // nolint:revive
// Locked memory size.
- VmLck uint64 // nolint:golint
+ VmLck uint64 // nolint:revive
// Pinned memory size.
- VmPin uint64 // nolint:golint
+ VmPin uint64 // nolint:revive
// Peak resident set size.
- VmHWM uint64 // nolint:golint
+ VmHWM uint64 // nolint:revive
// Resident set size (sum of RssAnon, RssFile, and RssShmem).
- VmRSS uint64 // nolint:golint
+ VmRSS uint64 // nolint:revive
// Size of resident anonymous memory.
- RssAnon uint64 // nolint:golint
+ RssAnon uint64 // nolint:revive
// Size of resident file mappings.
- RssFile uint64 // nolint:golint
+ RssFile uint64 // nolint:revive
// Size of resident shared memory.
- RssShmem uint64 // nolint:golint
+ RssShmem uint64 // nolint:revive
// Size of data segments.
- VmData uint64 // nolint:golint
+ VmData uint64 // nolint:revive
// Size of stack segments.
- VmStk uint64 // nolint:golint
+ VmStk uint64 // nolint:revive
// Size of text segments.
- VmExe uint64 // nolint:golint
+ VmExe uint64 // nolint:revive
// Shared library code size.
- VmLib uint64 // nolint:golint
+ VmLib uint64 // nolint:revive
// Page table entries size.
- VmPTE uint64 // nolint:golint
+ VmPTE uint64 // nolint:revive
// Size of second-level page tables.
- VmPMD uint64 // nolint:golint
+ VmPMD uint64 // nolint:revive
// Swapped-out virtual memory size by anonymous private.
- VmSwap uint64 // nolint:golint
+ VmSwap uint64 // nolint:revive
// Size of hugetlb memory portions
HugetlbPages uint64
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
new file mode 100644
index 0000000000..d46533ebf4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+func sysctlToPath(sysctl string) string {
+ return strings.Replace(sysctl, ".", "/", -1)
+}
+
+func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
+ value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl)))
+ if err != nil {
+ return nil, err
+ }
+ return strings.Fields(value), nil
+}
+
+func (fs FS) SysctlInts(sysctl string) ([]int, error) {
+ fields, err := fs.SysctlStrings(sysctl)
+ if err != nil {
+ return nil, err
+ }
+
+ values := make([]int, len(fields))
+ for i, f := range fields {
+ vp := util.NewValueParser(f)
+ values[i] = vp.Int()
+ if err := vp.Err(); err != nil {
+ return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err)
+ }
+ }
+ return values, nil
+}
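The new sysctl helpers translate dotted sysctl names into `/proc/sys` paths. A minimal sketch; the sysctl names are illustrative and assumed to exist on a typical Linux host:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// "net.ipv4.ip_local_port_range" resolves to
	// /proc/sys/net/ipv4/ip_local_port_range and holds two integers.
	ports, err := fs.SysctlInts("net.ipv4.ip_local_port_range")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ephemeral port range:", ports)

	host, err := fs.SysctlStrings("kernel.hostname")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("hostname:", host)
}
```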
diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go
index 28228164ef..5f7f32dc83 100644
--- a/vendor/github.com/prometheus/procfs/schedstat.go
+++ b/vendor/github.com/prometheus/procfs/schedstat.go
@@ -40,7 +40,7 @@ type Schedstat struct {
CPUs []*SchedstatCPU
}
-// SchedstatCPU contains the values from one "cpu<N>" line
+// SchedstatCPU contains the values from one "cpu<N>" line.
type SchedstatCPU struct {
CPUNum string
@@ -49,14 +49,14 @@ type SchedstatCPU struct {
RunTimeslices uint64
}
-// ProcSchedstat contains the values from /proc/<pid>/schedstat
+// ProcSchedstat contains the values from `/proc/<pid>/schedstat`.
type ProcSchedstat struct {
RunningNanoseconds uint64
WaitingNanoseconds uint64
RunTimeslices uint64
}
-// Schedstat reads data from /proc/schedstat
+// Schedstat reads data from `/proc/schedstat`.
func (fs FS) Schedstat() (*Schedstat, error) {
file, err := os.Open(fs.proc.Path("schedstat"))
if err != nil {
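For reference, a minimal sketch of iterating the per-CPU values exposed by `FS.Schedstat()` (assuming `/proc` is mounted):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.Schedstat()
	if err != nil {
		log.Fatal(err)
	}
	for _, cpu := range stats.CPUs {
		fmt.Printf("cpu %s: ran %d ns, waited %d ns\n",
			cpu.CPUNum, cpu.RunningNanoseconds, cpu.WaitingNanoseconds)
	}
}
```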
diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go
index 7896fd7242..bc9aaf5c28 100644
--- a/vendor/github.com/prometheus/procfs/slab.go
+++ b/vendor/github.com/prometheus/procfs/slab.go
@@ -137,7 +137,7 @@ func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
return s, nil
}
-// SlabInfo reads data from /proc/slabinfo
+// SlabInfo reads data from `/proc/slabinfo`.
func (fs FS) SlabInfo() (SlabInfo, error) {
// TODO: Consider passing options to allow for parsing different
// slabinfo versions. However, slabinfo 2.1 has been stable since
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
new file mode 100644
index 0000000000..559129cbca
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/softirqs.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Softirqs represents the softirq statistics.
+type Softirqs struct {
+ Hi []uint64
+ Timer []uint64
+ NetTx []uint64
+ NetRx []uint64
+ Block []uint64
+ IRQPoll []uint64
+ Tasklet []uint64
+ Sched []uint64
+ HRTimer []uint64
+ RCU []uint64
+}
+
+func (fs FS) Softirqs() (Softirqs, error) {
+ fileName := fs.proc.Path("softirqs")
+ data, err := util.ReadFileNoStat(fileName)
+ if err != nil {
+ return Softirqs{}, err
+ }
+
+ reader := bytes.NewReader(data)
+
+ return parseSoftirqs(reader)
+}
+
+func parseSoftirqs(r io.Reader) (Softirqs, error) {
+ var (
+ softirqs = Softirqs{}
+ scanner = bufio.NewScanner(r)
+ )
+
+ if !scanner.Scan() {
+ return Softirqs{}, fmt.Errorf("softirqs empty")
+ }
+
+ for scanner.Scan() {
+ parts := strings.Fields(scanner.Text())
+ var err error
+
+ // Require at least one per-CPU count on the line.
+ if len(parts) < 2 {
+ continue
+ }
+ switch {
+ case parts[0] == "HI:":
+ perCPU := parts[1:]
+ softirqs.Hi = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err)
+ }
+ }
+ case parts[0] == "TIMER:":
+ perCPU := parts[1:]
+ softirqs.Timer = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err)
+ }
+ }
+ case parts[0] == "NET_TX:":
+ perCPU := parts[1:]
+ softirqs.NetTx = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err)
+ }
+ }
+ case parts[0] == "NET_RX:":
+ perCPU := parts[1:]
+ softirqs.NetRx = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err)
+ }
+ }
+ case parts[0] == "BLOCK:":
+ perCPU := parts[1:]
+ softirqs.Block = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err)
+ }
+ }
+ case parts[0] == "IRQ_POLL:":
+ perCPU := parts[1:]
+ softirqs.IRQPoll = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err)
+ }
+ }
+ case parts[0] == "TASKLET:":
+ perCPU := parts[1:]
+ softirqs.Tasklet = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err)
+ }
+ }
+ case parts[0] == "SCHED:":
+ perCPU := parts[1:]
+ softirqs.Sched = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err)
+ }
+ }
+ case parts[0] == "HRTIMER:":
+ perCPU := parts[1:]
+ softirqs.HRTimer = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err)
+ }
+ }
+ case parts[0] == "RCU:":
+ perCPU := parts[1:]
+ softirqs.RCU = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err)
+ }
+ }
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err)
+ }
+
+ return softirqs, scanner.Err()
+}
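A minimal sketch of the new `FS.Softirqs()` accessor, totaling one softirq class across CPUs (assuming `/proc` is mounted):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	irqs, err := fs.Softirqs()
	if err != nil {
		log.Fatal(err)
	}
	// Each field holds one counter per CPU, in CPU order.
	var timer uint64
	for _, n := range irqs.Timer {
		timer += n
	}
	fmt.Printf("TIMER softirqs across %d CPUs: %d\n", len(irqs.Timer), timer)
}
```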
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index 6d8727541e..33f97caa08 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -41,7 +41,7 @@ type CPUStat struct {
// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
-// It is possible to get per-cpu stats by reading /proc/softirqs
+// It is possible to get per-cpu stats by reading `/proc/softirqs`.
type SoftIRQStat struct {
Hi uint64
Timer uint64
@@ -145,7 +145,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
// NewStat returns information about current cpu/process statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
//
-// Deprecated: use fs.Stat() instead
+// Deprecated: Use fs.Stat() instead.
func NewStat() (Stat, error) {
fs, err := NewFS(fs.DefaultProcMountPoint)
if err != nil {
@@ -155,15 +155,15 @@ func NewStat() (Stat, error) {
}
// NewStat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
//
-// Deprecated: use fs.Stat() instead
+// Deprecated: Use fs.Stat() instead.
func (fs FS) NewStat() (Stat, error) {
return fs.Stat()
}
// Stat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) Stat() (Stat, error) {
fileName := fs.proc.Path("stat")
data, err := util.ReadFileNoStat(fileName)
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
index cb13891414..20ceb77e2d 100644
--- a/vendor/github.com/prometheus/procfs/vm.go
+++ b/vendor/github.com/prometheus/procfs/vm.go
@@ -11,13 +11,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !windows
// +build !windows
package procfs
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -29,7 +29,7 @@ import (
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
-// and numa_zonelist_order (deprecated) which is a string
+// and numa_zonelist_order (deprecated) which is a string.
type VM struct {
AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes
BlockDump *int64 // /proc/sys/vm/block_dump
@@ -87,7 +87,7 @@ func (fs FS) VM() (*VM, error) {
return nil, fmt.Errorf("%s is not a directory", path)
}
- files, err := ioutil.ReadDir(path)
+ files, err := os.ReadDir(path)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
index 209e2ac987..c745a4c04f 100644
--- a/vendor/github.com/prometheus/procfs/zoneinfo.go
+++ b/vendor/github.com/prometheus/procfs/zoneinfo.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !windows
// +build !windows
package procfs
@@ -18,7 +19,7 @@ package procfs
import (
"bytes"
"fmt"
- "io/ioutil"
+ "os"
"regexp"
"strings"
@@ -72,7 +73,7 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
// structs containing the relevant info. More information available here:
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
+ data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
}
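The `vm.go` and `zoneinfo.go` hunks belong to the `io/ioutil` deprecation cleanup (the replacements landed in Go 1.16). `os.ReadFile` is a drop-in substitute, while `os.ReadDir` returns `[]os.DirEntry` instead of `[]fs.FileInfo`; a sketch:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// os.ReadFile replaces ioutil.ReadFile with an identical signature.
	data, err := os.ReadFile("/proc/zoneinfo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("zoneinfo: %d bytes\n", len(data))

	// os.ReadDir replaces ioutil.ReadDir; DirEntry defers the stat call,
	// but Name() is available on both types.
	entries, err := os.ReadDir("/proc/sys/vm")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("vm sysctls: %d entries\n", len(entries))
}
```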
diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml
index 0d6e61793a..439d3e1de4 100644
--- a/vendor/github.com/spf13/cobra/.golangci.yml
+++ b/vendor/github.com/spf13/cobra/.golangci.yml
@@ -1,3 +1,17 @@
+# Copyright 2013-2022 The Cobra Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
run:
deadline: 5m
diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml
deleted file mode 100644
index e0a3b50043..0000000000
--- a/vendor/github.com/spf13/cobra/.travis.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-language: go
-
-stages:
- - test
- - build
-
-go:
- - 1.12.x
- - 1.13.x
- - tip
-
-env: GO111MODULE=on
-
-before_install:
- - go get -u github.com/kyoh86/richgo
- - go get -u github.com/mitchellh/gox
- - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin latest
-
-matrix:
- allow_failures:
- - go: tip
- include:
- - stage: build
- go: 1.13.x
- script: make cobra_generator
-
-script:
- - make test
diff --git a/vendor/github.com/spf13/cobra/CHANGELOG.md b/vendor/github.com/spf13/cobra/CHANGELOG.md
deleted file mode 100644
index 8a23b4f851..0000000000
--- a/vendor/github.com/spf13/cobra/CHANGELOG.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Cobra Changelog
-
-## v1.1.3
-
-* **Fix:** release-branch.cobra1.1 only: Revert "Deprecate Go < 1.14" to maintain backward compatibility
-
-## v1.1.2
-
-### Notable Changes
-
-* Bump license year to 2021 in golden files (#1309) @Bowbaq
-* Enhance PowerShell completion with custom comp (#1208) @Luap99
-* Update gopkg.in/yaml.v2 to v2.4.0: The previous breaking change in yaml.v2 v2.3.0 has been reverted, see go-yaml/yaml#670
-* Documentation readability improvements (#1228 etc.) @zaataylor etc.
-* Use golangci-lint: Repair warnings and errors resulting from linting (#1044) @umarcor
-
-## v1.1.1
-
-* **Fix:** yaml.v2 2.3.0 contained a unintended breaking change. This release reverts to yaml.v2 v2.2.8 which has recent critical CVE fixes, but does not have the breaking changes. See https://github.com/spf13/cobra/pull/1259 for context.
-* **Fix:** correct internal formatting for go-md2man v2 (which caused man page generation to be broken). See https://github.com/spf13/cobra/issues/1049 for context.
-
-## v1.1.0
-
-### Notable Changes
-
-* Extend Go completions and revamp zsh comp (#1070)
-* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` (#1104) @jpmcb
-* Add completion for help command (#1136)
-* Complete subcommands when TraverseChildren is set (#1171)
-* Fix stderr printing functions (#894)
-* fix: fish output redirection (#1247)
-
-## v1.0.0
-
-Announcing v1.0.0 of Cobra. 🎉
-
-### Notable Changes
-* Fish completion (including support for Go custom completion) @marckhouzam
-* API (urgent): Rename BashCompDirectives to ShellCompDirectives @marckhouzam
-* Remove/replace SetOutput on Command - deprecated @jpmcb
-* add support for autolabel stale PR @xchapter7x
-* Add Labeler Actions @xchapter7x
-* Custom completions coded in Go (instead of Bash) @marckhouzam
-* Partial Revert of #922 @jharshman
-* Add Makefile to project @jharshman
-* Correct documentation for InOrStdin @desponda
-* Apply formatting to templates @jharshman
-* Revert change so help is printed on stdout again @marckhouzam
-* Update md2man to v2.0.0 @pdf
-* update viper to v1.4.0 @umarcor
-* Update cmd/root.go example in README.md @jharshman
diff --git a/vendor/github.com/spf13/cobra/MAINTAINERS b/vendor/github.com/spf13/cobra/MAINTAINERS
new file mode 100644
index 0000000000..4c5ac3dd99
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/MAINTAINERS
@@ -0,0 +1,13 @@
+maintainers:
+- spf13
+- johnSchnake
+- jpmcb
+- marckhouzam
+inactive:
+- anthonyfok
+- bep
+- bogem
+- broady
+- eparis
+- jharshman
+- wfernandes
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile
index 472c73bf16..c433a01bce 100644
--- a/vendor/github.com/spf13/cobra/Makefile
+++ b/vendor/github.com/spf13/cobra/Makefile
@@ -6,14 +6,14 @@ $(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://insta
endif
ifeq (, $(shell which richgo))
-$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo")
+$(warning "could not find richgo in $(PATH), run: go install github.com/kyoh86/richgo@latest")
endif
-.PHONY: fmt lint test cobra_generator install_deps clean
+.PHONY: fmt lint test install_deps clean
default: all
-all: fmt test cobra_generator
+all: fmt test
fmt:
$(info ******************** checking formatting ********************)
@@ -23,15 +23,10 @@ lint:
$(info ******************** running lint tools ********************)
golangci-lint run -v
-test: install_deps lint
+test: install_deps
$(info ******************** running tests ********************)
richgo test -v ./...
-cobra_generator: install_deps
- $(info ******************** building generator ********************)
- mkdir -p $(BIN)
- make -C cobra all
-
install_deps:
$(info ******************** downloading dependencies ********************)
go get -v ./...
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index a1b13ddda6..7cc726beb4 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -1,61 +1,35 @@
![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
-Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files.
+Cobra is a library for creating powerful modern CLI applications.
-Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/),
-[Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to
+Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
+[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra.
[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
-[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
-[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
+[![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra)
[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199)
-# Table of Contents
-
-- [Overview](#overview)
-- [Concepts](#concepts)
- * [Commands](#commands)
- * [Flags](#flags)
-- [Installing](#installing)
-- [Getting Started](#getting-started)
- * [Using the Cobra Generator](#using-the-cobra-generator)
- * [Using the Cobra Library](#using-the-cobra-library)
- * [Working with Flags](#working-with-flags)
- * [Positional and Custom Arguments](#positional-and-custom-arguments)
- * [Example](#example)
- * [Help Command](#help-command)
- * [Usage Message](#usage-message)
- * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks)
- * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens)
- * [Generating documentation for your command](#generating-documentation-for-your-command)
- * [Generating shell completions](#generating-shell-completions)
-- [Contributing](CONTRIBUTING.md)
-- [License](#license)
-
# Overview
Cobra is a library providing a simple interface to create powerful modern CLI
interfaces similar to git & go tools.
-Cobra is also an application that will generate your application scaffolding to rapidly
-develop a Cobra-based application.
-
Cobra provides:
* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
* Fully POSIX-compliant flags (including short & long versions)
* Nested subcommands
* Global, local and cascading flags
-* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
* Intelligent suggestions (`app srver`... did you mean `app server`?)
* Automatic help generation for commands and flags
+* Grouping help for subcommands
* Automatic help flag recognition of `-h`, `--help`, etc.
* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell)
* Automatically generated man pages for your application
* Command aliases so you can change things without breaking them
* The flexibility to define your own help, usage, etc.
-* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
+* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps
# Concepts
@@ -67,9 +41,9 @@ The best applications read like sentences when used, and as a result, users
intuitively know how to interact with them.
The pattern to follow is
-`APPNAME VERB NOUN --ADJECTIVE.`
+`APPNAME VERB NOUN --ADJECTIVE`
or
-`APPNAME COMMAND ARG --FLAG`
+`APPNAME COMMAND ARG --FLAG`.
A few good real world examples may better illustrate this point.
@@ -89,7 +63,7 @@ have children commands and optionally run an action.
In the example above, 'server' is the command.
-[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
+[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command)
## Flags
@@ -106,654 +80,32 @@ which maintains the same interface while adding POSIX compliance.
# Installing
Using Cobra is easy. First, use `go get` to install the latest version
-of the library. This command will install the `cobra` generator executable
-along with the library and its dependencies:
-
- go get -u github.com/spf13/cobra
-
-Next, include Cobra in your application:
-
-```go
-import "github.com/spf13/cobra"
-```
-
-# Getting Started
-
-While you are welcome to provide your own organization, typically a Cobra-based
-application will follow the following organizational structure:
-
-```
- ▾ appName/
- ▾ cmd/
- add.go
- your.go
- commands.go
- here.go
- main.go
-```
-
-In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
-
-```go
-package main
-
-import (
- "{pathToYourApp}/cmd"
-)
-
-func main() {
- cmd.Execute()
-}
-```
-
-## Using the Cobra Generator
-
-Cobra provides its own program that will create your application and add any
-commands you want. It's the easiest way to incorporate Cobra into your application.
-
-[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
-
-## Using the Cobra Library
-
-To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
-You will optionally provide additional commands as you see fit.
-
-### Create rootCmd
-
-Cobra doesn't require any special constructors. Simply create your commands.
-
-Ideally you place this in app/cmd/root.go:
-
-```go
-var rootCmd = &cobra.Command{
- Use: "hugo",
- Short: "Hugo is a very fast static site generator",
- Long: `A Fast and Flexible Static Site Generator built with
- love by spf13 and friends in Go.
- Complete documentation is available at http://hugo.spf13.com`,
- Run: func(cmd *cobra.Command, args []string) {
- // Do Stuff Here
- },
-}
-
-func Execute() {
- if err := rootCmd.Execute(); err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
-}
-```
-
-You will additionally define flags and handle configuration in your init() function.
-
-For example cmd/root.go:
-
-```go
-package cmd
-
-import (
- "fmt"
- "os"
-
- homedir "github.com/mitchellh/go-homedir"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var (
- // Used for flags.
- cfgFile string
- userLicense string
-
- rootCmd = &cobra.Command{
- Use: "cobra",
- Short: "A generator for Cobra based Applications",
- Long: `Cobra is a CLI library for Go that empowers applications.
-This application is a tool to generate the needed files
-to quickly create a Cobra application.`,
- }
-)
-
-// Execute executes the root command.
-func Execute() error {
- return rootCmd.Execute()
-}
-
-func init() {
- cobra.OnInitialize(initConfig)
-
- rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
- rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
- rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
- rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration")
- viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
- viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
- viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
- viper.SetDefault("license", "apache")
-
- rootCmd.AddCommand(addCmd)
- rootCmd.AddCommand(initCmd)
-}
-
-func initConfig() {
- if cfgFile != "" {
- // Use config file from the flag.
- viper.SetConfigFile(cfgFile)
- } else {
- // Find home directory.
- home, err := homedir.Dir()
- cobra.CheckErr(err)
-
- // Search config in home directory with name ".cobra" (without extension).
- viper.AddConfigPath(home)
- viper.SetConfigName(".cobra")
- }
-
- viper.AutomaticEnv()
-
- if err := viper.ReadInConfig(); err == nil {
- fmt.Println("Using config file:", viper.ConfigFileUsed())
- }
-}
-```
-
-### Create your main.go
-
-With the root command you need to have your main function execute it.
-Execute should be run on the root for clarity, though it can be called on any command.
-
-In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
-
-```go
-package main
-
-import (
- "{pathToYourApp}/cmd"
-)
-
-func main() {
- cmd.Execute()
-}
-```
-
-### Create additional commands
-
-Additional commands can be defined and typically are each given their own file
-inside of the cmd/ directory.
-
-If you wanted to create a version command you would create cmd/version.go and
-populate it with the following:
-
-```go
-package cmd
-
-import (
- "fmt"
-
- "github.com/spf13/cobra"
-)
-
-func init() {
- rootCmd.AddCommand(versionCmd)
-}
-
-var versionCmd = &cobra.Command{
- Use: "version",
- Short: "Print the version number of Hugo",
- Long: `All software has versions. This is Hugo's`,
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
- },
-}
-```
-
-### Returning and handling errors
-
-If you wish to return an error to the caller of a command, `RunE` can be used.
-
-```go
-package cmd
-
-import (
- "fmt"
-
- "github.com/spf13/cobra"
-)
-
-func init() {
- rootCmd.AddCommand(tryCmd)
-}
-
-var tryCmd = &cobra.Command{
- Use: "try",
- Short: "Try and possibly fail at something",
- RunE: func(cmd *cobra.Command, args []string) error {
- if err := someFunc(); err != nil {
- return err
- }
- return nil
- },
-}
-```
-
-The error can then be caught at the execute function call.
-
-## Working with Flags
-
-Flags provide modifiers to control how the action command operates.
-
-### Assign flags to a command
-
-Since the flags are defined and used in different locations, we need to
-define a variable outside with the correct scope to assign the flag to
-work with.
-
-```go
-var Verbose bool
-var Source string
-```
-
-There are two different approaches to assign a flag.
-
-### Persistent Flags
-
-A flag can be 'persistent', meaning that this flag will be available to the
-command it's assigned to as well as every command under that command. For
-global flags, assign a flag as a persistent flag on the root.
-
-```go
-rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
-```
-
-### Local Flags
-
-A flag can also be assigned locally, which will only apply to that specific command.
-
-```go
-localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
-```
-
-### Local Flag on Parent Commands
-
-By default, Cobra only parses local flags on the target command, and any local flags on
-parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will
-parse local flags on each command before executing the target command.
-
-```go
-command := cobra.Command{
- Use: "print [OPTIONS] [COMMANDS]",
- TraverseChildren: true,
-}
-```
-
-### Bind Flags with Config
-
-You can also bind your flags with [viper](https://github.com/spf13/viper):
-```go
-var author string
+of the library.
-func init() {
- rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
- viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
-}
```
-
-In this example, the persistent flag `author` is bound with `viper`.
-**Note**: the variable `author` will not be set to the value from config,
-when the `--author` flag is not provided by user.
-
-More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
-
-### Required flags
-
-Flags are optional by default. If instead you wish your command to report an error
-when a flag has not been set, mark it as required:
-```go
-rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
-rootCmd.MarkFlagRequired("region")
-```
-
-Or, for persistent flags:
-```go
-rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
-rootCmd.MarkPersistentFlagRequired("region")
-```
-
-## Positional and Custom Arguments
-
-Validation of positional arguments can be specified using the `Args` field
-of `Command`.
-
-The following validators are built in:
-
-- `NoArgs` - the command will report an error if there are any positional args.
-- `ArbitraryArgs` - the command will accept any args.
-- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
-- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
-- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
-- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
-- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command`
-- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
-
-An example of setting the custom validator:
-
-```go
-var cmd = &cobra.Command{
- Short: "hello",
- Args: func(cmd *cobra.Command, args []string) error {
- if len(args) < 1 {
- return errors.New("requires a color argument")
- }
- if myapp.IsValidColor(args[0]) {
- return nil
- }
- return fmt.Errorf("invalid color specified: %s", args[0])
- },
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("Hello, World!")
- },
-}
-```
-
-## Example
-
-In the example below, we have defined three commands. Two are at the top level
-and one (cmdTimes) is a child of one of the top commands. In this case the root
-is not executable, meaning that a subcommand is required. This is accomplished
-by not providing a 'Run' for the 'rootCmd'.
-
-We have only defined one flag for a single command.
-
-More documentation about flags is available at https://github.com/spf13/pflag
-
-```go
-package main
-
-import (
- "fmt"
- "strings"
-
- "github.com/spf13/cobra"
-)
-
-func main() {
- var echoTimes int
-
- var cmdPrint = &cobra.Command{
- Use: "print [string to print]",
- Short: "Print anything to the screen",
- Long: `print is for printing anything back to the screen.
-For many years people have printed back to the screen.`,
- Args: cobra.MinimumNArgs(1),
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("Print: " + strings.Join(args, " "))
- },
- }
-
- var cmdEcho = &cobra.Command{
- Use: "echo [string to echo]",
- Short: "Echo anything to the screen",
- Long: `echo is for echoing anything back.
-Echo works a lot like print, except it has a child command.`,
- Args: cobra.MinimumNArgs(1),
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("Echo: " + strings.Join(args, " "))
- },
- }
-
- var cmdTimes = &cobra.Command{
- Use: "times [string to echo]",
- Short: "Echo anything to the screen more times",
- Long: `echo things multiple times back to the user by providing
-a count and a string.`,
- Args: cobra.MinimumNArgs(1),
- Run: func(cmd *cobra.Command, args []string) {
- for i := 0; i < echoTimes; i++ {
- fmt.Println("Echo: " + strings.Join(args, " "))
- }
- },
- }
-
- cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
-
- var rootCmd = &cobra.Command{Use: "app"}
- rootCmd.AddCommand(cmdPrint, cmdEcho)
- cmdEcho.AddCommand(cmdTimes)
- rootCmd.Execute()
-}
-```
-
-For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/).
-
-## Help Command
-
-Cobra automatically adds a help command to your application when you have subcommands.
-This will be called when a user runs 'app help'. Additionally, help will also
-support all other commands as input. Say, for instance, you have a command called
-'create' without any additional configuration; Cobra will work when 'app help
-create' is called. Every command will automatically have the '--help' flag added.
-
-### Example
-
-The following output is automatically generated by Cobra. Nothing beyond the
-command and flag definitions are needed.
-
- $ cobra help
-
- Cobra is a CLI library for Go that empowers applications.
- This application is a tool to generate the needed files
- to quickly create a Cobra application.
-
- Usage:
- cobra [command]
-
- Available Commands:
- add Add a command to a Cobra Application
- help Help about any command
- init Initialize a Cobra Application
-
- Flags:
- -a, --author string author name for copyright attribution (default "YOUR NAME")
- --config string config file (default is $HOME/.cobra.yaml)
- -h, --help help for cobra
- -l, --license string name of license for the project
- --viper use Viper for configuration (default true)
-
- Use "cobra [command] --help" for more information about a command.
-
-
-Help is just a command like any other. There is no special logic or behavior
-around it. In fact, you can provide your own if you want.
-
-### Defining your own help
-
-You can provide your own Help command or your own template for the default command to use
-with following functions:
-
-```go
-cmd.SetHelpCommand(cmd *Command)
-cmd.SetHelpFunc(f func(*Command, []string))
-cmd.SetHelpTemplate(s string)
+go get -u github.com/spf13/cobra@latest
```
-The latter two will also apply to any children commands.
-
-## Usage Message
-
-When the user provides an invalid flag or invalid command, Cobra responds by
-showing the user the 'usage'.
-
-### Example
-You may recognize this from the help above. That's because the default help
-embeds the usage as part of its output.
-
- $ cobra --invalid
- Error: unknown flag: --invalid
- Usage:
- cobra [command]
-
- Available Commands:
- add Add a command to a Cobra Application
- help Help about any command
- init Initialize a Cobra Application
-
- Flags:
- -a, --author string author name for copyright attribution (default "YOUR NAME")
- --config string config file (default is $HOME/.cobra.yaml)
- -h, --help help for cobra
- -l, --license string name of license for the project
- --viper use Viper for configuration (default true)
-
- Use "cobra [command] --help" for more information about a command.
-
-### Defining your own usage
-You can provide your own usage function or template for Cobra to use.
-Like help, the function and template are overridable through public methods:
-
-```go
-cmd.SetUsageFunc(f func(*Command) error)
-cmd.SetUsageTemplate(s string)
-```
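-
-For example, a minimal sketch that replaces the usage output entirely
-(illustrative, not the default behavior):
-
-```go
-rootCmd.SetUsageFunc(func(c *cobra.Command) error {
-	fmt.Fprintf(c.OutOrStderr(), "Usage: %s\n", c.UseLine())
-	return nil
-})
-```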
-
-## Version Flag
-
-Cobra adds a top-level '--version' flag if the Version field is set on the root command.
-Running an application with the '--version' flag will print the version to stdout using
-the version template. The template can be customized using the
-`cmd.SetVersionTemplate(s string)` function.
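-
-For example, setting the Version field is all that is needed to enable the flag:
-
-```go
-var rootCmd = &cobra.Command{
-	Use:     "app",
-	Version: "0.1.0",
-}
-```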
-
-## PreRun and PostRun Hooks
-
-It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
-
-- `PersistentPreRun`
-- `PreRun`
-- `Run`
-- `PostRun`
-- `PersistentPostRun`
-
-An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
-
-```go
-package main
-
-import (
- "fmt"
-
- "github.com/spf13/cobra"
-)
-
-func main() {
-
- var rootCmd = &cobra.Command{
- Use: "root [sub]",
- Short: "My root command",
- PersistentPreRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
- },
- PreRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
- },
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside rootCmd Run with args: %v\n", args)
- },
- PostRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
- },
- PersistentPostRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
- },
- }
-
- var subCmd = &cobra.Command{
- Use: "sub [no options!]",
- Short: "My subcommand",
- PreRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
- },
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside subCmd Run with args: %v\n", args)
- },
- PostRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
- },
- PersistentPostRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
- },
- }
-
- rootCmd.AddCommand(subCmd)
-
- rootCmd.SetArgs([]string{""})
- rootCmd.Execute()
- fmt.Println()
- rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
- rootCmd.Execute()
-}
-```
-
-Output:
-```
-Inside rootCmd PersistentPreRun with args: []
-Inside rootCmd PreRun with args: []
-Inside rootCmd Run with args: []
-Inside rootCmd PostRun with args: []
-Inside rootCmd PersistentPostRun with args: []
-
-Inside rootCmd PersistentPreRun with args: [arg1 arg2]
-Inside subCmd PreRun with args: [arg1 arg2]
-Inside subCmd Run with args: [arg1 arg2]
-Inside subCmd PostRun with args: [arg1 arg2]
-Inside subCmd PersistentPostRun with args: [arg1 arg2]
-```
-
-## Suggestions when "unknown command" happens
-
-Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo is made. For example:
-
-```
-$ hugo srever
-Error: unknown command "srever" for "hugo"
-
-Did you mean this?
- server
-
-Run 'hugo --help' for usage.
-```
-
-Suggestions are automatically generated based on every subcommand registered, using an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
-
-If you need to disable suggestions or tweak the string distance in your command, use:
+Next, include Cobra in your application:
```go
-command.DisableSuggestions = true
+import "github.com/spf13/cobra"
```
-or
-
-```go
-command.SuggestionsMinimumDistance = 1
-```
+# Usage
+`cobra-cli` is a command line program to generate cobra applications and command files.
+It will bootstrap your application scaffolding to rapidly
+develop a Cobra-based application. It is the easiest way to incorporate Cobra into your application.
-You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but make sense in your set of commands, and for which you don't want aliases. Example:
+It can be installed by running:
```
-$ kubectl remove
-Error: unknown command "remove" for "kubectl"
-
-Did you mean this?
- delete
-
-Run 'kubectl help' for usage.
+go install github.com/spf13/cobra-cli@latest
```
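-
-A minimal sketch of declaring `SuggestFor` for such a command (the names are
-illustrative):
-
-```go
-var deleteCmd = &cobra.Command{
-	Use:        "delete",
-	SuggestFor: []string{"remove", "rm"},
-}
-```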
-## Generating documentation for your command
-
-Cobra can generate documentation based on subcommands, flags, etc. Read more about it in the [docs generation documentation](doc/README.md).
-
-## Generating shell completions
+For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md).
-Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md).
+For complete details on using the Cobra library, please read [The Cobra User Guide](user_guide.md).
# License
diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go
new file mode 100644
index 0000000000..95e03aecb6
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/active_help.go
@@ -0,0 +1,63 @@
+// Copyright 2013-2022 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+const (
+ activeHelpMarker = "_activeHelp_ "
+ // The below values should not be changed: programs will be using them explicitly
+ // in their user documentation, and users will be using them explicitly.
+ activeHelpEnvVarSuffix = "_ACTIVE_HELP"
+ activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP"
+ activeHelpGlobalDisable = "0"
+)
+
+// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp.
+// Such strings will be processed by the completion script and will be shown as ActiveHelp
+// to the user.
+// The array parameter should be the array that will contain the completions.
+// This function can be called multiple times before and/or after completions are added to
+// the array. Each time this function is called with the same array, the new
+// ActiveHelp line will be shown below the previous ones when completion is triggered.
+func AppendActiveHelp(compArray []string, activeHelpStr string) []string {
+ return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr))
+}
+
+// GetActiveHelpConfig returns the value of the ActiveHelp environment variable
+// <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the root command in upper
+// case, with all - replaced by _.
+// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP
+// is set to "0".
+func GetActiveHelpConfig(cmd *Command) string {
+ activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar)
+ if activeHelpCfg != activeHelpGlobalDisable {
+ activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name()))
+ }
+ return activeHelpCfg
+}
+
+// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment
+// variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the
+// root command in upper case, with all - replaced by _.
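+// For example (hypothetical program name), a root command named "my-tool"
+// yields the environment variable MY_TOOL_ACTIVE_HELP.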
+func activeHelpEnvVar(name string) string {
+ // This format should not be changed: users will be using it explicitly.
+ activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix))
+ return strings.ReplaceAll(activeHelpEnvVar, "-", "_")
+}
diff --git a/vendor/github.com/spf13/cobra/active_help.md b/vendor/github.com/spf13/cobra/active_help.md
new file mode 100644
index 0000000000..5e7f59af38
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/active_help.md
@@ -0,0 +1,157 @@
+# Active Help
+
+Active Help is a framework provided by Cobra which allows a program to define messages (hints, warnings, etc.) that will be printed during program usage. It aims to make it easier for your users to learn how to use your program. If configured by the program, Active Help is printed when the user triggers shell completion.
+
+For example,
+```
+bash-5.1$ helm repo add [tab]
+You must choose a name for the repo you are adding.
+
+bash-5.1$ bin/helm package [tab]
+Please specify the path to the chart to package
+
+bash-5.1$ bin/helm package [tab][tab]
+bin/ internal/ scripts/ pkg/ testdata/
+```
+
+**Hint**: A good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions to guide the user in knowing what is expected by the program.
+
+## Supported shells
+
+Active Help is currently only supported for the following shells:
+- Bash (using [bash completion V2](shell_completions.md#bash-completion-v2) only). Note that bash 4.4 or higher is required for the prompt to appear when an Active Help message is printed.
+- Zsh
+
+## Adding Active Help messages
+
+As Active Help uses the shell completion system, the implementation of Active Help messages is done by enhancing custom dynamic completions. If you are not familiar with dynamic completions, please refer to [Shell Completions](shell_completions.md).
+
+Adding Active Help is done through the use of the `cobra.AppendActiveHelp(...)` function, where the program repeatedly adds Active Help messages to the list of completions. Keep reading for details.
+
+### Active Help for nouns
+
+Adding Active Help when completing a noun is done within the `ValidArgsFunction(...)` of a command. Please notice the use of `cobra.AppendActiveHelp(...)` in the following example:
+
+```go
+cmd := &cobra.Command{
+ Use: "add [NAME] [URL]",
+ Short: "add a chart repository",
+ Args: require.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return addRepo(args)
+ },
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ var comps []string
+ if len(args) == 0 {
+ comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding")
+ } else if len(args) == 1 {
+ comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding")
+ } else {
+ comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments")
+ }
+ return comps, cobra.ShellCompDirectiveNoFileComp
+ },
+}
+```
+The example above defines the completions (none, in this specific example) as well as the Active Help messages for the `helm repo add` command. It yields the following behavior:
+```
+bash-5.1$ helm repo add [tab]
+You must choose a name for the repo you are adding
+
+bash-5.1$ helm repo add grafana [tab]
+You must specify the URL for the repo you are adding
+
+bash-5.1$ helm repo add grafana https://grafana.github.io/helm-charts [tab]
+This command does not take any more arguments
+```
+
+**Hint**: As can be seen in the above example, a good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions.
+
+### Active Help for flags
+
+Providing Active Help for flags is done in the same fashion as for nouns, but using the completion function registered for the flag. For example:
+```go
+_ = cmd.RegisterFlagCompletionFunc("version", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 2 {
+ return cobra.AppendActiveHelp(nil, "You must first specify the chart to install before the --version flag can be completed"), cobra.ShellCompDirectiveNoFileComp
+ }
+ return compVersionFlag(args[1], toComplete)
+ })
+```
+The example above prints an Active Help message when not enough information was given by the user to complete the `--version` flag.
+```
+bash-5.1$ bin/helm install myrelease --version 2.0.[tab]
+You must first specify the chart to install before the --version flag can be completed
+
+bash-5.1$ bin/helm install myrelease bitnami/solr --version 2.0.[tab][tab]
+2.0.1 2.0.2 2.0.3
+```
+
+## User control of Active Help
+
+You may want to allow your users to disable Active Help or choose between different levels of Active Help. It is entirely up to the program to define the type of configurability of Active Help that it wants to offer, if any.
+Allowing users to configure Active Help is entirely optional; you can use Active Help in your program without doing anything about Active Help configuration.
+
+The way to configure Active Help is to use the program's Active Help environment
+variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where `<PROGRAM>` is the name of your
+program in uppercase with any `-` replaced by an `_`. The variable should be set by the user to whatever
+Active Help configuration values are supported by the program.
+
+For example, say `helm` has chosen to support three levels for Active Help: `on`, `off`, `local`. Then a user
+would set the desired behavior to `local` by doing `export HELM_ACTIVE_HELP=local` in their shell.
+
+For simplicity, when in `cmd.ValidArgsFunction(...)` or a flag's completion function, the program should read the
+Active Help configuration using the `cobra.GetActiveHelpConfig(cmd)` function and select what Active Help messages
+should or should not be added (instead of reading the environment variable directly).
+
+For example:
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ activeHelpLevel := cobra.GetActiveHelpConfig(cmd)
+
+ var comps []string
+ if len(args) == 0 {
+ if activeHelpLevel != "off" {
+ comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding")
+ }
+ } else if len(args) == 1 {
+ if activeHelpLevel != "off" {
+ comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding")
+ }
+ } else {
+ if activeHelpLevel == "local" {
+ comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments")
+ }
+ }
+ return comps, cobra.ShellCompDirectiveNoFileComp
+},
+```
+**Note 1**: If the `<PROGRAM>_ACTIVE_HELP` environment variable is set to the string "0", Cobra will automatically disable all Active Help output (even if some output was specified by the program using the `cobra.AppendActiveHelp(...)` function). Using "0" can simplify your code in situations where you want to blindly disable Active Help without having to call `cobra.GetActiveHelpConfig(cmd)` explicitly.
+
+**Note 2**: If a user wants to disable Active Help for every single program based on Cobra, she can set the environment variable `COBRA_ACTIVE_HELP` to "0". In this case `cobra.GetActiveHelpConfig(cmd)` will return "0" no matter what the variable `<PROGRAM>_ACTIVE_HELP` is set to.
+
+**Note 3**: If the user does not set `<PROGRAM>_ACTIVE_HELP` or `COBRA_ACTIVE_HELP` (which will be a common case), the default value for the Active Help configuration returned by `cobra.GetActiveHelpConfig(cmd)` will be the empty string.
+
+## Active Help with Cobra's default completion command
+
+Cobra provides a default `completion` command for programs that wish to use it.
+When using the default `completion` command, Active Help is configurable in the same
+fashion as described above using environment variables. You may wish to document this in more
+detail for your users.
+
+## Debugging Active Help
+
+Debugging your Active Help code is done in the same way as debugging your dynamic completion code, which is with Cobra's hidden `__complete` command. Please refer to [debugging shell completion](shell_completions.md#debugging) for details.
+
+When debugging with the `__complete` command, if you want to specify different Active Help configurations, you should use the Active Help environment variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where `<PROGRAM>` is the name of your program in uppercase, with any `-` replaced by an `_`. For example, we can test deactivating some Active Help as shown below:
+```
+$ HELM_ACTIVE_HELP=1 bin/helm __complete install wordpress bitnami/h<ENTER>
+bitnami/haproxy
+bitnami/harbor
+_activeHelp_ WARNING: cannot re-use a name that is still in use
+:0
+Completion ended with directive: ShellCompDirectiveDefault
+
+$ HELM_ACTIVE_HELP=0 bin/helm __complete install wordpress bitnami/h<ENTER>
+bitnami/haproxy
+bitnami/harbor
+:0
+Completion ended with directive: ShellCompDirectiveDefault
+```
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
index 70e9b26291..2c1f99e787 100644
--- a/vendor/github.com/spf13/cobra/args.go
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -1,3 +1,17 @@
+// Copyright 2013-2022 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package cobra
import (
@@ -32,7 +46,8 @@ func NoArgs(cmd *Command, args []string) error {
return nil
}
-// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
+// OnlyValidArgs returns an error if there are any positional args that are not in
+// the `ValidArgs` field of `Command`.
func OnlyValidArgs(cmd *Command, args []string) error {
if len(cmd.ValidArgs) > 0 {
// Remove any description that may be included in ValidArgs.
@@ -41,7 +56,6 @@ func OnlyValidArgs(cmd *Command, args []string) error {
for _, v := range cmd.ValidArgs {
validArgs = append(validArgs, strings.Split(v, "\t")[0])
}
-
for _, v := range args {
if !stringInSlice(v, validArgs) {
return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
@@ -86,24 +100,32 @@ func ExactArgs(n int) PositionalArgs {
}
}
-// ExactValidArgs returns an error if
-// there are not exactly N positional args OR
-// there are any positional args that are not in the `ValidArgs` field of `Command`
-func ExactValidArgs(n int) PositionalArgs {
+// RangeArgs returns an error if the number of args is not within the expected range.
+func RangeArgs(min int, max int) PositionalArgs {
return func(cmd *Command, args []string) error {
- if err := ExactArgs(n)(cmd, args); err != nil {
- return err
+ if len(args) < min || len(args) > max {
+ return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
}
- return OnlyValidArgs(cmd, args)
+ return nil
}
}
-// RangeArgs returns an error if the number of args is not within the expected range.
-func RangeArgs(min int, max int) PositionalArgs {
+// MatchAll allows combining several PositionalArgs to work in concert.
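+//
+// A usage sketch (hypothetical command): accept exactly one argument that
+// must also appear in ValidArgs:
+//
+//	Args: MatchAll(ExactArgs(1), OnlyValidArgs)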
+func MatchAll(pargs ...PositionalArgs) PositionalArgs {
return func(cmd *Command, args []string) error {
- if len(args) < min || len(args) > max {
- return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
+ for _, parg := range pargs {
+ if err := parg(cmd, args); err != nil {
+ return err
+ }
}
return nil
}
}
+
+// ExactValidArgs returns an error if there are not exactly N positional args OR
+// there are any positional args that are not in the `ValidArgs` field of `Command`
+//
+// Deprecated: use MatchAll(ExactArgs(n), OnlyValidArgs) instead
+func ExactValidArgs(n int) PositionalArgs {
+ return MatchAll(ExactArgs(n), OnlyValidArgs)
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
index 7106147937..3acdb27974 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -1,3 +1,17 @@
+// Copyright 2013-2022 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package cobra
import (
@@ -24,7 +38,7 @@ func writePreamble(buf io.StringWriter, name string) {
WriteStringAndCheck(buf, fmt.Sprintf(`
__%[1]s_debug()
{
- if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
+ if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then
echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
fi
}
@@ -73,7 +87,8 @@ __%[1]s_handle_go_custom_completion()
# Prepare the command to request completions for the program.
# Calling ${words[0]} instead of directly %[1]s allows handling aliases
args=("${words[@]:1}")
- requestComp="${words[0]} %[2]s ${args[*]}"
+ # Disable ActiveHelp which is not supported for bash completion v1
+ requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}"
lastParam=${words[$((${#words[@]}-1))]}
lastChar=${lastParam:$((${#lastParam}-1)):1}
@@ -99,7 +114,7 @@ __%[1]s_handle_go_custom_completion()
directive=0
fi
__%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
- __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}"
+ __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}"
if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
# Error code. No completion.
@@ -125,7 +140,7 @@ __%[1]s_handle_go_custom_completion()
local fullFilter filter filteringCmd
# Do not use quotes around the $out variable or else newline
# characters will be kept.
- for filter in ${out[*]}; do
+ for filter in ${out}; do
fullFilter+="$filter|"
done
@@ -134,9 +149,9 @@ __%[1]s_handle_go_custom_completion()
$filteringCmd
elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
# File completion for directories only
- local subDir
+ local subdir
# Use printf to strip any trailing newline
- subdir=$(printf "%%s" "${out[0]}")
+ subdir=$(printf "%%s" "${out}")
if [ -n "$subdir" ]; then
__%[1]s_debug "Listing directories in $subdir"
__%[1]s_handle_subdirs_in_dir_flag "$subdir"
@@ -147,7 +162,7 @@ __%[1]s_handle_go_custom_completion()
else
while IFS='' read -r comp; do
COMPREPLY+=("$comp")
- done < <(compgen -W "${out[*]}" -- "$cur")
+ done < <(compgen -W "${out}" -- "$cur")
fi
}
@@ -187,13 +202,19 @@ __%[1]s_handle_reply()
PREFIX=""
cur="${cur#*=}"
${flags_completion[${index}]}
- if [ -n "${ZSH_VERSION}" ]; then
+ if [ -n "${ZSH_VERSION:-}" ]; then
# zsh completion needs --flag= prefix
eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
fi
fi
fi
- return 0;
+
+ if [[ -z "${flag_parsing_disabled}" ]]; then
+ # If flag parsing is enabled, we have completed the flags and can return.
+ # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough
+ # to possibly call handle_go_custom_completion.
+ return 0;
+ fi
;;
esac
@@ -232,13 +253,13 @@ __%[1]s_handle_reply()
fi
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
- if declare -F __%[1]s_custom_func >/dev/null; then
- # try command name qualified custom func
- __%[1]s_custom_func
- else
- # otherwise fall back to unqualified for compatibility
- declare -F __custom_func >/dev/null && __custom_func
- fi
+ if declare -F __%[1]s_custom_func >/dev/null; then
+ # try command name qualified custom func
+ __%[1]s_custom_func
+ else
+ # otherwise fall back to unqualified for compatibility
+ declare -F __custom_func >/dev/null && __custom_func
+ fi
fi
# available in bash-completion >= 2, not always present on macOS
@@ -272,7 +293,7 @@ __%[1]s_handle_flag()
# if a command required a flag, and we found it, unset must_have_one_flag()
local flagname=${words[c]}
- local flagvalue
+ local flagvalue=""
# if the word contained an =
if [[ ${words[c]} == *"="* ]]; then
flagvalue=${flagname#*=} # take in as flagvalue after the =
@@ -291,7 +312,7 @@ __%[1]s_handle_flag()
# keep flag value with flagname as flaghash
# flaghash variable is an associative array which is only supported in bash > 3.
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then
if [ -n "${flagvalue}" ] ; then
flaghash[${flagname}]=${flagvalue}
elif [ -n "${words[ $((c+1)) ]}" ] ; then
@@ -303,7 +324,7 @@ __%[1]s_handle_flag()
# skip the argument to a two word flag
if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
- __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
+ __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
c=$((c+1))
# if we are looking for a flags value, don't show commands
if [[ $c -eq $cword ]]; then
@@ -363,7 +384,7 @@ __%[1]s_handle_word()
__%[1]s_handle_command
elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
# aliashash variable is an associative array which is only supported in bash > 3.
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then
words[c]=${aliashash[${words[c]}]}
__%[1]s_handle_command
else
@@ -377,14 +398,14 @@ __%[1]s_handle_word()
`, name, ShellCompNoDescRequestCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
}
func writePostscript(buf io.StringWriter, name string) {
- name = strings.Replace(name, ":", "__", -1)
+ name = strings.ReplaceAll(name, ":", "__")
WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name))
WriteStringAndCheck(buf, fmt.Sprintf(`{
- local cur prev words cword
+ local cur prev words cword split
declare -A flaghash 2>/dev/null || :
declare -A aliashash 2>/dev/null || :
if declare -F _init_completion >/dev/null 2>&1; then
@@ -394,17 +415,20 @@ func writePostscript(buf io.StringWriter, name string) {
fi
local c=0
+ local flag_parsing_disabled=
local flags=()
local two_word_flags=()
local local_nonpersistent_flags=()
local flags_with_completion=()
local flags_completion=()
local commands=("%[1]s")
+ local command_aliases=()
local must_have_one_flag=()
local must_have_one_noun=()
- local has_completion_function
- local last_command
+ local has_completion_function=""
+ local last_command=""
local nouns=()
+ local noun_aliases=()
__%[1]s_handle_word
}
@@ -510,6 +534,8 @@ func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) {
// Setup annotations for go completions for registered flags
func prepareCustomAnnotationsForFlags(cmd *Command) {
+ flagCompletionMutex.RLock()
+ defer flagCompletionMutex.RUnlock()
for flag := range flagCompletionFunctions {
// Make sure the completion script calls the __*_go_custom_completion function for
// every registered flag. We need to do this here (and not when the flag was registered
@@ -531,6 +557,11 @@ func writeFlags(buf io.StringWriter, cmd *Command) {
flags_completion=()
`)
+
+ if cmd.DisableFlagParsing {
+ WriteStringAndCheck(buf, " flag_parsing_disabled=1\n")
+ }
+
localNonPersistentFlags := cmd.LocalNonPersistentFlags()
cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
@@ -605,7 +636,7 @@ func writeCmdAliases(buf io.StringWriter, cmd *Command) {
sort.Strings(cmd.Aliases)
- WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
+ WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n"))
for _, value := range cmd.Aliases {
WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value))
WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
@@ -629,8 +660,8 @@ func gen(buf io.StringWriter, cmd *Command) {
gen(buf, c)
}
commandName := cmd.CommandPath()
- commandName = strings.Replace(commandName, " ", "_", -1)
- commandName = strings.Replace(commandName, ":", "__", -1)
+ commandName = strings.ReplaceAll(commandName, " ", "_")
+ commandName = strings.ReplaceAll(commandName, ":", "__")
if cmd.Root() == cmd {
WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName))
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
index 130f99b923..52919b2fa6 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.md
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -6,6 +6,8 @@ Please refer to [Shell Completions](shell_completions.md) for details.
For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used alongside `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution.
+**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own.
+
The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions.
Some code that works in kubernetes:
diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go
new file mode 100644
index 0000000000..bb4b71892c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go
@@ -0,0 +1,383 @@
+// Copyright 2013-2022 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genBashComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func genBashComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+
+ WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*-
+
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Macs have bash3 for which the bash-completion package doesn't include
+# _init_completion. This is a minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+# This function calls the %[1]s program to obtain the completion
+# results and the directive. It fills the 'out' and 'directive' vars.
+__%[1]s_get_completion_results() {
+ local requestComp lastParam lastChar args
+
+ # Prepare the command to request completions for the program.
+    # Calling ${words[0]} instead of directly %[1]s allows handling aliases
+ args=("${words[@]:1}")
+ requestComp="${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
+        # If the last parameter is complete (there is a space following it),
+        # we add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "Adding extra empty parameter"
+ requestComp="${requestComp} ''"
+ fi
+
+ # When completing a flag with an = (e.g., %[1]s -n=<TAB>)
+ # bash focuses on the part after the =, so we need to remove
+ # the flag part from $cur
+ if [[ "${cur}" == -*=* ]]; then
+ cur="${cur#*=}"
+ fi
+
+ __%[1]s_debug "Calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [ "${directive}" = "${out}" ]; then
+        # There is no directive specified
+ directive=0
+ fi
+ __%[1]s_debug "The completion directive is: ${directive}"
+ __%[1]s_debug "The completions are: ${out}"
+}
+
+__%[1]s_process_completion_results() {
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+
+ if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ # Error code. No completion.
+ __%[1]s_debug "Received error from custom completion go code"
+ return
+ else
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "Activating no space"
+ compopt -o nospace
+ else
+ __%[1]s_debug "No space directive not supported in this version of bash"
+ fi
+ fi
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "Activating no file completion"
+ compopt +o default
+ else
+ __%[1]s_debug "No file completion directive not supported in this version of bash"
+ fi
+ fi
+ fi
+
+ # Separate activeHelp from normal completions
+ local completions=()
+ local activeHelp=()
+ __%[1]s_extract_activeHelp
+
+ if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ # File extension filtering
+ local fullFilter filter filteringCmd
+
+ # Do not use quotes around the $completions variable or else newline
+ # characters will be kept.
+ for filter in ${completions[*]}; do
+ fullFilter+="$filter|"
+ done
+
+ filteringCmd="_filedir $fullFilter"
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ $filteringCmd
+ elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ # File completion for directories only
+
+ # Use printf to strip any trailing newline
+ local subdir
+ subdir=$(printf "%%s" "${completions[0]}")
+ if [ -n "$subdir" ]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+ else
+ __%[1]s_debug "Listing directories in ."
+ _filedir -d
+ fi
+ else
+ __%[1]s_handle_completion_types
+ fi
+
+ __%[1]s_handle_special_char "$cur" :
+ __%[1]s_handle_special_char "$cur" =
+
+ # Print the activeHelp statements before we finish
+ if [ ${#activeHelp[*]} -ne 0 ]; then
+ printf "\n";
+ printf "%%s\n" "${activeHelp[@]}"
+ printf "\n"
+
+ # The prompt format is only available from bash 4.4.
+ # We test if it is available before using it.
+ if (x=${PS1@P}) 2> /dev/null; then
+ printf "%%s" "${PS1@P}${COMP_LINE[@]}"
+ else
+ # Can't print the prompt. Just print the
+ # text the user had typed, it is workable enough.
+ printf "%%s" "${COMP_LINE[@]}"
+ fi
+ fi
+}
+
+# Separate activeHelp lines from real completions.
+# Fills the $activeHelp and $completions arrays.
+__%[1]s_extract_activeHelp() {
+ local activeHelpMarker="%[8]s"
+ local endIndex=${#activeHelpMarker}
+
+ while IFS='' read -r comp; do
+ if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then
+ comp=${comp:endIndex}
+ __%[1]s_debug "ActiveHelp found: $comp"
+ if [ -n "$comp" ]; then
+ activeHelp+=("$comp")
+ fi
+ else
+ # Not an activeHelp line but a normal completion
+ completions+=("$comp")
+ fi
+ done < <(printf "%%s\n" "${out}")
+}
+
+__%[1]s_handle_completion_types() {
+ __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE"
+
+ case $COMP_TYPE in
+ 37|42)
+ # Type: menu-complete/menu-complete-backward and insert-completions
+ # If the user requested inserting one completion at a time, or all
+ # completions at once on the command-line we must remove the descriptions.
+ # https://github.com/spf13/cobra/issues/1508
+ local tab=$'\t' comp
+ while IFS='' read -r comp; do
+ [[ -z $comp ]] && continue
+ # Strip any description
+ comp=${comp%%%%$tab*}
+ # Only consider the completions that match
+ if [[ $comp == "$cur"* ]]; then
+ COMPREPLY+=("$comp")
+ fi
+ done < <(printf "%%s\n" "${completions[@]}")
+ ;;
+
+ *)
+ # Type: complete (normal completion)
+ __%[1]s_handle_standard_completion_case
+ ;;
+ esac
+}
+
+__%[1]s_handle_standard_completion_case() {
+ local tab=$'\t' comp
+
+ # Short circuit to optimize if we don't have descriptions
+ if [[ "${completions[*]}" != *$tab* ]]; then
+ IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur")
+ return 0
+ fi
+
+ local longest=0
+ local compline
+ # Look for the longest completion so that we can format things nicely
+ while IFS='' read -r compline; do
+ [[ -z $compline ]] && continue
+ # Strip any description before checking the length
+ comp=${compline%%%%$tab*}
+ # Only consider the completions that match
+ [[ $comp == "$cur"* ]] || continue
+ COMPREPLY+=("$compline")
+ if ((${#comp}>longest)); then
+ longest=${#comp}
+ fi
+ done < <(printf "%%s\n" "${completions[@]}")
+
+ # If there is a single completion left, remove the description text
+ if [ ${#COMPREPLY[*]} -eq 1 ]; then
+ __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
+ comp="${COMPREPLY[0]%%%%$tab*}"
+ __%[1]s_debug "Removed description from single completion, which is now: ${comp}"
+ COMPREPLY[0]=$comp
+ else # Format the descriptions
+ __%[1]s_format_comp_descriptions $longest
+ fi
+}
+
+__%[1]s_handle_special_char()
+{
+ local comp="$1"
+ local char=$2
+ if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then
+ local word=${comp%%"${comp##*${char}}"}
+ local idx=${#COMPREPLY[*]}
+ while [[ $((--idx)) -ge 0 ]]; do
+ COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"}
+ done
+ fi
+}
+
+__%[1]s_format_comp_descriptions()
+{
+ local tab=$'\t'
+ local comp desc maxdesclength
+ local longest=$1
+
+ local i ci
+ for ci in ${!COMPREPLY[*]}; do
+ comp=${COMPREPLY[ci]}
+ # Properly format the description string which follows a tab character if there is one
+ if [[ "$comp" == *$tab* ]]; then
+ __%[1]s_debug "Original comp: $comp"
+ desc=${comp#*$tab}
+ comp=${comp%%%%$tab*}
+
+ # $COLUMNS stores the current shell width.
+ # Remove an extra 4 because we add 2 spaces and 2 parentheses.
+ maxdesclength=$(( COLUMNS - longest - 4 ))
+
+ # Make sure we can fit a description of at least 8 characters
+ # if we are to align the descriptions.
+ if [[ $maxdesclength -gt 8 ]]; then
+ # Add the proper number of spaces to align the descriptions
+ for ((i = ${#comp} ; i < longest ; i++)); do
+ comp+=" "
+ done
+ else
+ # Don't pad the descriptions so we can fit more text after the completion
+ maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
+ fi
+
+ # If there is enough space for any description text,
+ # truncate the descriptions that are too long for the shell width
+ if [ $maxdesclength -gt 0 ]; then
+ if [ ${#desc} -gt $maxdesclength ]; then
+ desc=${desc:0:$(( maxdesclength - 1 ))}
+ desc+="…"
+ fi
+ comp+=" ($desc)"
+ fi
+ COMPREPLY[ci]=$comp
+ __%[1]s_debug "Final comp: $comp"
+ fi
+ done
+}
+
+__start_%[1]s()
+{
+ local cur prev words cword split
+
+ COMPREPLY=()
+
+ # Call _init_completion from the bash-completion package
+ # to prepare the arguments properly
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -n "=:" || return
+ else
+ __%[1]s_init_completion -n "=:" || return
+ fi
+
+ __%[1]s_debug
+ __%[1]s_debug "========= starting completion logic =========="
+ __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $cword location, so we need
+ # to truncate the command-line ($words) up to the $cword location.
+ words=("${words[@]:0:$cword+1}")
+ __%[1]s_debug "Truncated words[*]: ${words[*]},"
+
+ local out directive
+ __%[1]s_get_completion_results
+ __%[1]s_process_completion_results
+}
+
+if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%[1]s %[1]s
+else
+ complete -o default -o nospace -F __start_%[1]s %[1]s
+fi
+
+# ex: ts=4 sw=4 et filetype=sh
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+ activeHelpMarker))
+}
+
+// GenBashCompletionFileV2 generates Bash completion version 2.
+func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletionV2(outFile, includeDesc)
+}
+
+// GenBashCompletionV2 generates Bash completion file version 2
+// and writes it to the passed writer.
+func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error {
+ return c.genBashCompletion(w, includeDesc)
+}
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
index d6cbfd7198..fe44bc8a07 100644
--- a/vendor/github.com/spf13/cobra/cobra.go
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -1,9 +1,10 @@
-// Copyright © 2013 Steve Francia <spf@spf13.com>.
+// Copyright 2013-2022 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
+//
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -39,15 +40,25 @@ var templateFuncs = template.FuncMap{
}
var initializers []func()
+var finalizers []func()
+
+const (
+ defaultPrefixMatching = false
+ defaultCommandSorting = true
+ defaultCaseInsensitive = false
+)
// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing
// to automatically enable in CLI tools.
// Set this to true to enable it.
-var EnablePrefixMatching = false
+var EnablePrefixMatching = defaultPrefixMatching
// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
// To disable sorting, set it to false.
-var EnableCommandSorting = true
+var EnableCommandSorting = defaultCommandSorting
+
+// EnableCaseInsensitive allows case-insensitive command names. (case sensitive by default)
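+// For example, with EnableCaseInsensitive set to true, "ADD" will match a
+// command named "add".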
+var EnableCaseInsensitive = defaultCaseInsensitive
// MousetrapHelpText enables an information splash screen on Windows
// if the CLI is started from explorer.exe.
@@ -84,6 +95,12 @@ func OnInitialize(y ...func()) {
initializers = append(initializers, y...)
}
+// OnFinalize sets the passed functions to be run when each command's
+// Execute method terminates.
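+//
+// A minimal sketch (hypothetical cleanup function):
+//
+//	cobra.OnFinalize(func() { tempFiles.Cleanup() })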
+func OnFinalize(y ...func()) {
+ finalizers = append(finalizers, y...)
+}
+
// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index d6732ad115..6ff47dd5c3 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -1,9 +1,10 @@
-// Copyright © 2013 Steve Francia <spf@spf13.com>.
+// Copyright 2013-2022 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
+//
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,6 +19,7 @@ package cobra
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"os"
@@ -28,9 +30,17 @@ import (
flag "github.com/spf13/pflag"
)
+const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra"
+
// FParseErrWhitelist configures Flag parse errors to be ignored
type FParseErrWhitelist flag.ParseErrorsWhitelist
+// Group is a structure to manage groups for commands.
+type Group struct {
+ ID string
+ Title string
+}
+
// Command is just that, a command for your application.
// E.g. 'go run ...' - 'run' is the command. Cobra requires
// you to define the usage and description as part of your command
@@ -57,15 +67,18 @@ type Command struct {
// Short is the short description shown in the 'help' output.
Short string
+ // The group id under which this subcommand is grouped in the 'help' output of its parent.
+ GroupID string
+
// Long is the long message shown in the 'help <this-command>' output.
Long string
// Example is examples of how to use the command.
Example string
- // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions
+ // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions
ValidArgs []string
- // ValidArgsFunction is an optional function that provides valid non-flag arguments for bash completion.
+ // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion.
// It is a dynamic version of using ValidArgs.
// Only one of ValidArgs and ValidArgsFunction can be used for a command.
ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
@@ -74,11 +87,12 @@ type Command struct {
Args PositionalArgs
// ArgAliases is List of aliases for ValidArgs.
- // These are not suggested to the user in the bash completion,
+ // These are not suggested to the user in the shell completion,
// but accepted if entered manually.
ArgAliases []string
- // BashCompletionFunction is custom functions used by the bash autocompletion generator.
+ // BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator.
+ // For portability with other shells, it is recommended to instead use ValidArgsFunction
BashCompletionFunction string
// Deprecated defines, if this command is deprecated and should print this string when used.
@@ -123,6 +137,9 @@ type Command struct {
// PersistentPostRunE: PersistentPostRun but returns an error.
PersistentPostRunE func(cmd *Command, args []string) error
+ // groups for subcommands
+ commandgroups []*Group
+
// args is actual args parsed from flags.
args []string
// flagErrorBuf contains all error messages from pflag.
@@ -155,6 +172,12 @@ type Command struct {
// helpCommand is command with usage 'help'. If it's not defined by user,
// cobra uses default help command.
helpCommand *Command
+ // helpCommandGroupID is the group id for the helpCommand
+ helpCommandGroupID string
+
+ // completionCommandGroupID is the group id for the completion command
+ completionCommandGroupID string
+
// versionTemplate is the version template defined by user.
versionTemplate string
@@ -165,9 +188,12 @@ type Command struct {
// errWriter is a writer defined by the user that replaces stderr
errWriter io.Writer
- //FParseErrWhitelist flag parse errors to be ignored
+ // FParseErrWhitelist flag parse errors to be ignored
FParseErrWhitelist FParseErrWhitelist
+ // CompletionOptions is a set of options to control the handling of shell completion
+ CompletionOptions CompletionOptions
+
// commandsAreSorted defines, if command slice are sorted or not.
commandsAreSorted bool
// commandCalledAs is the name or alias value used to call this command.
@@ -220,12 +246,23 @@ type Command struct {
SuggestionsMinimumDistance int
}
-// Context returns underlying command context. If command wasn't
-// executed with ExecuteContext Context returns Background context.
+// Context returns the underlying command context. If the command was executed
+// with ExecuteContext or the context was set with SetContext, the
+// previously set context will be returned. Otherwise, nil is returned.
+//
+// Notice that a call to Execute and ExecuteC will replace a nil context of
+// a command with a context.Background, so a background context will be
+// returned by Context after one of these functions has been called.
func (c *Command) Context() context.Context {
return c.ctx
}
+// SetContext sets context for the command. This context will be overwritten by
+// Command.ExecuteContext or Command.ExecuteContextC.
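+//
+// A minimal sketch: attach a context before invoking Execute directly:
+//
+//	cmd.SetContext(ctx)
+//	err := cmd.Execute()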
+func (c *Command) SetContext(ctx context.Context) {
+ c.ctx = ctx
+}
+
// SetArgs sets arguments for the command. By default, it is set to os.Args[1:];
// overriding it is particularly useful when testing.
func (c *Command) SetArgs(a []string) {
@@ -284,6 +321,21 @@ func (c *Command) SetHelpCommand(cmd *Command) {
c.helpCommand = cmd
}
+// SetHelpCommandGroupID sets the group id of the help command.
+func (c *Command) SetHelpCommandGroupID(groupID string) {
+ if c.helpCommand != nil {
+ c.helpCommand.GroupID = groupID
+ }
+ // helpCommandGroupID is used if no helpCommand is defined by the user
+ c.helpCommandGroupID = groupID
+}
+
+// SetCompletionCommandGroupID sets the group id of the completion command.
+func (c *Command) SetCompletionCommandGroupID(groupID string) {
+ // completionCommandGroupID is used if no completion command is defined by the user
+ c.Root().completionCommandGroupID = groupID
+}
+
// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
func (c *Command) SetHelpTemplate(s string) {
c.helpTemplate = s
@@ -492,10 +544,16 @@ Aliases:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
Examples:
-{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
+{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}}
+
+Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}}
+
+{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}}
-Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
- {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
@@ -660,7 +718,7 @@ func (c *Command) findSuggestions(arg string) string {
func (c *Command) findNext(next string) *Command {
matches := make([]*Command, 0)
for _, cmd := range c.commands {
- if cmd.Name() == next || cmd.HasAlias(next) {
+ if commandNameMatches(cmd.Name(), next) || cmd.HasAlias(next) {
cmd.commandCalledAs.name = next
return cmd
}
@@ -817,6 +875,8 @@ func (c *Command) execute(a []string) (err error) {
c.preRun()
+ defer c.postRun()
+
argWoFlags := c.Flags().Args()
if c.DisableFlagParsing {
argWoFlags = a
@@ -845,9 +905,13 @@ func (c *Command) execute(a []string) (err error) {
c.PreRun(c, argWoFlags)
}
- if err := c.validateRequiredFlags(); err != nil {
+ if err := c.ValidateRequiredFlags(); err != nil {
+ return err
+ }
+ if err := c.ValidateFlagGroups(); err != nil {
return err
}
+
if c.RunE != nil {
if err := c.RunE(c, argWoFlags); err != nil {
return err
@@ -883,8 +947,15 @@ func (c *Command) preRun() {
}
}
+func (c *Command) postRun() {
+ for _, x := range finalizers {
+ x()
+ }
+}
+
// ExecuteContext is the same as Execute(), but sets the ctx on the command.
-// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+// functions.
func (c *Command) ExecuteContext(ctx context.Context) error {
c.ctx = ctx
return c.Execute()
@@ -898,6 +969,14 @@ func (c *Command) Execute() error {
return err
}
+// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+// functions.
+func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) {
+ c.ctx = ctx
+ return c.ExecuteC()
+}
+
// ExecuteC executes the command.
func (c *Command) ExecuteC() (cmd *Command, err error) {
if c.ctx == nil {
@@ -914,9 +993,14 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
preExecHookFn(c)
}
- // initialize help as the last point possible to allow for user
- // overriding
+ // initialize help at the last point to allow for user overriding
c.InitDefaultHelpCmd()
+ // initialize completion at the last point to allow for user overriding
+ c.InitDefaultCompletionCmd()
+
+ // Now that all commands have been created, let's make sure all groups
+ // are properly created also
+ c.checkCommandGroups()
args := c.args
@@ -925,7 +1009,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
args = os.Args[1:]
}
- // initialize the hidden command to be used for bash completion
+ // initialize the hidden command to be used for shell completion
c.initCompleteCmd(args)
var flags []string
@@ -961,7 +1045,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
if err != nil {
// Always show help if requested, even if SilenceErrors is in
// effect
- if err == flag.ErrHelp {
+ if errors.Is(err, flag.ErrHelp) {
cmd.HelpFunc()(cmd, args)
return cmd, nil
}
@@ -983,12 +1067,13 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
func (c *Command) ValidateArgs(args []string) error {
if c.Args == nil {
- return nil
+ return ArbitraryArgs(c, args)
}
return c.Args(c, args)
}
-func (c *Command) validateRequiredFlags() error {
+// ValidateRequiredFlags validates all required flags are present and returns an error otherwise
+func (c *Command) ValidateRequiredFlags() error {
if c.DisableFlagParsing {
return nil
}
@@ -1011,6 +1096,19 @@ func (c *Command) validateRequiredFlags() error {
return nil
}
+// checkCommandGroups checks if a command has been added to a group that does not exist.
+// If so, we panic because it indicates a coding error that should be corrected.
+func (c *Command) checkCommandGroups() {
+ for _, sub := range c.commands {
+ // if Group is not defined let the developer know right away
+ if sub.GroupID != "" && !c.ContainsGroup(sub.GroupID) {
+ panic(fmt.Sprintf("group id '%s' is not defined for subcommand '%s'", sub.GroupID, sub.CommandPath()))
+ }
+
+ sub.checkCommandGroups()
+ }
+}
+
// InitDefaultHelpFlag adds default help flag to c.
// It is called automatically by executing the c or by calling help and usage.
// If c already has help flag, it will do nothing.
@@ -1024,6 +1122,7 @@ func (c *Command) InitDefaultHelpFlag() {
usage += c.Name()
}
c.Flags().BoolP("help", "h", false, usage)
+ _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"})
}
}
@@ -1049,6 +1148,7 @@ func (c *Command) InitDefaultVersionFlag() {
} else {
c.Flags().Bool("version", false, usage)
}
+ _ = c.Flags().SetAnnotation("version", FlagSetByCobraAnnotation, []string{"true"})
}
}
@@ -1091,10 +1191,12 @@ Simply type ` + c.Name() + ` help [path to command] for full details.`,
c.Printf("Unknown help topic %#q\n", args)
CheckErr(c.Root().Usage())
} else {
- cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
+ cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
+ cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown
CheckErr(cmd.Help())
}
},
+ GroupID: c.helpCommandGroupID,
}
}
c.RemoveCommand(c.helpCommand)
@@ -1155,6 +1257,36 @@ func (c *Command) AddCommand(cmds ...*Command) {
}
}
+// Groups returns a slice of child command groups.
+func (c *Command) Groups() []*Group {
+ return c.commandgroups
+}
+
+// AllChildCommandsHaveGroup returns whether all subcommands are assigned to a group.
+func (c *Command) AllChildCommandsHaveGroup() bool {
+ for _, sub := range c.commands {
+ if (sub.IsAvailableCommand() || sub == c.helpCommand) && sub.GroupID == "" {
+ return false
+ }
+ }
+ return true
+}
+
+// ContainsGroup returns whether groupID exists in the list of command groups.
+func (c *Command) ContainsGroup(groupID string) bool {
+ for _, x := range c.commandgroups {
+ if x.ID == groupID {
+ return true
+ }
+ }
+ return false
+}
+
+// AddGroup adds one or more command groups to this parent command.
+func (c *Command) AddGroup(groups ...*Group) {
+ c.commandgroups = append(c.commandgroups, groups...)
+}
+
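Taken together, AddGroup, GroupID, and checkCommandGroups work as in this sketch; the group ID, title, and command names are illustrative:

```go
func newRootCmd() *cobra.Command {
	rootCmd := &cobra.Command{Use: "app"}

	// Register the group first; if a subcommand references a GroupID that
	// was never added, checkCommandGroups panics when Execute runs.
	rootCmd.AddGroup(&cobra.Group{ID: "mgmt", Title: "Management Commands:"})

	rootCmd.AddCommand(
		&cobra.Command{Use: "create", GroupID: "mgmt", Short: "Create a resource", Run: func(*cobra.Command, []string) {}},
		&cobra.Command{Use: "delete", GroupID: "mgmt", Short: "Delete a resource", Run: func(*cobra.Command, []string) {}},
	)
	return rootCmd
}
```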
// RemoveCommand removes one or more commands from a parent command.
func (c *Command) RemoveCommand(cmds ...*Command) {
commands := []*Command{}
@@ -1298,7 +1430,7 @@ func (c *Command) Name() string {
// HasAlias determines if a given string is an alias of the command.
func (c *Command) HasAlias(s string) bool {
for _, a := range c.Aliases {
- if a == s {
+ if commandNameMatches(a, s) {
return true
}
}
@@ -1475,7 +1607,8 @@ func (c *Command) LocalFlags() *flag.FlagSet {
}
addToLocal := func(f *flag.Flag) {
- if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil {
+ // Add the flag if it is not a parent PFlag, or it shadows a parent PFlag
+ if c.lflags.Lookup(f.Name) == nil && f != c.parentsPflags.Lookup(f.Name) {
c.lflags.AddFlag(f)
}
}
@@ -1664,3 +1797,14 @@ func (c *Command) updateParentsPflags() {
c.parentsPflags.AddFlagSet(parent.PersistentFlags())
})
}
+
+// commandNameMatches checks if two command names are equal
+// taking into account case sensitivity according to
+// EnableCaseInsensitive global configuration.
+func commandNameMatches(s string, t string) bool {
+ if EnableCaseInsensitive {
+ return strings.EqualFold(s, t)
+ }
+
+ return s == t
+}
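commandNameMatches is what makes the following opt-in behavior work; a small sketch, assuming a program that sets the global before Execute:

```go
// Opt in once, before Execute; command and alias matching then ignores case.
cobra.EnableCaseInsensitive = true

root := &cobra.Command{Use: "app"}
root.AddCommand(&cobra.Command{
	Use:     "create",
	Aliases: []string{"new"},
	Run:     func(*cobra.Command, []string) {},
})
// With the flag set, `app CREATE`, `app New`, etc. all resolve via
// commandNameMatches / HasAlias using strings.EqualFold.
```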
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
index 6159c1cc19..2b77f8f019 100644
--- a/vendor/github.com/spf13/cobra/command_notwin.go
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -1,3 +1,18 @@
+// Copyright 2013-2022 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
// +build !windows
package cobra
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
index 8768b1736d..520f23abf0 100644
--- a/vendor/github.com/spf13/cobra/command_win.go
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -1,3 +1,18 @@
+// Copyright 2013-2022 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
// +build windows
package cobra
diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/completions.go
index fa060c147b..e8a0206db1 100644
--- a/vendor/github.com/spf13/cobra/custom_completions.go
+++ b/vendor/github.com/spf13/cobra/completions.go
@@ -1,9 +1,24 @@
+// Copyright 2013-2022 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package cobra
import (
"fmt"
"os"
"strings"
+ "sync"
"github.com/spf13/pflag"
)
@@ -17,13 +32,25 @@ const (
ShellCompNoDescRequestCmd = "__completeNoDesc"
)
-// Global map of flag completion functions.
+// Global map of flag completion functions. Make sure to use flagCompletionMutex before reading from or writing to it.
var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
+// lock for reading from and writing to flagCompletionFunctions
+var flagCompletionMutex = &sync.RWMutex{}
+
// ShellCompDirective is a bit map representing the different behaviors the shell
// can be instructed to have once completions have been provided.
type ShellCompDirective int
+type flagCompError struct {
+ subCommand string
+ flagName string
+}
+
+func (e *flagCompError) Error() string {
+ return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'"
+}
+
const (
// ShellCompDirectiveError indicates an error occurred and completions should be ignored.
ShellCompDirectiveError ShellCompDirective = 1 << iota
@@ -34,7 +61,6 @@ const (
// ShellCompDirectiveNoFileComp indicates that the shell should not provide
// file completion even when no completion is provided.
- // This currently does not work for zsh or bash < 4
ShellCompDirectiveNoFileComp
// ShellCompDirectiveFilterFileExt indicates that the provided completions
@@ -63,12 +89,51 @@ const (
ShellCompDirectiveDefault ShellCompDirective = 0
)
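Because ShellCompDirective is a bit map, independent directives can be combined with a bitwise OR; a hypothetical completion function:

```go
cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	// Suggest a key prefix: suppress both the trailing space and
	// the fallback to file completion.
	return []string{"key="}, cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp
}
```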
+const (
+ // Constants for the completion command
+ compCmdName = "completion"
+ compCmdNoDescFlagName = "no-descriptions"
+ compCmdNoDescFlagDesc = "disable completion descriptions"
+ compCmdNoDescFlagDefault = false
+)
+
+// CompletionOptions are the options to control shell completion
+type CompletionOptions struct {
+ // DisableDefaultCmd prevents Cobra from creating a default 'completion' command
+ DisableDefaultCmd bool
+ // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag
+ // for shells that support completion descriptions
+ DisableNoDescFlag bool
+ // DisableDescriptions turns off all completion descriptions for shells
+ // that support them
+ DisableDescriptions bool
+ // HiddenDefaultCmd makes the default 'completion' command hidden
+ HiddenDefaultCmd bool
+}
+
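A sketch of how a program might tune these options on its root command; the chosen field values are illustrative:

```go
rootCmd := &cobra.Command{Use: "app"}

// Keep the default 'completion' command but hide it from help output,
// and strip descriptions for shells that would otherwise display them.
rootCmd.CompletionOptions = cobra.CompletionOptions{
	HiddenDefaultCmd:    true,
	DisableDescriptions: true,
}
```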
+// NoFileCompletions can be used to disable file completion for commands that should
+// not trigger file completions.
+func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return nil, ShellCompDirectiveNoFileComp
+}
+
+// FixedCompletions can be used to create a completion function which always
+// returns the same results.
+func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return choices, directive
+ }
+}
+
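NoFileCompletions and FixedCompletions compose directly with ValidArgsFunction; a sketch with made-up service names:

```go
statusCmd := &cobra.Command{
	Use:  "status [service]",
	Args: cobra.MaximumNArgs(1),
	Run:  func(*cobra.Command, []string) {},
	// Always suggest the same candidates and never fall back to files.
	ValidArgsFunction: cobra.FixedCompletions(
		[]string{"api", "worker", "scheduler"},
		cobra.ShellCompDirectiveNoFileComp),
}
```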
// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
flag := c.Flag(flagName)
if flag == nil {
return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
}
+ flagCompletionMutex.Lock()
+ defer flagCompletionMutex.Unlock()
+
if _, exists := flagCompletionFunctions[flag]; exists {
return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
}
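Typical registration looks like the following sketch; the --output flag and its candidate values are assumptions for the example:

```go
cmd.Flags().StringP("output", "o", "table", "output format")
_ = cmd.RegisterFlagCompletionFunc("output",
	func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		// Offer the supported formats and skip file completion.
		return []string{"json", "yaml", "table"}, cobra.ShellCompDirectiveNoFileComp
	})
```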
@@ -127,6 +192,12 @@ func (c *Command) initCompleteCmd(args []string) {
noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
for _, comp := range completions {
+ if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable {
+ // Remove all activeHelp entries in this case
+ if strings.HasPrefix(comp, activeHelpMarker) {
+ continue
+ }
+ }
if noDescriptions {
// Remove any description that may be included following a tab character.
comp = strings.Split(comp, "\t")[0]
@@ -149,10 +220,6 @@ func (c *Command) initCompleteCmd(args []string) {
fmt.Fprintln(finalCmd.OutOrStdout(), comp)
}
- if directive >= shellCompDirectiveMaxValue {
- directive = ShellCompDirectiveDefault
- }
-
// As the last printout, print the completion directive for the completion script to parse.
// The directive integer must be that last character following a single colon (:).
// The completion script expects :<directive>
@@ -189,29 +256,75 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
if c.Root().TraverseChildren {
finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs)
} else {
- finalCmd, finalArgs, err = c.Root().Find(trimmedArgs)
+ // For Root commands that don't specify any value for their Args fields, when we call
+ // Find(), if those Root commands don't have any sub-commands, they will accept arguments.
+ // However, because we have added the __complete sub-command in the current code path, the
+ // call to Find() -> legacyArgs() will return an error if there are any arguments.
+ // To avoid this, we first remove the __complete command to get back to having no sub-commands.
+ rootCmd := c.Root()
+ if len(rootCmd.Commands()) == 1 {
+ rootCmd.RemoveCommand(c)
+ }
+
+ finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs)
}
if err != nil {
// Unable to find the real command. E.g., <program> someInvalidCmd <TAB>
return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs)
}
+ finalCmd.ctx = c.ctx
+
+ // These flags are normally added when `execute()` is called on `finalCmd`,
+ // however, when doing completion, we don't call `finalCmd.execute()`.
+ // Let's add the --help and --version flag ourselves.
+ finalCmd.InitDefaultHelpFlag()
+ finalCmd.InitDefaultVersionFlag()
// Check if we are doing flag value completion before parsing the flags.
// This is important because if we are completing a flag value, we need to also
// remove the flag name argument from the list of finalArgs or else the parsing
// could fail due to an invalid value (incomplete) for the flag.
- flag, finalArgs, toComplete, err := checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
- if err != nil {
- // Error while attempting to parse flags
- return finalCmd, []string{}, ShellCompDirectiveDefault, err
- }
+ flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
+
+ // Check if interspersed is false or -- was set on a previous arg.
+ // This works by counting the arguments. Normally -- is not counted as arg but
+ // if -- was already set or interspersed is false and there is already one arg then
+ // the extra added -- is counted as arg.
+ flagCompletion := true
+ _ = finalCmd.ParseFlags(append(finalArgs, "--"))
+ newArgCount := finalCmd.Flags().NArg()
// Parse the flags early so we can check if required flags are set
if err = finalCmd.ParseFlags(finalArgs); err != nil {
return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
}
- if flag != nil {
+ realArgCount := finalCmd.Flags().NArg()
+ if newArgCount > realArgCount {
+ // don't do flag completion (see above)
+ flagCompletion = false
+ }
+ // Error while attempting to parse flags
+ if flagErr != nil {
+ // If error type is flagCompError and we don't want flagCompletion we should ignore the error
+ if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) {
+ return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr
+ }
+ }
+
+ // Look for the --help or --version flags. If they are present,
+ // there should be no further completions.
+ if helpOrVersionFlagPresent(finalCmd) {
+ return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil
+ }
+
+ // We only remove the flags from the arguments if DisableFlagParsing is not set.
+ // This is important for commands which have requested to do their own flag completion.
+ if !finalCmd.DisableFlagParsing {
+ finalArgs = finalCmd.Flags().Args()
+ }
+
+ if flag != nil && flagCompletion {
// Check if we are completing a flag value subject to annotations
if validExts, present := flag.Annotations[BashCompFilenameExt]; present {
if len(validExts) != 0 {
@@ -235,12 +348,19 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
}
}
+ var completions []string
+ var directive ShellCompDirective
+
+ // Enforce flag groups before doing flag completions
+ finalCmd.enforceFlagGroupsForCompletion()
+
+ // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true;
+ // doing this allows for completion of persistent flag names even for commands that disable flag parsing.
+ //
// When doing completion of a flag name, as soon as an argument starts with
// a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
// the flag name to be complete
- if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") {
- var completions []string
-
+ if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion {
// First check for required flags
completions = completeRequireFlags(finalCmd, toComplete)
@@ -267,92 +387,94 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
})
}
- directive := ShellCompDirectiveNoFileComp
+ directive = ShellCompDirectiveNoFileComp
if len(completions) == 1 && strings.HasSuffix(completions[0], "=") {
// If there is a single completion, the shell usually adds a space
// after the completion. We don't want that if the flag ends with an =
directive = ShellCompDirectiveNoSpace
}
- return finalCmd, completions, directive, nil
- }
-
- // We only remove the flags from the arguments if DisableFlagParsing is not set.
- // This is important for commands which have requested to do their own flag completion.
- if !finalCmd.DisableFlagParsing {
- finalArgs = finalCmd.Flags().Args()
- }
- var completions []string
- directive := ShellCompDirectiveDefault
- if flag == nil {
- foundLocalNonPersistentFlag := false
- // If TraverseChildren is true on the root command we don't check for
- // local flags because we can use a local flag on a parent command
- if !finalCmd.Root().TraverseChildren {
- // Check if there are any local, non-persistent flags on the command-line
- localNonPersistentFlags := finalCmd.LocalNonPersistentFlags()
- finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
- if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed {
- foundLocalNonPersistentFlag = true
- }
- })
+ if !finalCmd.DisableFlagParsing {
+ // If DisableFlagParsing==false, we have completed the flags as known by Cobra;
+ // we can return what we found.
+ // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we
+ // let the logic continue to see if ValidArgsFunction needs to be called.
+ return finalCmd, completions, directive, nil
}
+ } else {
+ directive = ShellCompDirectiveDefault
+ if flag == nil {
+ foundLocalNonPersistentFlag := false
+ // If TraverseChildren is true on the root command we don't check for
+ // local flags because we can use a local flag on a parent command
+ if !finalCmd.Root().TraverseChildren {
+ // Check if there are any local, non-persistent flags on the command-line
+ localNonPersistentFlags := finalCmd.LocalNonPersistentFlags()
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed {
+ foundLocalNonPersistentFlag = true
+ }
+ })
+ }
- // Complete subcommand names, including the help command
- if len(finalArgs) == 0 && !foundLocalNonPersistentFlag {
- // We only complete sub-commands if:
- // - there are no arguments on the command-line and
- // - there are no local, non-peristent flag on the command-line or TraverseChildren is true
- for _, subCmd := range finalCmd.Commands() {
- if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand {
- if strings.HasPrefix(subCmd.Name(), toComplete) {
- completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ // Complete subcommand names, including the help command
+ if len(finalArgs) == 0 && !foundLocalNonPersistentFlag {
+ // We only complete sub-commands if:
+ // - there are no arguments on the command-line and
+ // - there are no local, non-persistent flags on the command-line or TraverseChildren is true
+ for _, subCmd := range finalCmd.Commands() {
+ if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand {
+ if strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ directive = ShellCompDirectiveNoFileComp
}
- directive = ShellCompDirectiveNoFileComp
}
}
- }
- // Complete required flags even without the '-' prefix
- completions = append(completions, completeRequireFlags(finalCmd, toComplete)...)
-
- // Always complete ValidArgs, even if we are completing a subcommand name.
- // This is for commands that have both subcommands and ValidArgs.
- if len(finalCmd.ValidArgs) > 0 {
- if len(finalArgs) == 0 {
- // ValidArgs are only for the first argument
- for _, validArg := range finalCmd.ValidArgs {
- if strings.HasPrefix(validArg, toComplete) {
- completions = append(completions, validArg)
+ // Complete required flags even without the '-' prefix
+ completions = append(completions, completeRequireFlags(finalCmd, toComplete)...)
+
+ // Always complete ValidArgs, even if we are completing a subcommand name.
+ // This is for commands that have both subcommands and ValidArgs.
+ if len(finalCmd.ValidArgs) > 0 {
+ if len(finalArgs) == 0 {
+ // ValidArgs are only for the first argument
+ for _, validArg := range finalCmd.ValidArgs {
+ if strings.HasPrefix(validArg, toComplete) {
+ completions = append(completions, validArg)
+ }
}
- }
- directive = ShellCompDirectiveNoFileComp
-
- // If no completions were found within commands or ValidArgs,
- // see if there are any ArgAliases that should be completed.
- if len(completions) == 0 {
- for _, argAlias := range finalCmd.ArgAliases {
- if strings.HasPrefix(argAlias, toComplete) {
- completions = append(completions, argAlias)
+ directive = ShellCompDirectiveNoFileComp
+
+ // If no completions were found within commands or ValidArgs,
+ // see if there are any ArgAliases that should be completed.
+ if len(completions) == 0 {
+ for _, argAlias := range finalCmd.ArgAliases {
+ if strings.HasPrefix(argAlias, toComplete) {
+ completions = append(completions, argAlias)
+ }
}
}
}
+
+ // If there are ValidArgs specified (even if they don't match), we stop completion.
+ // Only one of ValidArgs or ValidArgsFunction can be used for a single command.
+ return finalCmd, completions, directive, nil
}
- // If there are ValidArgs specified (even if they don't match), we stop completion.
- // Only one of ValidArgs or ValidArgsFunction can be used for a single command.
- return finalCmd, completions, directive, nil
+ // Let the logic continue so as to add any ValidArgsFunction completions,
+ // even if we already found sub-commands.
+ // This is for commands that have subcommands but also specify a ValidArgsFunction.
}
-
- // Let the logic continue so as to add any ValidArgsFunction completions,
- // even if we already found sub-commands.
- // This is for commands that have subcommands but also specify a ValidArgsFunction.
}
// Find the completion function for the flag or command
var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
- if flag != nil {
+ if flag != nil && flagCompletion {
+ flagCompletionMutex.RLock()
completionFn = flagCompletionFunctions[flag]
+ flagCompletionMutex.RUnlock()
} else {
completionFn = finalCmd.ValidArgsFunction
}
@@ -367,6 +489,18 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
return finalCmd, completions, directive, nil
}
+func helpOrVersionFlagPresent(cmd *Command) bool {
+ if versionFlag := cmd.Flags().Lookup("version"); versionFlag != nil &&
+ len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed {
+ return true
+ }
+ if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil &&
+ len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed {
+ return true
+ }
+ return false
+}
+
func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string {
if nonCompletableFlag(flag) {
return []string{}
@@ -435,6 +569,7 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p
var flagName string
trimmedArgs := args
flagWithEqual := false
+ orgLastArg := lastArg
// When doing completion of a flag name, as soon as an argument starts with
// a '-' we know it is a flag. We cannot use isFlagArg() here as that function
@@ -442,7 +577,16 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p
if len(lastArg) > 0 && lastArg[0] == '-' {
if index := strings.Index(lastArg, "="); index >= 0 {
// Flag with an =
- flagName = strings.TrimLeft(lastArg[:index], "-")
+ if strings.HasPrefix(lastArg[:index], "--") {
+ // Flag has full name
+ flagName = lastArg[2:index]
+ } else {
+ // Flag is shorthand
+ // We have to get the last shorthand flag name
+ // e.g. `-asd` => d to provide the correct completion
+ // https://github.com/spf13/cobra/issues/1257
+ flagName = lastArg[index-1 : index]
+ }
lastArg = lastArg[index+1:]
flagWithEqual = true
} else {
@@ -459,8 +603,16 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p
// If the flag contains an = it means it has already been fully processed,
// so we don't need to deal with it here.
if index := strings.Index(prevArg, "="); index < 0 {
- flagName = strings.TrimLeft(prevArg, "-")
-
+ if strings.HasPrefix(prevArg, "--") {
+ // Flag has full name
+ flagName = prevArg[2:]
+ } else {
+ // Flag is shorthand
+ // We have to get the last shorthand flag name
+ // e.g. `-asd` => d to provide the correct completion
+ // https://github.com/spf13/cobra/issues/1257
+ flagName = prevArg[len(prevArg)-1:]
+ }
// Remove the uncompleted flag or else there could be an error created
// for an invalid value for that flag
trimmedArgs = args[:len(args)-1]
@@ -476,9 +628,8 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p
flag := findFlag(finalCmd, flagName)
if flag == nil {
- // Flag not supported by this command, nothing to complete
- err := fmt.Errorf("Subcommand '%s' does not support flag '%s'", finalCmd.Name(), flagName)
- return nil, nil, "", err
+ // Flag not supported by this command, the interspersed option might be set so return the original args
+ return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName}
}
if !flagWithEqual {
@@ -494,6 +645,169 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p
return flag, trimmedArgs, lastArg, nil
}
+// InitDefaultCompletionCmd adds a default 'completion' command to c.
+// This function will do nothing if any of the following is true:
+// 1- the feature has been explicitly disabled by the program,
+// 2- c has no subcommands (to avoid creating one),
+// 3- c already has a 'completion' command provided by the program.
+func (c *Command) InitDefaultCompletionCmd() {
+ if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() {
+ return
+ }
+
+ for _, cmd := range c.commands {
+ if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) {
+ // A completion command is already available
+ return
+ }
+ }
+
+ haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions
+
+ completionCmd := &Command{
+ Use: compCmdName,
+ Short: "Generate the autocompletion script for the specified shell",
+ Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell.
+See each sub-command's help for details on how to use the generated script.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ Hidden: c.CompletionOptions.HiddenDefaultCmd,
+ GroupID: c.completionCommandGroupID,
+ }
+ c.AddCommand(completionCmd)
+
+ out := c.OutOrStdout()
+ noDesc := c.CompletionOptions.DisableDescriptions
+ shortDesc := "Generate the autocompletion script for %s"
+ bash := &Command{
+ Use: "bash",
+ Short: fmt.Sprintf(shortDesc, "bash"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell.
+
+This script depends on the 'bash-completion' package.
+If it is not installed already, you can install it via your OS's package manager.
+
+To load completions in your current shell session:
+
+ source <(%[1]s completion bash)
+
+To load completions for every new session, execute once:
+
+#### Linux:
+
+ %[1]s completion bash > /etc/bash_completion.d/%[1]s
+
+#### macOS:
+
+ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ DisableFlagsInUseLine: true,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ return cmd.Root().GenBashCompletionV2(out, !noDesc)
+ },
+ }
+ if haveNoDescFlag {
+ bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ zsh := &Command{
+ Use: "zsh",
+ Short: fmt.Sprintf(shortDesc, "zsh"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell.
+
+If shell completion is not already enabled in your environment you will need
+to enable it. You can execute the following once:
+
+ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+To load completions in your current shell session:
+
+ source <(%[1]s completion zsh); compdef _%[1]s %[1]s
+
+To load completions for every new session, execute once:
+
+#### Linux:
+
+ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
+
+#### macOS:
+
+ %[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ if noDesc {
+ return cmd.Root().GenZshCompletionNoDesc(out)
+ }
+ return cmd.Root().GenZshCompletion(out)
+ },
+ }
+ if haveNoDescFlag {
+ zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ fish := &Command{
+ Use: "fish",
+ Short: fmt.Sprintf(shortDesc, "fish"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell.
+
+To load completions in your current shell session:
+
+ %[1]s completion fish | source
+
+To load completions for every new session, execute once:
+
+ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ return cmd.Root().GenFishCompletion(out, !noDesc)
+ },
+ }
+ if haveNoDescFlag {
+ fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ powershell := &Command{
+ Use: "powershell",
+ Short: fmt.Sprintf(shortDesc, "powershell"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for powershell.
+
+To load completions in your current shell session:
+
+ %[1]s completion powershell | Out-String | Invoke-Expression
+
+To load completions for every new session, add the output of the above command
+to your powershell profile.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ if noDesc {
+ return cmd.Root().GenPowerShellCompletion(out)
+ }
+ return cmd.Root().GenPowerShellCompletionWithDesc(out)
+
+ },
+ }
+ if haveNoDescFlag {
+ powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ completionCmd.AddCommand(bash, zsh, fish, powershell)
+}
+
func findFlag(cmd *Command, name string) *pflag.Flag {
flagSet := cmd.Flags()
if len(name) == 1 {
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
index 3e112347d7..97112a17b2 100644
--- a/vendor/github.com/spf13/cobra/fish_completions.go
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -1,3 +1,17 @@
+// Copyright 2013-2022 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package cobra
import (
@@ -11,8 +25,8 @@ import (
func genFishComp(buf io.StringWriter, name string, includeDesc bool) {
// Variables should not contain a '-' or ':' character
nameForVar := name
- nameForVar = strings.Replace(nameForVar, "-", "_", -1)
- nameForVar = strings.Replace(nameForVar, ":", "_", -1)
+ nameForVar = strings.ReplaceAll(nameForVar, "-", "_")
+ nameForVar = strings.ReplaceAll(nameForVar, ":", "_")
compCmd := ShellCompRequestCmd
if !includeDesc {
@@ -21,44 +35,48 @@ func genFishComp(buf io.StringWriter, name string, includeDesc bool) {
WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
WriteStringAndCheck(buf, fmt.Sprintf(`
function __%[1]s_debug
- set file "$BASH_COMP_DEBUG_FILE"
+ set -l file "$BASH_COMP_DEBUG_FILE"
if test -n "$file"
echo "$argv" >> $file
end
end
function __%[1]s_perform_completion
- __%[1]s_debug "Starting __%[1]s_perform_completion with: $argv"
+ __%[1]s_debug "Starting __%[1]s_perform_completion"
- set args (string split -- " " "$argv")
- set lastArg "$args[-1]"
+ # Extract all args except the last one
+ set -l args (commandline -opc)
+ # Extract the last arg and escape it in case it is a space
+ set -l lastArg (string escape -- (commandline -ct))
__%[1]s_debug "args: $args"
__%[1]s_debug "last arg: $lastArg"
- set emptyArg ""
- if test -z "$lastArg"
- __%[1]s_debug "Setting emptyArg"
- set emptyArg \"\"
- end
- __%[1]s_debug "emptyArg: $emptyArg"
-
- if not type -q "$args[1]"
- # This can happen when "complete --do-complete %[2]s" is called when running this script.
- __%[1]s_debug "Cannot find $args[1]. No completions."
- return
- end
+ # Disable ActiveHelp which is not supported for fish shell
+ set -l requestComp "%[9]s=0 $args[1] %[3]s $args[2..-1] $lastArg"
- set requestComp "$args[1] %[3]s $args[2..-1] $emptyArg"
__%[1]s_debug "Calling $requestComp"
+ set -l results (eval $requestComp 2> /dev/null)
+
+ # Some programs may output extra empty lines after the directive.
+ # Let's ignore them or else it will break completion.
+ # Ref: https://github.com/spf13/cobra/issues/1279
+ for line in $results[-1..1]
+ if test (string trim -- $line) = ""
+ # Found an empty line, remove it
+ set results $results[1..-2]
+ else
+ # Found non-empty line, we have our proper output
+ break
+ end
+ end
- set results (eval $requestComp 2> /dev/null)
- set comps $results[1..-2]
- set directiveLine $results[-1]
+ set -l comps $results[1..-2]
+ set -l directiveLine $results[-1]
# For Fish, when completing a flag with an = (e.g., <program> -n=<TAB>)
# completions must be prefixed with the flag
- set flagPrefix (string match -r -- '-.*=' "$lastArg")
+ set -l flagPrefix (string match -r -- '-.*=' "$lastArg")
__%[1]s_debug "Comps: $comps"
__%[1]s_debug "DirectiveLine: $directiveLine"
@@ -71,120 +89,129 @@ function __%[1]s_perform_completion
printf "%%s\n" "$directiveLine"
end
-# This function does three things:
-# 1- Obtain the completions and store them in the global __%[1]s_comp_results
-# 2- Set the __%[1]s_comp_do_file_comp flag if file completion should be performed
-# and unset it otherwise
-# 3- Return true if the completion results are not empty
+# This function does two things:
+# - Obtain the completions and store them in the global __%[1]s_comp_results
+# - Return false if file completion should be performed
function __%[1]s_prepare_completions
+ __%[1]s_debug ""
+ __%[1]s_debug "========= starting completion logic =========="
+
# Start fresh
- set --erase __%[1]s_comp_do_file_comp
set --erase __%[1]s_comp_results
- # Check if the command-line is already provided. This is useful for testing.
- if not set --query __%[1]s_comp_commandLine
- # Use the -c flag to allow for completion in the middle of the line
- set __%[1]s_comp_commandLine (commandline -c)
- end
- __%[1]s_debug "commandLine is: $__%[1]s_comp_commandLine"
-
- set results (__%[1]s_perform_completion "$__%[1]s_comp_commandLine")
- set --erase __%[1]s_comp_commandLine
+ set -l results (__%[1]s_perform_completion)
__%[1]s_debug "Completion results: $results"
if test -z "$results"
__%[1]s_debug "No completion, probably due to a failure"
# Might as well do file completion, in case it helps
- set --global __%[1]s_comp_do_file_comp 1
return 1
end
- set directive (string sub --start 2 $results[-1])
+ set -l directive (string sub --start 2 $results[-1])
set --global __%[1]s_comp_results $results[1..-2]
__%[1]s_debug "Completions are: $__%[1]s_comp_results"
__%[1]s_debug "Directive is: $directive"
- set shellCompDirectiveError %[4]d
- set shellCompDirectiveNoSpace %[5]d
- set shellCompDirectiveNoFileComp %[6]d
- set shellCompDirectiveFilterFileExt %[7]d
- set shellCompDirectiveFilterDirs %[8]d
+ set -l shellCompDirectiveError %[4]d
+ set -l shellCompDirectiveNoSpace %[5]d
+ set -l shellCompDirectiveNoFileComp %[6]d
+ set -l shellCompDirectiveFilterFileExt %[7]d
+ set -l shellCompDirectiveFilterDirs %[8]d
if test -z "$directive"
set directive 0
end
- set compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2)
+ set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2)
if test $compErr -eq 1
__%[1]s_debug "Received error directive: aborting."
# Might as well do file completion, in case it helps
- set --global __%[1]s_comp_do_file_comp 1
return 1
end
- set filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2)
- set dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2)
+ set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2)
+ set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2)
if test $filefilter -eq 1; or test $dirfilter -eq 1
__%[1]s_debug "File extension filtering or directory filtering not supported"
# Do full file completion instead
- set --global __%[1]s_comp_do_file_comp 1
return 1
end
- set nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2)
- set nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2)
+ set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2)
+ set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2)
__%[1]s_debug "nospace: $nospace, nofiles: $nofiles"
- # Important not to quote the variable for count to work
- set numComps (count $__%[1]s_comp_results)
- __%[1]s_debug "numComps: $numComps"
-
- if test $numComps -eq 1; and test $nospace -ne 0
- # To support the "nospace" directive we trick the shell
- # by outputting an extra, longer completion.
- __%[1]s_debug "Adding second completion to perform nospace directive"
- set --append __%[1]s_comp_results $__%[1]s_comp_results[1].
- end
-
- if test $numComps -eq 0; and test $nofiles -eq 0
- __%[1]s_debug "Requesting file completion"
- set --global __%[1]s_comp_do_file_comp 1
+ # If we want to prevent a space, or if file completion is NOT disabled,
+ # we need to count the number of valid completions.
+ # To do so, we will filter on prefix as the completions we have received
+ # may not already be filtered so as to allow fish to match on different
+ # criteria than the prefix.
+ if test $nospace -ne 0; or test $nofiles -eq 0
+ set -l prefix (commandline -t | string escape --style=regex)
+ __%[1]s_debug "prefix: $prefix"
+
+ set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results)
+ set --global __%[1]s_comp_results $completions
+ __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results"
+
+ # Important not to quote the variable for count to work
+ set -l numComps (count $__%[1]s_comp_results)
+ __%[1]s_debug "numComps: $numComps"
+
+ if test $numComps -eq 1; and test $nospace -ne 0
+ # We must first split on \t to get rid of the descriptions to be
+ # able to check what the actual completion will be.
+ # We don't need descriptions anyway since there is only a single
+ # real completion which the shell will expand immediately.
+ set -l split (string split --max 1 \t $__%[1]s_comp_results[1])
+
+ # Fish won't add a space if the completion ends with any
+ # of the following characters: @=/:.,
+ set -l lastChar (string sub -s -1 -- $split)
+ if not string match -r -q "[@=/:.,]" -- "$lastChar"
+ # In other cases, to support the "nospace" directive we trick the shell
+ # by outputting an extra, longer completion.
+ __%[1]s_debug "Adding second completion to perform nospace directive"
+ set --global __%[1]s_comp_results $split[1] $split[1].
+ __%[1]s_debug "Completions are now: $__%[1]s_comp_results"
+ end
+ end
+
+ if test $numComps -eq 0; and test $nofiles -eq 0
+ # To be consistent with bash and zsh, we only trigger file
+ # completion when there are no other completions
+ __%[1]s_debug "Requesting file completion"
+ return 1
+ end
end
- # If we don't want file completion, we must return true even if there
- # are no completions found. This is because fish will perform the last
- # completion command, even if its condition is false, if no other
- # completion command was triggered
- return (not set --query __%[1]s_comp_do_file_comp)
+ return 0
end
# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves
# so we can properly delete any completions provided by another script.
-# The space after the the program name is essential to trigger completion for the program
-# and not completion of the program name itself.
-complete --do-complete "%[2]s " > /dev/null 2>&1
-# Using '> /dev/null 2>&1' since '&>' is not supported in older versions of fish.
+# Only do this if the program can be found, or else fish may print some errors; besides,
+# the existing completions will only be loaded if the program can be found.
+if type -q "%[2]s"
+ # The space after the program name is essential to trigger completion for the program
+ # and not completion of the program name itself.
+ # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish.
+ complete --do-complete "%[2]s " > /dev/null 2>&1
+end
# Remove any pre-existing completions for the program since we will be handling all of them.
complete -c %[2]s -e
-# The order in which the below two lines are defined is very important so that __%[1]s_prepare_completions
-# is called first. It is __%[1]s_prepare_completions that sets up the __%[1]s_comp_do_file_comp variable.
-#
-# This completion will be run second as complete commands are added FILO.
-# It triggers file completion choices when __%[1]s_comp_do_file_comp is set.
-complete -c %[2]s -n 'set --query __%[1]s_comp_do_file_comp'
-
-# This completion will be run first as complete commands are added FILO.
-# The call to __%[1]s_prepare_completions will setup both __%[1]s_comp_results and __%[1]s_comp_do_file_comp.
-# It provides the program's completion choices.
+# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results
+# which provides the program's completion choices.
complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
`, nameForVar, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
}
// GenFishCompletion generates fish completion file and writes to the passed writer.
diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go
new file mode 100644
index 0000000000..9c377aaf9c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/flag_groups.go
@@ -0,0 +1,224 @@
+// Copyright 2013-2022 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+const (
+ requiredAsGroup = "cobra_annotation_required_if_others_set"
+ mutuallyExclusive = "cobra_annotation_mutually_exclusive"
+)
+
+// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
+// if the command is invoked with a subset (but not all) of the given flags.
+func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v))
+ }
+ if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil {
+ // Only errs if the flag isn't found.
+ panic(err)
+ }
+ }
+}
+
+// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
+// if the command is invoked with more than one flag from the given set of flags.
+func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v))
+ }
+ // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed.
+ if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil {
+ panic(err)
+ }
+ }
+}
+
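A sketch of both annotations on one command (the flag names are illustrative); the checks themselves run in ValidateFlagGroups during execution:

```go
cmd.Flags().String("username", "", "user name")
cmd.Flags().String("password", "", "password")
// Supplying only one of the pair fails ValidateFlagGroups.
cmd.MarkFlagsRequiredTogether("username", "password")

cmd.Flags().Bool("json", false, "JSON output")
cmd.Flags().Bool("yaml", false, "YAML output")
// Supplying both fails ValidateFlagGroups.
cmd.MarkFlagsMutuallyExclusive("json", "yaml")
```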
+// ValidateFlagGroups validates the mutuallyExclusive/requiredAsGroup logic and returns the
+// first error encountered.
+func (c *Command) ValidateFlagGroups() error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ flags := c.Flags()
+
+ // groupStatus format is the list of flags as a unique ID,
+ // then a map of each flag name and whether it is set or not.
+ groupStatus := map[string]map[string]bool{}
+ mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus)
+ processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus)
+ })
+
+ if err := validateRequiredFlagGroups(groupStatus); err != nil {
+ return err
+ }
+ if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil {
+ return err
+ }
+ return nil
+}
+
+func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool {
+ for _, fname := range flagnames {
+ f := fs.Lookup(fname)
+ if f == nil {
+ return false
+ }
+ }
+ return true
+}
+
+func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) {
+ groupInfo, found := pflag.Annotations[annotation]
+ if found {
+ for _, group := range groupInfo {
+ if groupStatus[group] == nil {
+ flagnames := strings.Split(group, " ")
+
+ // Only consider this flag group at all if all the flags are defined.
+ if !hasAllFlags(flags, flagnames...) {
+ continue
+ }
+
+ groupStatus[group] = map[string]bool{}
+ for _, name := range flagnames {
+ groupStatus[group][name] = false
+ }
+ }
+
+ groupStatus[group][pflag.Name] = pflag.Changed
+ }
+ }
+}
+
+func validateRequiredFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+
+ unset := []string{}
+ for flagname, isSet := range flagnameAndStatus {
+ if !isSet {
+ unset = append(unset, flagname)
+ }
+ }
+ if len(unset) == len(flagnameAndStatus) || len(unset) == 0 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(unset)
+ return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset)
+ }
+
+ return nil
+}
+
+func validateExclusiveFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+ var set []string
+ for flagname, isSet := range flagnameAndStatus {
+ if isSet {
+ set = append(set, flagname)
+ }
+ }
+ if len(set) == 0 || len(set) == 1 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(set)
+ return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set)
+ }
+ return nil
+}
+
+func sortedKeys(m map[string]map[string]bool) []string {
+ keys := make([]string, len(m))
+ i := 0
+ for k := range m {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// enforceFlagGroupsForCompletion will do the following:
+// - when a flag in a group is present, other flags in the group will be marked required
+// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden
+// This allows the standard completion logic to behave appropriately for flag groups
+func (c *Command) enforceFlagGroupsForCompletion() {
+ if c.DisableFlagParsing {
+ return
+ }
+
+ flags := c.Flags()
+ groupStatus := map[string]map[string]bool{}
+ mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+ c.Flags().VisitAll(func(pflag *flag.Flag) {
+ processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus)
+ processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus)
+ })
+
+ // If a flag that is part of a group is present, we make all the other flags
+ // of that group required so that the shell completion suggests them automatically
+ for flagList, flagnameAndStatus := range groupStatus {
+ for _, isSet := range flagnameAndStatus {
+ if isSet {
+ // One of the flags of the group is set, mark the other ones as required
+ for _, fName := range strings.Split(flagList, " ") {
+ _ = c.MarkFlagRequired(fName)
+ }
+ }
+ }
+ }
+
+ // If a flag that is mutually exclusive to others is present, we hide the other
+ // flags of that group so the shell completion does not suggest them
+ for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus {
+ for flagName, isSet := range flagnameAndStatus {
+ if isSet {
+ // One of the flags of the mutually exclusive group is set, mark the other ones as hidden
+ // Don't mark the flag that is already set as hidden because it may be an
+ // array or slice flag and therefore must continue being suggested
+ for _, fName := range strings.Split(flagList, " ") {
+ if fName != flagName {
+ flag := c.Flags().Lookup(fName)
+ flag.Hidden = true
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
index c55be71cd1..004de42e41 100644
--- a/vendor/github.com/spf13/cobra/powershell_completions.go
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -1,3 +1,17 @@
+// Copyright 2013-2022 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but
// can be downloaded separately for Windows 7 or 8.1).
@@ -8,9 +22,15 @@ import (
"fmt"
"io"
"os"
+ "strings"
)
func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) {
+ // Variables should not contain a '-' or ':' character
+ nameForVar := name
+ nameForVar = strings.Replace(nameForVar, "-", "_", -1)
+ nameForVar = strings.Replace(nameForVar, ":", "_", -1)
+
compCmd := ShellCompRequestCmd
if !includeDesc {
compCmd = ShellCompNoDescRequestCmd
@@ -27,7 +47,7 @@ filter __%[1]s_escapeStringWithSpecialChars {
`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+`
}
-Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
+[scriptblock]$__%[2]sCompleterBlock = {
param(
$WordToComplete,
$CommandAst,
@@ -50,18 +70,19 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
if ($Command.Length -gt $CursorPosition) {
$Command=$Command.Substring(0,$CursorPosition)
}
- __%[1]s_debug "Truncated command: $Command"
+ __%[1]s_debug "Truncated command: $Command"
- $ShellCompDirectiveError=%[3]d
- $ShellCompDirectiveNoSpace=%[4]d
- $ShellCompDirectiveNoFileComp=%[5]d
- $ShellCompDirectiveFilterFileExt=%[6]d
- $ShellCompDirectiveFilterDirs=%[7]d
+ $ShellCompDirectiveError=%[4]d
+ $ShellCompDirectiveNoSpace=%[5]d
+ $ShellCompDirectiveNoFileComp=%[6]d
+ $ShellCompDirectiveFilterFileExt=%[7]d
+ $ShellCompDirectiveFilterDirs=%[8]d
- # Prepare the command to request completions for the program.
+ # Prepare the command to request completions for the program.
# Split the command at the first space to separate the program and arguments.
$Program,$Arguments = $Command.Split(" ",2)
- $RequestComp="$Program %[2]s $Arguments"
+
+ $RequestComp="$Program %[3]s $Arguments"
__%[1]s_debug "RequestComp: $RequestComp"
# we cannot use $WordToComplete because it
@@ -86,15 +107,17 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
# We add an extra empty parameter so we can indicate this to the go method.
__%[1]s_debug "Adding extra empty parameter"
`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+`
-`+" $RequestComp=\"$RequestComp\" + ' `\"`\"' "+`
+`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+`
}
__%[1]s_debug "Calling $RequestComp"
+ # First disable ActiveHelp which is not supported for Powershell
+ $env:%[9]s=0
+
#call the command store the output in $out and redirect stderr and stdout to null
# $Out is an array contains each line per element
Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null
-
# get directive from last line
[int]$Directive = $Out[-1].TrimStart(':')
if ($Directive -eq "") {
@@ -140,19 +163,6 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
$Space = ""
}
- if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) {
- __%[1]s_debug "ShellCompDirectiveNoFileComp is called"
-
- if ($Values.Length -eq 0) {
- # Just print an empty string here so the
- # shell does not start to complete paths.
- # We cannot use CompletionResult here because
- # it does not accept an empty string as argument.
- ""
- return
- }
- }
-
if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or
(($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) {
__%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported"
@@ -165,20 +175,33 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
# filter the result
$_.Name -like "$WordToComplete*"
- # Join the flag back if we have a equal sign flag
+ # Join the flag back if we have an equal sign flag
if ( $IsEqualFlag ) {
__%[1]s_debug "Join the equal sign flag back to the completion value"
$_.Name = $Flag + "=" + $_.Name
}
}
+ if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) {
+ __%[1]s_debug "ShellCompDirectiveNoFileComp is called"
+
+ if ($Values.Length -eq 0) {
+ # Just print an empty string here so the
+ # shell does not start to complete paths.
+ # We cannot use CompletionResult here because
+ # it does not accept an empty string as argument.
+ ""
+ return
+ }
+ }
+
# Get the current mode
$Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function
__%[1]s_debug "Mode: $Mode"
$Values | ForEach-Object {
- # store temporay because switch will overwrite $_
+ # store temporary because switch will overwrite $_
$comp = $_
# PowerShell supports three different completion modes
@@ -233,16 +256,18 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
Default {
# Like MenuComplete but we don't want to add a space here because
# the user needs to press space anyway to get the completion.
- # Description will not be shown because thats not possible with TabCompleteNext
+ # Description will not be shown because that's not possible with TabCompleteNext
[System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
}
}
}
}
-`, name, compCmd,
+
+Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock
+`, name, nameForVar, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
}
func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error {
diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md
index d98a71e36f..6865f88e79 100644
--- a/vendor/github.com/spf13/cobra/projects_using_cobra.md
+++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md
@@ -1,38 +1,60 @@
## Projects using Cobra
+- [Allero](https://github.com/allero-io/allero)
- [Arduino CLI](https://github.com/arduino/arduino-cli)
-- [Bleve](http://www.blevesearch.com/)
-- [CockroachDB](http://www.cockroachlabs.com/)
+- [Bleve](https://blevesearch.com/)
+- [Cilium](https://cilium.io/)
+- [CloudQuery](https://github.com/cloudquery/cloudquery)
+- [CockroachDB](https://www.cockroachlabs.com/)
- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk)
+- [Datree](https://github.com/datreeio/datree)
- [Delve](https://github.com/derekparker/delve)
- [Docker (distribution)](https://github.com/docker/distribution)
- [Etcd](https://etcd.io/)
- [Gardener](https://github.com/gardener/gardenctl)
- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl)
- [Git Bump](https://github.com/erdaltsksn/git-bump)
-- [Github CLI](https://github.com/cli/cli)
+- [GitHub CLI](https://github.com/cli/cli)
- [GitHub Labeler](https://github.com/erdaltsksn/gh-label)
- [Golangci-lint](https://golangci-lint.run)
-- [GopherJS](http://www.gopherjs.org/)
+- [GopherJS](https://github.com/gopherjs/gopherjs)
+- [GoReleaser](https://goreleaser.com)
- [Helm](https://helm.sh)
- [Hugo](https://gohugo.io)
+- [Infracost](https://github.com/infracost/infracost)
- [Istio](https://istio.io)
- [Kool](https://github.com/kool-dev/kool)
-- [Kubernetes](http://kubernetes.io/)
+- [Kubernetes](https://kubernetes.io/)
+- [Kubescape](https://github.com/armosec/kubescape)
+- [KubeVirt](https://github.com/kubevirt/kubevirt)
- [Linkerd](https://linkerd.io/)
- [Mattermost-server](https://github.com/mattermost/mattermost-server)
+- [Mercure](https://mercure.rocks/)
+- [Meroxa CLI](https://github.com/meroxa/cli)
- [Metal Stack CLI](https://github.com/metal-stack/metalctl)
- [Moby (formerly Docker)](https://github.com/moby/moby)
+- [Moldy](https://github.com/Moldy-Community/moldy)
+- [Multi-gitter](https://github.com/lindell/multi-gitter)
- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+- [nFPM](https://nfpm.goreleaser.com)
+- [Okteto](https://github.com/okteto/okteto)
- [OpenShift](https://www.openshift.com/)
- [Ory Hydra](https://github.com/ory/hydra)
- [Ory Kratos](https://github.com/ory/kratos)
+- [Pixie](https://github.com/pixie-io/pixie)
+- [Polygon Edge](https://github.com/0xPolygon/polygon-edge)
- [Pouch](https://github.com/alibaba/pouch)
-- [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
+- [ProjectAtomic (enterprise)](https://www.projectatomic.io/)
- [Prototool](https://github.com/uber/prototool)
+- [Pulumi](https://www.pulumi.com)
+- [QRcp](https://github.com/claudiodangelis/qrcp)
- [Random](https://github.com/erdaltsksn/random)
- [Rclone](https://rclone.org/)
+- [Scaleway CLI](https://github.com/scaleway/scaleway-cli)
- [Skaffold](https://skaffold.dev/)
- [Tendermint](https://github.com/tendermint/tendermint)
- [Twitch CLI](https://github.com/twitchdev/twitch-cli)
+- [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli)
+- VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework)
- [Werf](https://werf.io/)
+- [ZITADEL](https://github.com/zitadel/zitadel)
diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go
index d99bf91e5f..126e83c307 100644
--- a/vendor/github.com/spf13/cobra/shell_completions.go
+++ b/vendor/github.com/spf13/cobra/shell_completions.go
@@ -1,3 +1,17 @@
+// Copyright 2013-2022 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package cobra
import (
diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md
index cd533ac3d4..553ee5df8a 100644
--- a/vendor/github.com/spf13/cobra/shell_completions.md
+++ b/vendor/github.com/spf13/cobra/shell_completions.md
@@ -7,10 +7,21 @@ The currently supported shells are:
- fish
- PowerShell
-If you are using the generator, you can create a completion command by running
+Cobra will automatically provide your program with a fully functional `completion` command,
+similarly to how it provides the `help` command.
+
+## Creating your own completion command
+
+If you do not wish to use the default `completion` command, you can choose to
+provide your own, which will take precedence over the default one. (This also provides
+backwards-compatibility with programs that already have their own `completion` command.)
+
+If you are using the `cobra-cli` generator,
+which can be found at [spf13/cobra-cli](https://github.com/spf13/cobra-cli),
+you can create a completion command by running
```bash
-cobra add completion
+cobra-cli add completion
```
and then modifying the generated `cmd/completion.go` file to look something like this
(writing the shell script to stdout allows the most flexible use):
@@ -19,17 +30,17 @@ and then modifying the generated `cmd/completion.go` file to look something like
var completionCmd = &cobra.Command{
Use: "completion [bash|zsh|fish|powershell]",
Short: "Generate completion script",
- Long: `To load completions:
+ Long: fmt.Sprintf(`To load completions:
Bash:
- $ source <(yourprogram completion bash)
+ $ source <(%[1]s completion bash)
# To load completions for each session, execute once:
# Linux:
- $ yourprogram completion bash > /etc/bash_completion.d/yourprogram
+ $ %[1]s completion bash > /etc/bash_completion.d/%[1]s
# macOS:
- $ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram
+ $ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
Zsh:
@@ -39,25 +50,25 @@ Zsh:
$ echo "autoload -U compinit; compinit" >> ~/.zshrc
# To load completions for each session, execute once:
- $ yourprogram completion zsh > "${fpath[1]}/_yourprogram"
+ $ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
# You will need to start a new shell for this setup to take effect.
fish:
- $ yourprogram completion fish | source
+ $ %[1]s completion fish | source
# To load completions for each session, execute once:
- $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish
+ $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
PowerShell:
- PS> yourprogram completion powershell | Out-String | Invoke-Expression
+ PS> %[1]s completion powershell | Out-String | Invoke-Expression
# To load completions for every new session, run:
- PS> yourprogram completion powershell > yourprogram.ps1
+ PS> %[1]s completion powershell > %[1]s.ps1
# and source this file from your PowerShell profile.
-`,
+`, cmd.Root().Name()),
DisableFlagsInUseLine: true,
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
Args: cobra.ExactValidArgs(1),
@@ -70,7 +81,7 @@ PowerShell:
case "fish":
cmd.Root().GenFishCompletion(os.Stdout, true)
case "powershell":
- cmd.Root().GenPowerShellCompletion(os.Stdout)
+ cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
}
},
}
@@ -78,6 +89,31 @@ PowerShell:
**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script, so such messages must be removed.
+## Adapting the default completion command
+
+Cobra provides a few options for the default `completion` command. To configure such options you must set
+the `CompletionOptions` field on the *root* command.
+
+To tell Cobra *not* to provide the default `completion` command:
+```
+rootCmd.CompletionOptions.DisableDefaultCmd = true
+```
+
+To tell Cobra to mark the default `completion` command as *hidden*:
+```
+rootCmd.CompletionOptions.HiddenDefaultCmd = true
+```
+
+To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands:
+```
+rootCmd.CompletionOptions.DisableNoDescFlag = true
+```
+
+To tell Cobra to completely disable descriptions for completions:
+```
+rootCmd.CompletionOptions.DisableDescriptions = true
+```
+
# Customizing completions
The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values.
@@ -91,7 +127,7 @@ For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns
Some simplified code from `kubectl get` looks like:
```go
-validArgs []string = { "pod", "node", "service", "replicationcontroller" }
+validArgs = []string{ "pod", "node", "service", "replicationcontroller" }
cmd := &cobra.Command{
Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
@@ -117,7 +153,7 @@ node pod replicationcontroller service
If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
```go
-argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
+argAliases = []string { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
cmd := &cobra.Command{
...
@@ -323,7 +359,10 @@ cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string,
```
### Descriptions for completions
-`zsh`, `fish` and `powershell` allow for descriptions to annotate completion choices. For commands and flags, Cobra will provide the descriptions automatically, based on usage information. For example, using zsh:
+Cobra provides support for completion descriptions. Such descriptions are supported for each shell
+(however, for bash, they are only available in the [completion V2 version](#bash-completion-v2)).
+For commands and flags, Cobra will provide the descriptions automatically, based on usage information.
+For example, using zsh:
```
$ helm s[tab]
search -- search for a keyword in charts
@@ -336,7 +375,7 @@ $ helm s[tab]
search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
```
-Cobra allows you to add annotations to your own completions. Simply add the annotation text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example:
+Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example:
```go
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp
@@ -371,6 +410,37 @@ completion firstcommand secondcommand
For backward compatibility, Cobra still supports its bash legacy dynamic completion solution.
Please refer to [Bash Completions](bash_completions.md) for details.
+### Bash completion V2
+
+Cobra provides two versions for bash completion. The original bash completion (which started it all!) can be used by calling
+`GenBashCompletion()` or `GenBashCompletionFile()`.
+
+A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or
+`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion
+(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion
+solution described in this document.
+Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash
+completion V2 solution which provides the following extra features:
+- Supports completion descriptions (like the other shells)
+- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines)
+- Streamlined user experience thanks to a completion behavior aligned with the other shells
+
+`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()`
+you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra
+will provide the description automatically based on usage information. You can choose to make this option configurable by
+your users.
+
+```
+# With descriptions
+$ helm s[tab][tab]
+search (search for a keyword in charts) status (display the status of the named release)
+show (show information of a chart)
+
+# Without descriptions
+$ helm s[tab][tab]
+search show status
+```
+**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command.
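+
+As a minimal sketch (an illustration, not text from the Cobra docs), the `completion` command shown earlier could wire in V2 generation like this, with `includeDesc` being the boolean parameter described above:
+
+```go
+case "bash":
+	// includeDesc decides whether completions carry descriptions;
+	// it could be exposed to users through a flag on the command.
+	cmd.Root().GenBashCompletionV2(os.Stdout, includeDesc)
+```
+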
## Zsh completions
Cobra supports native zsh completion generated from the root `cobra.Command`.
@@ -465,6 +535,21 @@ search for a keyword in charts
$ helm s[tab]
search show status
```
+### Aliases
+
+You can also configure `powershell` aliases for your program and they will also support completions.
+
+```
+$ sal aliasname origcommand
+$ Register-ArgumentCompleter -CommandName 'aliasname' -ScriptBlock $__origcommandCompleterBlock
+
+# and now when you run `aliasname` completion will make
+# suggestions as it did for `origcommand`.
+
+$ aliasname <tab>
+completion firstcommand secondcommand
+```
+The name of the completer block variable is of the form `$__<programName>CompleterBlock`, where every `-` and `:` in the program name has been replaced with `_` to respect PowerShell naming syntax.
### Limitations
diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md
new file mode 100644
index 0000000000..e55367e853
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/user_guide.md
@@ -0,0 +1,695 @@
+# User Guide
+
+While you are welcome to provide your own organization, typically a Cobra-based
+application follows this organizational structure:
+
+```
+ ▾ appName/
+ ▾ cmd/
+ add.go
+ your.go
+ commands.go
+ here.go
+ main.go
+```
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import (
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+## Using the Cobra Generator
+
+Cobra-CLI is its own program that will create your application and add any
+commands you want. It's the easiest way to incorporate Cobra into your application.
+
+For complete details on using the Cobra generator, please refer to [The Cobra-CLI Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md)
+
+## Using the Cobra Library
+
+To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
+You will optionally provide additional commands as you see fit.
+
+### Create rootCmd
+
+Cobra doesn't require any special constructors. Simply create your commands.
+
+Ideally you place this in app/cmd/root.go:
+
+```go
+var rootCmd = &cobra.Command{
+ Use: "hugo",
+ Short: "Hugo is a very fast static site generator",
+ Long: `A Fast and Flexible Static Site Generator built with
+ love by spf13 and friends in Go.
+ Complete documentation is available at https://gohugo.io/documentation/`,
+ Run: func(cmd *cobra.Command, args []string) {
+ // Do Stuff Here
+ },
+}
+
+func Execute() {
+ if err := rootCmd.Execute(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+```
+
+You will additionally define flags and handle configuration in your init() function.
+
+For example cmd/root.go:
+
+```go
+package cmd
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+var (
+ // Used for flags.
+ cfgFile string
+ userLicense string
+
+ rootCmd = &cobra.Command{
+ Use: "cobra-cli",
+ Short: "A generator for Cobra based Applications",
+ Long: `Cobra is a CLI library for Go that empowers applications.
+This application is a tool to generate the needed files
+to quickly create a Cobra application.`,
+ }
+)
+
+// Execute executes the root command.
+func Execute() error {
+ return rootCmd.Execute()
+}
+
+func init() {
+ cobra.OnInitialize(initConfig)
+
+ rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+ rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
+ rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
+ rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+ viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+ viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
+ viper.SetDefault("license", "apache")
+
+ rootCmd.AddCommand(addCmd)
+ rootCmd.AddCommand(initCmd)
+}
+
+func initConfig() {
+ if cfgFile != "" {
+ // Use config file from the flag.
+ viper.SetConfigFile(cfgFile)
+ } else {
+ // Find home directory.
+ home, err := os.UserHomeDir()
+ cobra.CheckErr(err)
+
+ // Search config in home directory with name ".cobra" (without extension).
+ viper.AddConfigPath(home)
+ viper.SetConfigType("yaml")
+ viper.SetConfigName(".cobra")
+ }
+
+ viper.AutomaticEnv()
+
+ if err := viper.ReadInConfig(); err == nil {
+ fmt.Println("Using config file:", viper.ConfigFileUsed())
+ }
+}
+```
+
+### Create your main.go
+
+With the root command you need to have your main function execute it.
+Execute should be run on the root for clarity, though it can be called on any command.
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
+
+```go
+package main
+
+import (
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+### Create additional commands
+
+Additional commands can be defined and typically are each given their own file
+inside of the cmd/ directory.
+
+If you wanted to create a version command you would create cmd/version.go and
+populate it with the following:
+
+```go
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ rootCmd.AddCommand(versionCmd)
+}
+
+var versionCmd = &cobra.Command{
+ Use: "version",
+ Short: "Print the version number of Hugo",
+ Long: `All software has versions. This is Hugo's`,
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
+ },
+}
+```
+
+### Returning and handling errors
+
+If you wish to return an error to the caller of a command, `RunE` can be used.
+
+```go
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ rootCmd.AddCommand(tryCmd)
+}
+
+var tryCmd = &cobra.Command{
+ Use: "try",
+ Short: "Try and possibly fail at something",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if err := someFunc(); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+```
+
+The error can then be caught at the execute function call.
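+
+For instance, a minimal sketch of `main` (assuming the `Execute` wrapper from the root command example, which returns the error):
+
+```go
+func main() {
+	if err := cmd.Execute(); err != nil {
+		// Cobra prints the error and usage by default; just set the exit code.
+		os.Exit(1)
+	}
+}
+```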
+
+## Working with Flags
+
+Flags provide modifiers to control how the action command operates.
+
+### Assign flags to a command
+
+Since flags are defined and used in different locations, we need to
+define a variable with the appropriate scope outside the command so the
+flag can be assigned to it.
+
+```go
+var Verbose bool
+var Source string
+```
+
+There are two different approaches to assign a flag.
+
+### Persistent Flags
+
+A flag can be 'persistent', meaning that this flag will be available to the
+command it's assigned to as well as every command under that command. For
+global flags, assign a flag as a persistent flag on the root.
+
+```go
+rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
+```
+
+### Local Flags
+
+A flag can also be assigned locally, which will only apply to that specific command.
+
+```go
+localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
+```
+
+### Local Flag on Parent Commands
+
+By default, Cobra only parses local flags on the target command, and any local flags on
+parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will
+parse local flags on each command before executing the target command.
+
+```go
+command := cobra.Command{
+ Use: "print [OPTIONS] [COMMANDS]",
+ TraverseChildren: true,
+}
+```
+
+### Bind Flags with Config
+
+You can also bind your flags with [viper](https://github.com/spf13/viper):
+```go
+var author string
+
+func init() {
+ rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+}
+```
+
+In this example, the persistent flag `author` is bound with `viper`.
+**Note**: the variable `author` will not be set to the value from the config
+when the `--author` flag is provided by the user.
+
+More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
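+
+To read the effective value, go through `viper` rather than the flag variable; a short sketch:
+
+```go
+// viper resolves the value with flag > config > default precedence.
+fmt.Println("author:", viper.GetString("author"))
+```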
+
+### Required flags
+
+Flags are optional by default. If instead you wish your command to report an error
+when a flag has not been set, mark it as required:
+```go
+rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkFlagRequired("region")
+```
+
+Or, for persistent flags:
+```go
+rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkPersistentFlagRequired("region")
+```
+
+### Flag Groups
+
+If you have different flags that must be provided together (e.g. if the user provides the `--username` flag they MUST provide the `--password` flag as well) then
+Cobra can enforce that requirement:
+```go
+rootCmd.Flags().StringVarP(&u, "username", "u", "", "Username (required if password is set)")
+rootCmd.Flags().StringVarP(&pw, "password", "p", "", "Password (required if username is set)")
+rootCmd.MarkFlagsRequiredTogether("username", "password")
+```
+
+You can also prevent different flags from being provided together if they represent mutually
+exclusive options such as specifying an output format as either `--json` or `--yaml` but never both:
+```go
+rootCmd.Flags().BoolVar(&u, "json", false, "Output in JSON")
+rootCmd.Flags().BoolVar(&pw, "yaml", false, "Output in YAML")
+rootCmd.MarkFlagsMutuallyExclusive("json", "yaml")
+```
+
+In both of these cases:
+ - both local and persistent flags can be used
+ - **NOTE:** the group is only enforced on commands where every flag is defined
+ - a flag may appear in multiple groups
+ - a group may contain any number of flags
+
+## Positional and Custom Arguments
+
+Validation of positional arguments can be specified using the `Args` field of `Command`.
+The following validators are built in:
+
+- Number of arguments:
+ - `NoArgs` - report an error if there are any positional args.
+ - `ArbitraryArgs` - accept any number of args.
+ - `MinimumNArgs(int)` - report an error if fewer than N positional args are provided.
+ - `MaximumNArgs(int)` - report an error if more than N positional args are provided.
+ - `ExactArgs(int)` - report an error if there are not exactly N positional args.
+ - `RangeArgs(min, max)` - report an error if the number of args is not between `min` and `max`.
+- Content of the arguments:
+ - `OnlyValidArgs` - report an error if there are any positional args not specified in the `ValidArgs` field of `Command`, which can optionally be set to a list of valid values for positional args.
+
+If `Args` is undefined or `nil`, it defaults to `ArbitraryArgs`.
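+
+As a small illustrative sketch (the command name is made up) using one of the built-in validators:
+
+```go
+var openCmd = &cobra.Command{
+	Use:  "open [file]",
+	Args: cobra.ExactArgs(1), // error unless exactly one positional arg
+	Run: func(cmd *cobra.Command, args []string) {
+		fmt.Println("opening", args[0])
+	},
+}
+```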
+
+Moreover, `MatchAll(pargs ...PositionalArgs)` enables combining existing checks with arbitrary other checks.
+For instance, if you want to report an error if there are not exactly N positional args OR if there are any positional
+args that are not in the `ValidArgs` field of `Command`, you can call `MatchAll` on `ExactArgs` and `OnlyValidArgs`, as
+shown below:
+
+```go
+var cmd = &cobra.Command{
+ Short: "hello",
+ Args: MatchAll(ExactArgs(2), OnlyValidArgs),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hello, World!")
+ },
+}
+```
+
+It is possible to set any custom validator that satisfies `func(cmd *cobra.Command, args []string) error`.
+For example:
+
+```go
+var cmd = &cobra.Command{
+ Short: "hello",
+ Args: func(cmd *cobra.Command, args []string) error {
+ // Optionally run one of the validators provided by cobra
+ if err := cobra.MinimumNArgs(1)(cmd, args); err != nil {
+ return err
+ }
+ // Run the custom validation logic
+ if myapp.IsValidColor(args[0]) {
+ return nil
+ }
+ return fmt.Errorf("invalid color specified: %s", args[0])
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hello, World!")
+ },
+}
+```
+
+## Example
+
+In the example below, we have defined three commands. Two are at the top level
+and one (cmdTimes) is a child of one of the top commands. In this case the root
+is not executable, meaning that a subcommand is required. This is accomplished
+by not providing a 'Run' for the 'rootCmd'.
+
+We have only defined one flag for a single command.
+
+More documentation about flags is available at https://github.com/spf13/pflag
+
+```go
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+ var echoTimes int
+
+ var cmdPrint = &cobra.Command{
+ Use: "print [string to print]",
+ Short: "Print anything to the screen",
+ Long: `print is for printing anything back to the screen.
+For many years people have printed back to the screen.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Print: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdEcho = &cobra.Command{
+ Use: "echo [string to echo]",
+ Short: "Echo anything to the screen",
+ Long: `echo is for echoing anything back.
+Echo works a lot like print, except it has a child command.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Echo: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdTimes = &cobra.Command{
+ Use: "times [string to echo]",
+ Short: "Echo anything to the screen more times",
+ Long: `echo things multiple times back to the user by providing
+a count and a string.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ for i := 0; i < echoTimes; i++ {
+ fmt.Println("Echo: " + strings.Join(args, " "))
+ }
+ },
+ }
+
+ cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
+
+ var rootCmd = &cobra.Command{Use: "app"}
+ rootCmd.AddCommand(cmdPrint, cmdEcho)
+ cmdEcho.AddCommand(cmdTimes)
+ rootCmd.Execute()
+}
+```
+
+For a more complete example of a larger application, please check out [Hugo](https://gohugo.io/).
+
+## Help Command
+
+Cobra automatically adds a help command to your application when you have subcommands.
+This will be called when a user runs 'app help'. Additionally, help will also
+support all other commands as input. Say, for instance, you have a command called
+'create' without any additional configuration; Cobra will work when 'app help
+create' is called. Every command will automatically have the '--help' flag added.
+
+### Example
+
+The following output is automatically generated by Cobra. Nothing beyond the
+command and flag definitions is needed.
+
+ $ cobra-cli help
+
+ Cobra is a CLI library for Go that empowers applications.
+ This application is a tool to generate the needed files
+ to quickly create a Cobra application.
+
+ Usage:
+ cobra-cli [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ completion Generate the autocompletion script for the specified shell
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra-cli
+ -l, --license string name of license for the project
+ --viper use Viper for configuration
+
+ Use "cobra-cli [command] --help" for more information about a command.
+
+
+Help is just a command like any other. There is no special logic or behavior
+around it. In fact, you can provide your own if you want.
+
+### Grouping commands in help
+
+Cobra supports grouping of available commands in the help output. To group commands, each group must be explicitly
+defined using `AddGroup()` on the parent command. Then a subcommand can be added to a group using the `GroupID` element
+of that subcommand. The groups will appear in the help output in the same order as they are defined using different
+calls to `AddGroup()`. If you use the generated `help` or `completion` commands, you can set their group IDs using
+`SetHelpCommandGroupID()` and `SetCompletionCommandGroupID()` on the root command, respectively.
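+
+A short sketch of the wiring (the group ID string and command names are arbitrary):
+
+```go
+rootCmd.AddGroup(&cobra.Group{ID: "management", Title: "Management Commands:"})
+createCmd.GroupID = "management"
+rootCmd.SetHelpCommandGroupID("management")
+```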
+
+### Defining your own help
+
+You can provide your own Help command or your own template for the default command to use
+with the following functions:
+
+```go
+cmd.SetHelpCommand(cmd *Command)
+cmd.SetHelpFunc(f func(*Command, []string))
+cmd.SetHelpTemplate(s string)
+```
+
+The latter two will also apply to any children commands.
+
+## Usage Message
+
+When the user provides an invalid flag or invalid command, Cobra responds by
+showing the user the 'usage'.
+
+### Example
+You may recognize this from the help above. That's because the default help
+embeds the usage as part of its output.
+
+ $ cobra-cli --invalid
+ Error: unknown flag: --invalid
+ Usage:
+ cobra-cli [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ completion Generate the autocompletion script for the specified shell
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra-cli
+ -l, --license string name of license for the project
+ --viper use Viper for configuration
+
+ Use "cobra [command] --help" for more information about a command.
+
+### Defining your own usage
+You can provide your own usage function or template for Cobra to use.
+Like help, the function and template are overridable through public methods:
+
+```go
+cmd.SetUsageFunc(f func(*Command) error)
+cmd.SetUsageTemplate(s string)
+```
+
+## Version Flag
+
+Cobra adds a top-level '--version' flag if the Version field is set on the root command.
+Running an application with the '--version' flag will print the version to stdout using
+the version template. The template can be customized using the
+`cmd.SetVersionTemplate(s string)` function.
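+
+A minimal sketch:
+
+```go
+var rootCmd = &cobra.Command{
+	Use:     "app",
+	Version: "1.2.3", // enables the automatic --version flag
+}
+```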
+
+## PreRun and PostRun Hooks
+
+It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
+
+- `PersistentPreRun`
+- `PreRun`
+- `Run`
+- `PostRun`
+- `PersistentPostRun`
+
+An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+
+ var rootCmd = &cobra.Command{
+ Use: "root [sub]",
+ Short: "My root command",
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+ },
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ var subCmd = &cobra.Command{
+ Use: "sub [no options!]",
+ Short: "My subcommand",
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ rootCmd.AddCommand(subCmd)
+
+ rootCmd.SetArgs([]string{""})
+ rootCmd.Execute()
+ fmt.Println()
+ rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+ rootCmd.Execute()
+}
+```
+
+Output:
+```
+Inside rootCmd PersistentPreRun with args: []
+Inside rootCmd PreRun with args: []
+Inside rootCmd Run with args: []
+Inside rootCmd PostRun with args: []
+Inside rootCmd PersistentPostRun with args: []
+
+Inside rootCmd PersistentPreRun with args: [arg1 arg2]
+Inside subCmd PreRun with args: [arg1 arg2]
+Inside subCmd Run with args: [arg1 arg2]
+Inside subCmd PostRun with args: [arg1 arg2]
+Inside subCmd PersistentPostRun with args: [arg1 arg2]
+```
+
+## Suggestions when "unknown command" happens
+
+Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
+
+```
+$ hugo srever
+Error: unknown command "srever" for "hugo"
+
+Did you mean this?
+ server
+
+Run 'hugo --help' for usage.
+```
+
+Suggestions are automatically generated based on existing subcommands and use an implementation of [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
+
+If you need to disable suggestions or tweak the string distance in your command, use:
+
+```go
+command.DisableSuggestions = true
+```
+
+or
+
+```go
+command.SuggestionsMinimumDistance = 1
+```
+
+You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance but that make sense in your set of commands, and for which
+you don't want aliases. Example:
+
+```
+$ kubectl remove
+Error: unknown command "remove" for "kubectl"
+
+Did you mean this?
+ delete
+
+Run 'kubectl help' for usage.
+```
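+
+A hypothetical sketch of how such a `delete` command could declare this (not taken from kubectl):
+
+```go
+deleteCmd := &cobra.Command{
+	Use:        "delete",
+	Short:      "Delete resources",
+	SuggestFor: []string{"remove", "rm"},
+}
+```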
+
+## Generating documentation for your command
+
+Cobra can generate documentation based on subcommands, flags, etc. Read more about it in the [docs generation documentation](doc/README.md).
+
+## Generating shell completions
+
+Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md).
+
+## Providing Active Help
+
+Cobra makes use of the shell-completion system to define a framework allowing you to provide Active Help to your users. Active Help messages (hints, warnings, etc.) are printed as the program is being used. Read more about it in [Active Help](active_help.md).
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
index 2e840285f3..84cec76fde 100644
--- a/vendor/github.com/spf13/cobra/zsh_completions.go
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -1,3 +1,17 @@
+// Copyright 2013-2022 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package cobra
import (
@@ -75,7 +89,7 @@ func genZshComp(buf io.StringWriter, name string, includeDesc bool) {
if !includeDesc {
compCmd = ShellCompNoDescRequestCmd
}
- WriteStringAndCheck(buf, fmt.Sprintf(`#compdef _%[1]s %[1]s
+ WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s
# zsh completion for %-36[1]s -*- shell-script -*-
@@ -95,7 +109,7 @@ _%[1]s()
local shellCompDirectiveFilterFileExt=%[6]d
local shellCompDirectiveFilterDirs=%[7]d
- local lastParam lastChar flagPrefix requestComp out directive compCount comp lastComp
+ local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace
local -a completions
__%[1]s_debug "\n========= starting completion logic =========="
@@ -163,8 +177,24 @@ _%[1]s()
return
fi
- compCount=0
+ local activeHelpMarker="%[8]s"
+ local endIndex=${#activeHelpMarker}
+ local startIndex=$((${#activeHelpMarker}+1))
+ local hasActiveHelp=0
while IFS='\n' read -r comp; do
+ # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker)
+ if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then
+ __%[1]s_debug "ActiveHelp found: $comp"
+ comp="${comp[$startIndex,-1]}"
+ if [ -n "$comp" ]; then
+ compadd -x "${comp}"
+ __%[1]s_debug "ActiveHelp will need delimiter"
+ hasActiveHelp=1
+ fi
+
+ continue
+ fi
+
if [ -n "$comp" ]; then
# If requested, completions are returned with a description.
# The description is preceded by a TAB character.
@@ -172,16 +202,31 @@ _%[1]s()
# We first need to escape any : as part of the completion itself.
comp=${comp//:/\\:}
- local tab=$(printf '\t')
+ local tab="$(printf '\t')"
comp=${comp//$tab/:}
- ((compCount++))
__%[1]s_debug "Adding completion: ${comp}"
completions+=${comp}
lastComp=$comp
fi
done < <(printf "%%s\n" "${out[@]}")
+ # Add a delimiter after the activeHelp statements, but only if:
+ # - there are completions following the activeHelp statements, or
+ # - file completion will be performed (so there will be choices after the activeHelp)
+ if [ $hasActiveHelp -eq 1 ]; then
+ if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then
+ __%[1]s_debug "Adding activeHelp delimiter"
+ compadd -x "--"
+ hasActiveHelp=0
+ fi
+ fi
+
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ __%[1]s_debug "Activating nospace."
+ noSpace="-S ''"
+ fi
+
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local filteringCmd
@@ -199,7 +244,7 @@ _%[1]s()
_arguments '*:filename:'"$filteringCmd"
elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
# File completion for directories only
- local subDir
+ local subdir
subdir="${completions[1]}"
if [ -n "$subdir" ]; then
__%[1]s_debug "Listing directories in $subdir"
@@ -208,33 +253,49 @@ _%[1]s()
__%[1]s_debug "Listing directories in ."
fi
+ local result
_arguments '*:dirname:_files -/'" ${flagPrefix}"
+ result=$?
if [ -n "$subdir" ]; then
popd >/dev/null 2>&1
fi
- elif [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ] && [ ${compCount} -eq 1 ]; then
- __%[1]s_debug "Activating nospace."
- # We can use compadd here as there is no description when
- # there is only one completion.
- compadd -S '' "${lastComp}"
- elif [ ${compCount} -eq 0 ]; then
- if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
- __%[1]s_debug "deactivating file completion"
+ return $result
+ else
+ __%[1]s_debug "Calling _describe"
+ if eval _describe "completions" completions $flagPrefix $noSpace; then
+ __%[1]s_debug "_describe found some completions"
+
+ # Return the success of having called _describe
+ return 0
else
- # Perform file completion
- __%[1]s_debug "activating file completion"
- _arguments '*:filename:_files'" ${flagPrefix}"
+ __%[1]s_debug "_describe did not find completions."
+ __%[1]s_debug "Checking if we should do file completion."
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ __%[1]s_debug "deactivating file completion"
+
+ # We must return an error code here to let zsh know that there were no
+ # completions found by _describe; this is what will trigger other
+ # matching algorithms to attempt to find completions.
+ # For example zsh can match letters in the middle of words.
+ return 1
+ else
+ # Perform file completion
+ __%[1]s_debug "Activating file completion"
+
+ # We must return the result of this command, so it must be the
+ # last command, or else we must store its result to return it.
+ _arguments '*:filename:_files'" ${flagPrefix}"
+ fi
fi
- else
- _describe "completions" completions $(echo $flagPrefix)
fi
}
# don't run the completion function when being source-ed or eval-ed
if [ "$funcstack[1]" = "_%[1]s" ]; then
- _%[1]s
+ _%[1]s
fi
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+ activeHelpMarker))
}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go
new file mode 100644
index 0000000000..55248888c6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go
@@ -0,0 +1,60 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "bufio"
+ "io"
+ "io/fs"
+ "os"
+)
+
+// FileReader is a wrapper of io.Reader. It also provides file info.
+type FileReader interface {
+ io.Reader
+ FileInfo() (fs.FileInfo, error)
+}
+
+type fileReader struct {
+ *os.File
+}
+
+func NewFileReader(f *os.File) FileReader {
+ return &fileReader{f}
+}
+
+func (fr *fileReader) FileInfo() (fs.FileInfo, error) {
+ return fr.Stat()
+}
+
+// FileBufReader is a wrapper of bufio.Reader. It also provides file info.
+type FileBufReader struct {
+ *bufio.Reader
+ fi fs.FileInfo
+}
+
+func NewFileBufReader(fr FileReader) *FileBufReader {
+ bufReader := bufio.NewReader(fr)
+ fi, err := fr.FileInfo()
+ if err != nil {
+ // This should never happen.
+ panic(err)
+ }
+ return &FileBufReader{bufReader, fi}
+}
+
+func (fbr *FileBufReader) FileInfo() fs.FileInfo {
+ return fbr.fi
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go
index e442c3c92e..d31ece3e24 100644
--- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go
@@ -44,16 +44,12 @@ func IsDirWriteable(dir string) error {
// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
does not exist. TouchDirAll also ensures the given directory is writable.
-func TouchDirAll(dir string) error {
+func TouchDirAll(lg *zap.Logger, dir string) error {
// If path is already a directory, MkdirAll does nothing and returns nil, so,
// first check if dir exist with an expected permission mode.
if Exist(dir) {
err := CheckDirPermission(dir, PrivateDirMode)
if err != nil {
- lg, _ := zap.NewProduction()
- if lg == nil {
- lg = zap.NewExample()
- }
lg.Warn("check file permission", zap.Error(err))
}
} else {
@@ -70,8 +66,8 @@ func TouchDirAll(dir string) error {
// CreateDirAll is similar to TouchDirAll but returns error
// if the deepest directory was not empty.
-func CreateDirAll(dir string) error {
- err := TouchDirAll(dir)
+func CreateDirAll(lg *zap.Logger, dir string) error {
+ err := TouchDirAll(lg, dir)
if err == nil {
var ns []string
ns, err = ReadDir(dir)
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go
index e8ac0ca6f5..f4492009d6 100644
--- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go
@@ -41,6 +41,12 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval
lg = zap.NewNop()
}
errC := make(chan error, 1)
+ lg.Info("started to purge file",
+ zap.String("dir", dirname),
+ zap.String("suffix", suffix),
+ zap.Uint("max", max),
+ zap.Duration("interval", interval))
+
go func() {
if donec != nil {
defer close(donec)
@@ -63,14 +69,16 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval
f := filepath.Join(dirname, newfnames[0])
l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
if err != nil {
+ lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err))
break
}
if err = os.Remove(f); err != nil {
+ lg.Error("failed to remove file", zap.String("path", f), zap.Error(err))
errC <- err
return
}
if err = l.Close(); err != nil {
- lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
+ lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
errC <- err
return
}
diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/decoder.go b/vendor/go.etcd.io/etcd/server/v3/wal/decoder.go
index 0251a72133..2656d286ac 100644
--- a/vendor/go.etcd.io/etcd/server/v3/wal/decoder.go
+++ b/vendor/go.etcd.io/etcd/server/v3/wal/decoder.go
@@ -15,12 +15,13 @@
package wal
import (
- "bufio"
"encoding/binary"
+ "fmt"
"hash"
"io"
"sync"
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/pkg/v3/crc"
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/etcd/raft/v3/raftpb"
@@ -34,17 +35,17 @@ const frameSizeBytes = 8
type decoder struct {
mu sync.Mutex
- brs []*bufio.Reader
+ brs []*fileutil.FileBufReader
// lastValidOff file offset following the last valid decoded record
lastValidOff int64
crc hash.Hash32
}
-func newDecoder(r ...io.Reader) *decoder {
- readers := make([]*bufio.Reader, len(r))
+func newDecoder(r ...fileutil.FileReader) *decoder {
+ readers := make([]*fileutil.FileBufReader, len(r))
for i := range r {
- readers[i] = bufio.NewReader(r[i])
+ readers[i] = fileutil.NewFileBufReader(r[i])
}
return &decoder{
brs: readers,
@@ -59,17 +60,13 @@ func (d *decoder) decode(rec *walpb.Record) error {
return d.decodeRecord(rec)
}
-// raft max message size is set to 1 MB in etcd server
-// assume projects set reasonable message size limit,
-// thus entry size should never exceed 10 MB
-const maxWALEntrySizeLimit = int64(10 * 1024 * 1024)
-
func (d *decoder) decodeRecord(rec *walpb.Record) error {
if len(d.brs) == 0 {
return io.EOF
}
- l, err := readInt64(d.brs[0])
+ fileBufReader := d.brs[0]
+ l, err := readInt64(fileBufReader)
if err == io.EOF || (err == nil && l == 0) {
// hit end of file or preallocated space
d.brs = d.brs[1:]
@@ -84,12 +81,15 @@ func (d *decoder) decodeRecord(rec *walpb.Record) error {
}
recBytes, padBytes := decodeFrameSize(l)
- if recBytes >= maxWALEntrySizeLimit-padBytes {
- return ErrMaxWALEntrySizeLimitExceeded
+ // The length of current WAL entry must be less than the remaining file size.
+ maxEntryLimit := fileBufReader.FileInfo().Size() - d.lastValidOff - padBytes
+ if recBytes > maxEntryLimit {
+ return fmt.Errorf("wal: max entry size limit exceeded, recBytes: %d, fileSize(%d) - offset(%d) - padBytes(%d) = entryLimit(%d)",
+ recBytes, fileBufReader.FileInfo().Size(), d.lastValidOff, padBytes, maxEntryLimit)
}
data := make([]byte, recBytes+padBytes)
- if _, err = io.ReadFull(d.brs[0], data); err != nil {
+ if _, err = io.ReadFull(fileBufReader, data); err != nil {
// ReadFull returns io.EOF only if no bytes were read
// the decoder should treat this as an ErrUnexpectedEOF instead.
if err == io.EOF {
diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/repair.go b/vendor/go.etcd.io/etcd/server/v3/wal/repair.go
index 122ee49a6a..0ed8425463 100644
--- a/vendor/go.etcd.io/etcd/server/v3/wal/repair.go
+++ b/vendor/go.etcd.io/etcd/server/v3/wal/repair.go
@@ -40,7 +40,7 @@ func Repair(lg *zap.Logger, dirpath string) bool {
lg.Info("repairing", zap.String("path", f.Name()))
rec := &walpb.Record{}
- decoder := newDecoder(f)
+ decoder := newDecoder(fileutil.NewFileReader(f.File))
for {
lastOffset := decoder.lastOffset()
err := decoder.decode(rec)
diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/wal.go b/vendor/go.etcd.io/etcd/server/v3/wal/wal.go
index 3c940e0cde..01d0c28d6b 100644
--- a/vendor/go.etcd.io/etcd/server/v3/wal/wal.go
+++ b/vendor/go.etcd.io/etcd/server/v3/wal/wal.go
@@ -54,15 +54,14 @@ var (
// so that tests can set a different segment size.
SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
- ErrMetadataConflict = errors.New("wal: conflicting metadata found")
- ErrFileNotFound = errors.New("wal: file not found")
- ErrCRCMismatch = errors.New("wal: crc mismatch")
- ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
- ErrSnapshotNotFound = errors.New("wal: snapshot not found")
- ErrSliceOutOfRange = errors.New("wal: slice bounds out of range")
- ErrMaxWALEntrySizeLimitExceeded = errors.New("wal: max entry size limit exceeded")
- ErrDecoderNotFound = errors.New("wal: decoder not found")
- crcTable = crc32.MakeTable(crc32.Castagnoli)
+ ErrMetadataConflict = errors.New("wal: conflicting metadata found")
+ ErrFileNotFound = errors.New("wal: file not found")
+ ErrCRCMismatch = errors.New("wal: crc mismatch")
+ ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
+ ErrSnapshotNotFound = errors.New("wal: snapshot not found")
+ ErrSliceOutOfRange = errors.New("wal: slice bounds out of range")
+ ErrDecoderNotFound = errors.New("wal: decoder not found")
+ crcTable = crc32.MakeTable(crc32.Castagnoli)
)
// WAL is a logical representation of the stable storage.
@@ -116,7 +115,7 @@ func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) {
}
defer os.RemoveAll(tmpdirpath)
- if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
+ if err := fileutil.CreateDirAll(lg, tmpdirpath); err != nil {
lg.Warn(
"failed to create a temporary WAL directory",
zap.String("tmp-dir-path", tmpdirpath),
@@ -378,12 +377,13 @@ func selectWALFiles(lg *zap.Logger, dirpath string, snap walpb.Snapshot) ([]stri
return names, nameIndex, nil
}
-func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, write bool) ([]io.Reader, []*fileutil.LockedFile, func() error, error) {
+func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, write bool) ([]fileutil.FileReader, []*fileutil.LockedFile, func() error, error) {
rcs := make([]io.ReadCloser, 0)
- rs := make([]io.Reader, 0)
+ rs := make([]fileutil.FileReader, 0)
ls := make([]*fileutil.LockedFile, 0)
for _, name := range names[nameIndex:] {
p := filepath.Join(dirpath, name)
+ var f *os.File
if write {
l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode)
if err != nil {
@@ -392,6 +392,7 @@ func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int,
}
ls = append(ls, l)
rcs = append(rcs, l)
+ f = l.File
} else {
rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode)
if err != nil {
@@ -400,8 +401,10 @@ func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int,
}
ls = append(ls, nil)
rcs = append(rcs, rf)
+ f = rf
}
- rs = append(rs, rcs[len(rcs)-1])
+ fileReader := fileutil.NewFileReader(f)
+ rs = append(rs, fileReader)
}
closer := func() error { return closeAll(lg, rcs...) }
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
index c3fa253893..2e337a0ed5 100644
--- a/vendor/go.uber.org/atomic/.gitignore
+++ b/vendor/go.uber.org/atomic/.gitignore
@@ -10,3 +10,6 @@ lint.log
# Profiling output
*.prof
+
+# Output of fossa analyzer
+/fossa
diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml
deleted file mode 100644
index 13d0a4f254..0000000000
--- a/vendor/go.uber.org/atomic/.travis.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-sudo: false
-language: go
-go_import_path: go.uber.org/atomic
-
-env:
- global:
- - GO111MODULE=on
-
-matrix:
- include:
- - go: oldstable
- - go: stable
- env: LINT=1
-
-cache:
- directories:
- - vendor
-
-before_install:
- - go version
-
-script:
- - test -z "$LINT" || make lint
- - make cover
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
index 24c0274dc3..38f564e2b3 100644
--- a/vendor/go.uber.org/atomic/CHANGELOG.md
+++ b/vendor/go.uber.org/atomic/CHANGELOG.md
@@ -4,6 +4,20 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [1.9.0] - 2021-07-15
+### Added
+- Add `Float64.Swap` to match int atomic operations.
+- Add `atomic.Time` type for atomic operations on `time.Time` values.
+
+[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0
+
+## [1.8.0] - 2021-06-09
+### Added
+- Add `atomic.Uintptr` type for atomic operations on `uintptr` values.
+- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values.
+
+[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0
+
## [1.7.0] - 2020-09-14
### Added
- Support JSON serialization and deserialization of primitive atomic types.
@@ -15,32 +29,46 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Removed
- Remove dependency on `golang.org/x/{lint, tools}`.
+[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
+
## [1.6.0] - 2020-02-24
### Changed
- Drop library dependency on `golang.org/x/{lint, tools}`.
+[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
+
## [1.5.1] - 2019-11-19
- Fix bug where `Bool.CAS` and `Bool.Toggle` do not work correctly together,
causing `CAS` to fail even though the old value matches.
+[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
+
## [1.5.0] - 2019-10-29
### Changed
- With Go modules, only the `go.uber.org/atomic` import path is supported now.
If you need to use the old import path, please add a `replace` directive to
your `go.mod`.
+[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
+
## [1.4.0] - 2019-05-01
### Added
- Add `atomic.Error` type for atomic operations on `error` values.
+[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
+
## [1.3.2] - 2018-05-02
### Added
- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
+[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
+
## [1.3.1] - 2017-11-14
### Fixed
- Revert optimization for `atomic.String.Store("")` which caused data races.
+[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
+
## [1.3.0] - 2017-11-13
### Added
- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
@@ -48,10 +76,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
- Optimize `atomic.String.Store("")` by avoiding an allocation.
+[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
+
## [1.2.0] - 2017-04-12
### Added
- Shadow `atomic.Value` from `sync/atomic`.
+[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
+
## [1.1.0] - 2017-03-10
### Added
- Add atomic `Float64` type.
@@ -59,18 +91,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
- Support new `go.uber.org/atomic` import path.
+[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
+
## [1.0.0] - 2016-07-18
- Initial release.
-[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
-[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
-[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
-[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
-[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
-[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
-[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
-[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
-[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
-[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
index 1b1376d425..46c945b32b 100644
--- a/vendor/go.uber.org/atomic/Makefile
+++ b/vendor/go.uber.org/atomic/Makefile
@@ -69,6 +69,7 @@ generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
generatenodirty:
@[ -z "$$(git status --porcelain)" ] || ( \
echo "Working tree is dirty. Commit your changes first."; \
+ git status; \
exit 1 )
@make generate
@status=$$(git status --porcelain); \
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
index ade0c20f16..96b47a1f12 100644
--- a/vendor/go.uber.org/atomic/README.md
+++ b/vendor/go.uber.org/atomic/README.md
@@ -55,8 +55,8 @@ Released under the [MIT License](LICENSE.txt).
[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
[doc]: https://godoc.org/go.uber.org/atomic
-[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master
-[ci]: https://travis-ci.com/uber-go/atomic
+[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml
[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/atomic
[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
index 9cf1914b1f..209df7bbcd 100644
--- a/vendor/go.uber.org/atomic/bool.go
+++ b/vendor/go.uber.org/atomic/bool.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,10 +36,10 @@ type Bool struct {
var _zeroBool bool
// NewBool creates a new Bool.
-func NewBool(v bool) *Bool {
+func NewBool(val bool) *Bool {
x := &Bool{}
- if v != _zeroBool {
- x.Store(v)
+ if val != _zeroBool {
+ x.Store(val)
}
return x
}
@@ -50,19 +50,19 @@ func (x *Bool) Load() bool {
}
// Store atomically stores the passed bool.
-func (x *Bool) Store(v bool) {
- x.v.Store(boolToInt(v))
+func (x *Bool) Store(val bool) {
+ x.v.Store(boolToInt(val))
}
// CAS is an atomic compare-and-swap for bool values.
-func (x *Bool) CAS(o, n bool) bool {
- return x.v.CAS(boolToInt(o), boolToInt(n))
+func (x *Bool) CAS(old, new bool) (swapped bool) {
+ return x.v.CAS(boolToInt(old), boolToInt(new))
}
// Swap atomically stores the given bool and returns the old
// value.
-func (x *Bool) Swap(o bool) bool {
- return truthy(x.v.Swap(boolToInt(o)))
+func (x *Bool) Swap(val bool) (old bool) {
+ return truthy(x.v.Swap(boolToInt(val)))
}
// MarshalJSON encodes the wrapped bool into JSON.
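For reference, a minimal usage sketch of the Bool wrapper with the renamed signatures above (assumes the vendored go.uber.org/atomic import path; not part of the diff):

    package main

    import (
        "fmt"

        "go.uber.org/atomic"
    )

    func main() {
        b := atomic.NewBool(false)
        b.Store(true)
        // CAS reports whether the swap happened (the named return "swapped").
        if b.CAS(true, false) {
            fmt.Println("stored:", b.Load()) // stored: false
        }
        // Toggle negates the value and returns the previous one.
        old := b.Toggle()
        fmt.Println("was:", old, "now:", b.Load()) // was: false now: true
    }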
diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go
index c7bf7a827a..a2e60e9873 100644
--- a/vendor/go.uber.org/atomic/bool_ext.go
+++ b/vendor/go.uber.org/atomic/bool_ext.go
@@ -38,7 +38,7 @@ func boolToInt(b bool) uint32 {
}
// Toggle atomically negates the Boolean and returns the previous value.
-func (b *Bool) Toggle() bool {
+func (b *Bool) Toggle() (old bool) {
for {
old := b.Load()
if b.CAS(old, !old) {
diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go
index 027cfcb20b..207594f5e8 100644
--- a/vendor/go.uber.org/atomic/duration.go
+++ b/vendor/go.uber.org/atomic/duration.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -37,10 +37,10 @@ type Duration struct {
var _zeroDuration time.Duration
// NewDuration creates a new Duration.
-func NewDuration(v time.Duration) *Duration {
+func NewDuration(val time.Duration) *Duration {
x := &Duration{}
- if v != _zeroDuration {
- x.Store(v)
+ if val != _zeroDuration {
+ x.Store(val)
}
return x
}
@@ -51,19 +51,19 @@ func (x *Duration) Load() time.Duration {
}
// Store atomically stores the passed time.Duration.
-func (x *Duration) Store(v time.Duration) {
- x.v.Store(int64(v))
+func (x *Duration) Store(val time.Duration) {
+ x.v.Store(int64(val))
}
// CAS is an atomic compare-and-swap for time.Duration values.
-func (x *Duration) CAS(o, n time.Duration) bool {
- return x.v.CAS(int64(o), int64(n))
+func (x *Duration) CAS(old, new time.Duration) (swapped bool) {
+ return x.v.CAS(int64(old), int64(new))
}
// Swap atomically stores the given time.Duration and returns the old
// value.
-func (x *Duration) Swap(o time.Duration) time.Duration {
- return time.Duration(x.v.Swap(int64(o)))
+func (x *Duration) Swap(val time.Duration) (old time.Duration) {
+ return time.Duration(x.v.Swap(int64(val)))
}
// MarshalJSON encodes the wrapped time.Duration into JSON.
diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go
index 6273b66bd6..4c18b0a9ed 100644
--- a/vendor/go.uber.org/atomic/duration_ext.go
+++ b/vendor/go.uber.org/atomic/duration_ext.go
@@ -25,13 +25,13 @@ import "time"
//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
// Add atomically adds to the wrapped time.Duration and returns the new value.
-func (d *Duration) Add(n time.Duration) time.Duration {
- return time.Duration(d.v.Add(int64(n)))
+func (d *Duration) Add(delta time.Duration) time.Duration {
+ return time.Duration(d.v.Add(int64(delta)))
}
// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
-func (d *Duration) Sub(n time.Duration) time.Duration {
- return time.Duration(d.v.Sub(int64(n)))
+func (d *Duration) Sub(delta time.Duration) time.Duration {
+ return time.Duration(d.v.Sub(int64(delta)))
}
// String encodes the wrapped value as a string.
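A quick sketch of the Add/Sub signatures above, whose arguments are deltas rather than absolute values (assumes the vendored API; the durations are arbitrary):

    package main

    import (
        "fmt"
        "time"

        "go.uber.org/atomic"
    )

    func main() {
        timeout := atomic.NewDuration(5 * time.Second)
        timeout.Add(2 * time.Second) // delta, not an absolute value
        timeout.Sub(time.Second)
        fmt.Println(timeout.Load()) // 6s
    }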
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
index a6166fbea0..3be19c35ee 100644
--- a/vendor/go.uber.org/atomic/error.go
+++ b/vendor/go.uber.org/atomic/error.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -32,10 +32,10 @@ type Error struct {
var _zeroError error
// NewError creates a new Error.
-func NewError(v error) *Error {
+func NewError(val error) *Error {
x := &Error{}
- if v != _zeroError {
- x.Store(v)
+ if val != _zeroError {
+ x.Store(val)
}
return x
}
@@ -46,6 +46,6 @@ func (x *Error) Load() error {
}
// Store atomically stores the passed error.
-func (x *Error) Store(v error) {
- x.v.Store(packError(v))
+func (x *Error) Store(val error) {
+ x.v.Store(packError(val))
}
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
index 0719060207..8a13671847 100644
--- a/vendor/go.uber.org/atomic/float64.go
+++ b/vendor/go.uber.org/atomic/float64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -37,10 +37,10 @@ type Float64 struct {
var _zeroFloat64 float64
// NewFloat64 creates a new Float64.
-func NewFloat64(v float64) *Float64 {
+func NewFloat64(val float64) *Float64 {
x := &Float64{}
- if v != _zeroFloat64 {
- x.Store(v)
+ if val != _zeroFloat64 {
+ x.Store(val)
}
return x
}
@@ -51,13 +51,14 @@ func (x *Float64) Load() float64 {
}
// Store atomically stores the passed float64.
-func (x *Float64) Store(v float64) {
- x.v.Store(math.Float64bits(v))
+func (x *Float64) Store(val float64) {
+ x.v.Store(math.Float64bits(val))
}
-// CAS is an atomic compare-and-swap for float64 values.
-func (x *Float64) CAS(o, n float64) bool {
- return x.v.CAS(math.Float64bits(o), math.Float64bits(n))
+// Swap atomically stores the given float64 and returns the old
+// value.
+func (x *Float64) Swap(val float64) (old float64) {
+ return math.Float64frombits(x.v.Swap(math.Float64bits(val)))
}
// MarshalJSON encodes the wrapped float64 into JSON.
diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go
index 927b1add74..df36b0107f 100644
--- a/vendor/go.uber.org/atomic/float64_ext.go
+++ b/vendor/go.uber.org/atomic/float64_ext.go
@@ -20,15 +20,18 @@
package atomic
-import "strconv"
+import (
+ "math"
+ "strconv"
+)
-//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go
+//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go
// Add atomically adds to the wrapped float64 and returns the new value.
-func (f *Float64) Add(s float64) float64 {
+func (f *Float64) Add(delta float64) float64 {
for {
old := f.Load()
- new := old + s
+ new := old + delta
if f.CAS(old, new) {
return new
}
@@ -36,8 +39,27 @@ func (f *Float64) Add(s float64) float64 {
}
// Sub atomically subtracts from the wrapped float64 and returns the new value.
-func (f *Float64) Sub(s float64) float64 {
- return f.Add(-s)
+func (f *Float64) Sub(delta float64) float64 {
+ return f.Add(-delta)
+}
+
+// CAS is an atomic compare-and-swap for float64 values.
+//
+// Note: CAS handles NaN incorrectly. NaN != NaN using Go's built-in operators,
+// but CAS allows a stored NaN to compare equal to a passed-in NaN.
+// This keeps typical CAS loops from blocking forever, e.g.,
+//
+// for {
+// old := atom.Load()
+// new = f(old)
+// if atom.CAS(old, new) {
+// break
+// }
+// }
+//
+// If CAS did not treat a stored NaN as equal to a passed-in NaN, the above
+// loop would spin forever once the stored value became NaN.
+func (f *Float64) CAS(old, new float64) (swapped bool) {
+ return f.v.CAS(math.Float64bits(old), math.Float64bits(new))
}
// String encodes the wrapped value as a string.
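To see why the bitwise NaN matching documented above matters, here is the CAS loop from the comment fleshed out into a runnable sketch (the halving update is an arbitrary stand-in for f(old)):

    package main

    import (
        "fmt"

        "go.uber.org/atomic"
    )

    func main() {
        f := atomic.NewFloat64(8)
        for {
            old := f.Load()
            new := old / 2 // arbitrary update function
            // CAS compares raw float64 bits, so a stored NaN matches a
            // passed-in NaN and this loop cannot spin forever.
            if f.CAS(old, new) {
                break
            }
        }
        fmt.Println(f.Load()) // 4
    }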
diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go
index 50d6b24858..1e9ef4f879 100644
--- a/vendor/go.uber.org/atomic/gen.go
+++ b/vendor/go.uber.org/atomic/gen.go
@@ -24,3 +24,4 @@ package atomic
//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go
+//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
index 18ae56493e..640ea36a17 100644
--- a/vendor/go.uber.org/atomic/int32.go
+++ b/vendor/go.uber.org/atomic/int32.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,8 +36,8 @@ type Int32 struct {
}
// NewInt32 creates a new Int32.
-func NewInt32(i int32) *Int32 {
- return &Int32{v: i}
+func NewInt32(val int32) *Int32 {
+ return &Int32{v: val}
}
// Load atomically loads the wrapped value.
@@ -46,13 +46,13 @@ func (i *Int32) Load() int32 {
}
// Add atomically adds to the wrapped int32 and returns the new value.
-func (i *Int32) Add(n int32) int32 {
- return atomic.AddInt32(&i.v, n)
+func (i *Int32) Add(delta int32) int32 {
+ return atomic.AddInt32(&i.v, delta)
}
// Sub atomically subtracts from the wrapped int32 and returns the new value.
-func (i *Int32) Sub(n int32) int32 {
- return atomic.AddInt32(&i.v, -n)
+func (i *Int32) Sub(delta int32) int32 {
+ return atomic.AddInt32(&i.v, -delta)
}
// Inc atomically increments the wrapped int32 and returns the new value.
@@ -66,18 +66,18 @@ func (i *Int32) Dec() int32 {
}
// CAS is an atomic compare-and-swap.
-func (i *Int32) CAS(old, new int32) bool {
+func (i *Int32) CAS(old, new int32) (swapped bool) {
return atomic.CompareAndSwapInt32(&i.v, old, new)
}
// Store atomically stores the passed value.
-func (i *Int32) Store(n int32) {
- atomic.StoreInt32(&i.v, n)
+func (i *Int32) Store(val int32) {
+ atomic.StoreInt32(&i.v, val)
}
// Swap atomically swaps the wrapped int32 and returns the old value.
-func (i *Int32) Swap(n int32) int32 {
- return atomic.SwapInt32(&i.v, n)
+func (i *Int32) Swap(val int32) (old int32) {
+ return atomic.SwapInt32(&i.v, val)
}
// MarshalJSON encodes the wrapped int32 into JSON.
diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
index 2bcbbfaa95..9ab66b9809 100644
--- a/vendor/go.uber.org/atomic/int64.go
+++ b/vendor/go.uber.org/atomic/int64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,8 +36,8 @@ type Int64 struct {
}
// NewInt64 creates a new Int64.
-func NewInt64(i int64) *Int64 {
- return &Int64{v: i}
+func NewInt64(val int64) *Int64 {
+ return &Int64{v: val}
}
// Load atomically loads the wrapped value.
@@ -46,13 +46,13 @@ func (i *Int64) Load() int64 {
}
// Add atomically adds to the wrapped int64 and returns the new value.
-func (i *Int64) Add(n int64) int64 {
- return atomic.AddInt64(&i.v, n)
+func (i *Int64) Add(delta int64) int64 {
+ return atomic.AddInt64(&i.v, delta)
}
// Sub atomically subtracts from the wrapped int64 and returns the new value.
-func (i *Int64) Sub(n int64) int64 {
- return atomic.AddInt64(&i.v, -n)
+func (i *Int64) Sub(delta int64) int64 {
+ return atomic.AddInt64(&i.v, -delta)
}
// Inc atomically increments the wrapped int64 and returns the new value.
@@ -66,18 +66,18 @@ func (i *Int64) Dec() int64 {
}
// CAS is an atomic compare-and-swap.
-func (i *Int64) CAS(old, new int64) bool {
+func (i *Int64) CAS(old, new int64) (swapped bool) {
return atomic.CompareAndSwapInt64(&i.v, old, new)
}
// Store atomically stores the passed value.
-func (i *Int64) Store(n int64) {
- atomic.StoreInt64(&i.v, n)
+func (i *Int64) Store(val int64) {
+ atomic.StoreInt64(&i.v, val)
}
// Swap atomically swaps the wrapped int64 and returns the old value.
-func (i *Int64) Swap(n int64) int64 {
- return atomic.SwapInt64(&i.v, n)
+func (i *Int64) Swap(val int64) (old int64) {
+ return atomic.SwapInt64(&i.v, val)
}
// MarshalJSON encodes the wrapped int64 into JSON.
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
index 225b7a2be0..80df93d094 100644
--- a/vendor/go.uber.org/atomic/string.go
+++ b/vendor/go.uber.org/atomic/string.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -32,10 +32,10 @@ type String struct {
var _zeroString string
// NewString creates a new String.
-func NewString(v string) *String {
+func NewString(val string) *String {
x := &String{}
- if v != _zeroString {
- x.Store(v)
+ if val != _zeroString {
+ x.Store(val)
}
return x
}
@@ -49,6 +49,6 @@ func (x *String) Load() string {
}
// Store atomically stores the passed string.
-func (x *String) Store(v string) {
- x.v.Store(v)
+func (x *String) Store(val string) {
+ x.v.Store(val)
}
diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go
index 3a9558213d..83d92edafc 100644
--- a/vendor/go.uber.org/atomic/string_ext.go
+++ b/vendor/go.uber.org/atomic/string_ext.go
@@ -21,6 +21,8 @@
package atomic
//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go
+// Note: No Swap here, as String wraps Value, which wraps the stdlib
+// sync/atomic.Value, and that only supports Swap as of go1.17: https://github.com/golang/go/issues/39351
// String returns the wrapped value.
func (s *String) String() string {
diff --git a/vendor/go.uber.org/multierr/go113.go b/vendor/go.uber.org/atomic/time.go
index 264b0eac0d..33460fc37e 100644
--- a/vendor/go.uber.org/multierr/go113.go
+++ b/vendor/go.uber.org/atomic/time.go
@@ -1,4 +1,6 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,35 +20,36 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// +build go1.13
+package atomic
-package multierr
+import (
+ "time"
+)
-import "errors"
+// Time is an atomic type-safe wrapper for time.Time values.
+type Time struct {
+ _ nocmp // disallow non-atomic comparison
-// As attempts to find the first error in the error list that matches the type
-// of the value that target points to.
-//
-// This function allows errors.As to traverse the values stored on the
-// multierr error.
-func (merr *multiError) As(target interface{}) bool {
- for _, err := range merr.Errors() {
- if errors.As(err, target) {
- return true
- }
- }
- return false
+ v Value
}
-// Is attempts to match the provided error against errors in the error list.
-//
-// This function allows errors.Is to traverse the values stored on the
-// multierr error.
-func (merr *multiError) Is(target error) bool {
- for _, err := range merr.Errors() {
- if errors.Is(err, target) {
- return true
- }
+var _zeroTime time.Time
+
+// NewTime creates a new Time.
+func NewTime(val time.Time) *Time {
+ x := &Time{}
+ if val != _zeroTime {
+ x.Store(val)
}
- return false
+ return x
+}
+
+// Load atomically loads the wrapped time.Time.
+func (x *Time) Load() time.Time {
+ return unpackTime(x.v.Load())
+}
+
+// Store atomically stores the passed time.Time.
+func (x *Time) Store(val time.Time) {
+ x.v.Store(packTime(val))
}
diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/atomic/time_ext.go
index 6b5dbda807..1e3dc978aa 100644
--- a/vendor/go.uber.org/zap/global_go112.go
+++ b/vendor/go.uber.org/atomic/time_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,9 +18,19 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// See #682 for more information.
-// +build go1.12
+package atomic
-package zap
+import "time"
-const _stdLogDefaultDepth = 1
+//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go
+
+func packTime(t time.Time) interface{} {
+ return t
+}
+
+func unpackTime(v interface{}) time.Time {
+ if t, ok := v.(time.Time); ok {
+ return t
+ }
+ return time.Time{}
+}
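Because Time wraps Value through the packTime/unpackTime helpers above, loading from an empty wrapper yields the zero time.Time instead of panicking on a nil interface. A small sketch (assumes the vendored API):

    package main

    import (
        "fmt"
        "time"

        "go.uber.org/atomic"
    )

    func main() {
        var t atomic.Time
        fmt.Println(t.Load().IsZero()) // true: unpackTime falls back to time.Time{}

        t.Store(time.Now())
        fmt.Println(t.Load().IsZero()) // false
    }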
diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go
index a973aba1a6..7859a9cc3b 100644
--- a/vendor/go.uber.org/atomic/uint32.go
+++ b/vendor/go.uber.org/atomic/uint32.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,8 +36,8 @@ type Uint32 struct {
}
// NewUint32 creates a new Uint32.
-func NewUint32(i uint32) *Uint32 {
- return &Uint32{v: i}
+func NewUint32(val uint32) *Uint32 {
+ return &Uint32{v: val}
}
// Load atomically loads the wrapped value.
@@ -46,13 +46,13 @@ func (i *Uint32) Load() uint32 {
}
// Add atomically adds to the wrapped uint32 and returns the new value.
-func (i *Uint32) Add(n uint32) uint32 {
- return atomic.AddUint32(&i.v, n)
+func (i *Uint32) Add(delta uint32) uint32 {
+ return atomic.AddUint32(&i.v, delta)
}
// Sub atomically subtracts from the wrapped uint32 and returns the new value.
-func (i *Uint32) Sub(n uint32) uint32 {
- return atomic.AddUint32(&i.v, ^(n - 1))
+func (i *Uint32) Sub(delta uint32) uint32 {
+ return atomic.AddUint32(&i.v, ^(delta - 1))
}
// Inc atomically increments the wrapped uint32 and returns the new value.
@@ -66,18 +66,18 @@ func (i *Uint32) Dec() uint32 {
}
// CAS is an atomic compare-and-swap.
-func (i *Uint32) CAS(old, new uint32) bool {
+func (i *Uint32) CAS(old, new uint32) (swapped bool) {
return atomic.CompareAndSwapUint32(&i.v, old, new)
}
// Store atomically stores the passed value.
-func (i *Uint32) Store(n uint32) {
- atomic.StoreUint32(&i.v, n)
+func (i *Uint32) Store(val uint32) {
+ atomic.StoreUint32(&i.v, val)
}
// Swap atomically swaps the wrapped uint32 and returns the old value.
-func (i *Uint32) Swap(n uint32) uint32 {
- return atomic.SwapUint32(&i.v, n)
+func (i *Uint32) Swap(val uint32) (old uint32) {
+ return atomic.SwapUint32(&i.v, val)
}
// MarshalJSON encodes the wrapped uint32 into JSON.
diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go
index 3b6c71fd5a..2f2a7db638 100644
--- a/vendor/go.uber.org/atomic/uint64.go
+++ b/vendor/go.uber.org/atomic/uint64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,8 +36,8 @@ type Uint64 struct {
}
// NewUint64 creates a new Uint64.
-func NewUint64(i uint64) *Uint64 {
- return &Uint64{v: i}
+func NewUint64(val uint64) *Uint64 {
+ return &Uint64{v: val}
}
// Load atomically loads the wrapped value.
@@ -46,13 +46,13 @@ func (i *Uint64) Load() uint64 {
}
// Add atomically adds to the wrapped uint64 and returns the new value.
-func (i *Uint64) Add(n uint64) uint64 {
- return atomic.AddUint64(&i.v, n)
+func (i *Uint64) Add(delta uint64) uint64 {
+ return atomic.AddUint64(&i.v, delta)
}
// Sub atomically subtracts from the wrapped uint64 and returns the new value.
-func (i *Uint64) Sub(n uint64) uint64 {
- return atomic.AddUint64(&i.v, ^(n - 1))
+func (i *Uint64) Sub(delta uint64) uint64 {
+ return atomic.AddUint64(&i.v, ^(delta - 1))
}
// Inc atomically increments the wrapped uint64 and returns the new value.
@@ -66,18 +66,18 @@ func (i *Uint64) Dec() uint64 {
}
// CAS is an atomic compare-and-swap.
-func (i *Uint64) CAS(old, new uint64) bool {
+func (i *Uint64) CAS(old, new uint64) (swapped bool) {
return atomic.CompareAndSwapUint64(&i.v, old, new)
}
// Store atomically stores the passed value.
-func (i *Uint64) Store(n uint64) {
- atomic.StoreUint64(&i.v, n)
+func (i *Uint64) Store(val uint64) {
+ atomic.StoreUint64(&i.v, val)
}
// Swap atomically swaps the wrapped uint64 and returns the old value.
-func (i *Uint64) Swap(n uint64) uint64 {
- return atomic.SwapUint64(&i.v, n)
+func (i *Uint64) Swap(val uint64) (old uint64) {
+ return atomic.SwapUint64(&i.v, val)
}
// MarshalJSON encodes the wrapped uint64 into JSON.
diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go
new file mode 100644
index 0000000000..ecf7a77273
--- /dev/null
+++ b/vendor/go.uber.org/atomic/uintptr.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uintptr is an atomic wrapper around uintptr.
+type Uintptr struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uintptr
+}
+
+// NewUintptr creates a new Uintptr.
+func NewUintptr(val uintptr) *Uintptr {
+ return &Uintptr{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uintptr) Load() uintptr {
+ return atomic.LoadUintptr(&i.v)
+}
+
+// Add atomically adds to the wrapped uintptr and returns the new value.
+func (i *Uintptr) Add(delta uintptr) uintptr {
+ return atomic.AddUintptr(&i.v, delta)
+}
+
+// Sub atomically subtracts from the wrapped uintptr and returns the new value.
+func (i *Uintptr) Sub(delta uintptr) uintptr {
+ return atomic.AddUintptr(&i.v, ^(delta - 1))
+}
+
+// Inc atomically increments the wrapped uintptr and returns the new value.
+func (i *Uintptr) Inc() uintptr {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uintptr and returns the new value.
+func (i *Uintptr) Dec() uintptr {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uintptr) CAS(old, new uintptr) (swapped bool) {
+ return atomic.CompareAndSwapUintptr(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uintptr) Store(val uintptr) {
+ atomic.StoreUintptr(&i.v, val)
+}
+
+// Swap atomically swaps the wrapped uintptr and returns the old value.
+func (i *Uintptr) Swap(val uintptr) (old uintptr) {
+ return atomic.SwapUintptr(&i.v, val)
+}
+
+// MarshalJSON encodes the wrapped uintptr into JSON.
+func (i *Uintptr) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uintptr.
+func (i *Uintptr) UnmarshalJSON(b []byte) error {
+ var v uintptr
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uintptr) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
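One subtlety in the generated code above: Sub subtracts from an unsigned value by adding ^(delta - 1), the two's complement of delta, which equals -delta modulo 2^N. A standalone sketch of the identity using only the stdlib:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        var v uintptr = 10
        delta := uintptr(3)
        // Adding the two's complement ^(delta - 1) subtracts delta,
        // with the usual unsigned wraparound below zero.
        atomic.AddUintptr(&v, ^(delta - 1))
        fmt.Println(v) // 7
    }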
diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go
new file mode 100644
index 0000000000..169f793dcf
--- /dev/null
+++ b/vendor/go.uber.org/atomic/unsafe_pointer.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// UnsafePointer is an atomic wrapper around unsafe.Pointer.
+type UnsafePointer struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v unsafe.Pointer
+}
+
+// NewUnsafePointer creates a new UnsafePointer.
+func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer {
+ return &UnsafePointer{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (p *UnsafePointer) Load() unsafe.Pointer {
+ return atomic.LoadPointer(&p.v)
+}
+
+// Store atomically stores the passed value.
+func (p *UnsafePointer) Store(val unsafe.Pointer) {
+ atomic.StorePointer(&p.v, val)
+}
+
+// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value.
+func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) {
+ return atomic.SwapPointer(&p.v, val)
+}
+
+// CAS is an atomic compare-and-swap.
+func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) {
+ return atomic.CompareAndSwapPointer(&p.v, old, new)
+}
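A minimal sketch of the new UnsafePointer wrapper (assumes the vendored API; the int payloads are arbitrary):

    package main

    import (
        "fmt"
        "unsafe"

        "go.uber.org/atomic"
    )

    func main() {
        a, b := 1, 2
        p := atomic.NewUnsafePointer(unsafe.Pointer(&a))
        // CAS succeeds only while the stored pointer still points at a.
        if p.CAS(unsafe.Pointer(&a), unsafe.Pointer(&b)) {
            fmt.Println(*(*int)(p.Load())) // 2
        }
    }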
diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml
deleted file mode 100644
index 8636ab42ad..0000000000
--- a/vendor/go.uber.org/multierr/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-sudo: false
-language: go
-go_import_path: go.uber.org/multierr
-
-env:
- global:
- - GO111MODULE=on
-
-go:
- - oldstable
- - stable
-
-before_install:
-- go version
-
-script:
-- |
- set -e
- make lint
- make cover
-
-after_success:
-- bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
index 6f1db9ef4a..3ba05276f1 100644
--- a/vendor/go.uber.org/multierr/CHANGELOG.md
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -1,6 +1,18 @@
Releases
========
+v1.8.0 (2022-02-28)
+===================
+
+- `Combine`: perform zero allocations when there are no errors.
+
+
+v1.7.0 (2021-05-06)
+===================
+
+- Add `AppendInvoke` to append into errors from `defer` blocks.
+
+
v1.6.0 (2020-09-14)
===================
diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt
index 858e02475f..413e30f7ce 100644
--- a/vendor/go.uber.org/multierr/LICENSE.txt
+++ b/vendor/go.uber.org/multierr/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2017 Uber Technologies, Inc.
+Copyright (c) 2017-2021 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile
index 316004400b..dcb6fe723c 100644
--- a/vendor/go.uber.org/multierr/Makefile
+++ b/vendor/go.uber.org/multierr/Makefile
@@ -34,9 +34,5 @@ lint: gofmt golint staticcheck
.PHONY: cover
cover:
- go test -coverprofile=cover.out -coverpkg=./... -v ./...
+ go test -race -coverprofile=cover.out -coverpkg=./... -v ./...
go tool cover -html=cover.out -o cover.html
-
-update-license:
- @cd tools && go install go.uber.org/tools/update-license
- @$(GOBIN)/update-license $(GO_FILES)
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
index 751bd65e58..70aacecd71 100644
--- a/vendor/go.uber.org/multierr/README.md
+++ b/vendor/go.uber.org/multierr/README.md
@@ -15,9 +15,9 @@ Stable: No breaking changes will be made before 2.0.
Released under the [MIT License].
[MIT License]: LICENSE.txt
-[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg
-[doc]: https://godoc.org/go.uber.org/multierr
-[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr
+[doc]: https://pkg.go.dev/go.uber.org/multierr
+[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg
[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
-[ci]: https://travis-ci.com/uber-go/multierr
+[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml
[cov]: https://codecov.io/gh/uber-go/multierr
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
index 5c9b67d537..f45af149c1 100644
--- a/vendor/go.uber.org/multierr/error.go
+++ b/vendor/go.uber.org/multierr/error.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2017-2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -35,8 +35,53 @@
//
// err = multierr.Append(reader.Close(), writer.Close())
//
-// This makes it possible to record resource cleanup failures from deferred
-// blocks with the help of named return values.
+// The underlying list of errors for a returned error object may be retrieved
+// with the Errors function.
+//
+// errors := multierr.Errors(err)
+// if len(errors) > 0 {
+// fmt.Println("The following errors occurred:", errors)
+// }
+//
+// Appending from a loop
+//
+// You sometimes need to append into an error from a loop.
+//
+// var err error
+// for _, item := range items {
+// err = multierr.Append(err, process(item))
+// }
+//
+// Cases like this may require knowing whether an individual operation
+// failed. This usually requires introducing a new variable.
+//
+// var err error
+// for _, item := range items {
+// if perr := process(item); perr != nil {
+// log.Warn("skipping item", item)
+// err = multierr.Append(err, perr)
+// }
+// }
+//
+// multierr includes AppendInto to simplify cases like this.
+//
+// var err error
+// for _, item := range items {
+// if multierr.AppendInto(&err, process(item)) {
+// log.Warn("skipping item", item)
+// }
+// }
+//
+// This will append the error into the err variable, and return true if that
+// individual error was non-nil.
+//
+// See AppendInto for more information.
+//
+// Deferred Functions
+//
+// Go makes it possible to modify the return value of a function in a defer
+// block if the function was using named returns. This makes it possible to
+// record resource cleanup failures from deferred blocks.
//
// func sendRequest(req Request) (err error) {
// conn, err := openConnection()
@@ -49,14 +94,21 @@
// // ...
// }
//
-// The underlying list of errors for a returned error object may be retrieved
-// with the Errors function.
+// multierr provides the Invoker type and AppendInvoke function to make cases
+// like the above simpler and obviate the need for a closure. The following is
+// roughly equivalent to the example above.
//
-// errors := multierr.Errors(err)
-// if len(errors) > 0 {
-// fmt.Println("The following errors occurred:", errors)
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(conn))
+// // ...
// }
//
+// See AppendInvoke and Invoker for more information.
+//
// Advanced Usage
//
// Errors returned by Combine and Append MAY implement the following
@@ -87,6 +139,7 @@ package multierr // import "go.uber.org/multierr"
import (
"bytes"
+ "errors"
"fmt"
"io"
"strings"
@@ -186,6 +239,33 @@ func (merr *multiError) Errors() []error {
return merr.errors
}
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool {
+ for _, err := range merr.Errors() {
+ if errors.As(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+// Is attempts to match the provided error against errors in the error list.
+//
+// This function allows errors.Is to traverse the values stored on the
+// multierr error.
+func (merr *multiError) Is(target error) bool {
+ for _, err := range merr.Errors() {
+ if errors.Is(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
func (merr *multiError) Error() string {
if merr == nil {
return ""
@@ -292,6 +372,14 @@ func inspect(errors []error) (res inspectResult) {
// fromSlice converts the given list of errors into a single error.
func fromSlice(errors []error) error {
+ // Don't pay to inspect small slices.
+ switch len(errors) {
+ case 0:
+ return nil
+ case 1:
+ return errors[0]
+ }
+
res := inspect(errors)
switch res.Count {
case 0:
@@ -301,8 +389,13 @@ func fromSlice(errors []error) error {
return errors[res.FirstErrorIdx]
case len(errors):
if !res.ContainsMultiError {
- // already flat
- return &multiError{errors: errors}
+ // Error list is flat. Make a copy of it;
+ // otherwise "errors" escapes to the heap
+ // unconditionally for all other cases.
+ // This lets us optimize for the "no errors" case.
+ out := make([]error, len(errors))
+ copy(out, errors)
+ return &multiError{errors: out}
}
}
@@ -421,7 +514,7 @@ func Append(left error, right error) error {
// items = append(items, item)
// }
//
-// Compare this with a verison that relies solely on Append:
+// Compare this with a version that relies solely on Append:
//
// var err error
// for line := range lines {
@@ -447,3 +540,113 @@ func AppendInto(into *error, err error) (errored bool) {
*into = Append(*into, err)
return true
}
+
+// Invoker is an operation that may fail with an error. Use it with
+// AppendInvoke to append the result of calling the function into an error.
+// This allows you to conveniently defer capture of failing operations.
+//
+// See also, Close and Invoke.
+type Invoker interface {
+ Invoke() error
+}
+
+// Invoke wraps a function which may fail with an error to match the Invoker
+// interface. Use it to supply functions matching this signature to
+// AppendInvoke.
+//
+// For example,
+//
+// func processReader(r io.Reader) (err error) {
+// scanner := bufio.NewScanner(r)
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+// for scanner.Scan() {
+// // ...
+// }
+// // ...
+// }
+//
+// In this example, the following line will construct the Invoker right away,
+// but defer the invocation of scanner.Err() until the function returns.
+//
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+type Invoke func() error
+
+// Invoke calls the supplied function and returns its result.
+func (i Invoke) Invoke() error { return i() }
+
+// Close builds an Invoker that closes the provided io.Closer. Use it with
+// AppendInvoke to close io.Closers and append their results into an error.
+//
+// For example,
+//
+// func processFile(path string) (err error) {
+// f, err := os.Open(path)
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+// return processReader(f)
+// }
+//
+// In this example, multierr.Close will construct the Invoker right away, but
+// defer the invocation of f.Close until the function returns.
+//
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+func Close(closer io.Closer) Invoker {
+ return Invoke(closer.Close)
+}
+
+// AppendInvoke appends the result of calling the given Invoker into the
+// provided error pointer. Use it with named returns to safely defer
+// invocation of fallible operations until a function returns, and capture the
+// resulting errors.
+//
+// func doSomething(...) (err error) {
+// // ...
+// f, err := openFile(..)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call f.Close() when this function returns and,
+// // if the operation fails, append its error into the
+// // returned error.
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// scanner := bufio.NewScanner(f)
+// // Similarly, this schedules scanner.Err to be called and
+// // inspected when the function returns, appending its error
+// // into the returned error.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// // ...
+// }
+//
+// Without defer, AppendInvoke behaves exactly like AppendInto.
+//
+// err := // ...
+// multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// // ...is roughly equivalent to...
+//
+// err := // ...
+// multierr.AppendInto(&err, foo())
+//
+// The advantage of the indirection introduced by Invoker is to make it easy
+// to defer the invocation of a function. Without this indirection, the
+// invoked function will be evaluated at the time of the defer block rather
+// than when the function returns.
+//
+// // BAD: This is likely not what the caller intended. This will evaluate
+// // foo() right away and append its result into the error when the
+// // function returns.
+// defer multierr.AppendInto(&err, foo())
+//
+// // GOOD: This will defer invocation of foo until the function returns.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// multierr provides a few Invoker implementations out of the box for
+// convenience. See Invoker for more information.
+func AppendInvoke(into *error, invoker Invoker) {
+ AppendInto(into, invoker.Invoke())
+}
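The As and Is methods added above let the stdlib errors helpers traverse a combined error, so callers can match against the underlying list without unwrapping it by hand. A sketch (assumes multierr.Combine as documented in this file):

    package main

    import (
        "errors"
        "fmt"
        "os"

        "go.uber.org/multierr"
    )

    func main() {
        err := multierr.Combine(
            errors.New("validation failed"),
            os.ErrNotExist,
        )
        // Is walks the underlying error list.
        fmt.Println(errors.Is(err, os.ErrNotExist)) // true

        var pathErr *os.PathError
        fmt.Println(errors.As(err, &pathErr)) // false: no *os.PathError stored
    }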
diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl
index 3154a1e64c..92aa65d660 100644
--- a/vendor/go.uber.org/zap/.readme.tmpl
+++ b/vendor/go.uber.org/zap/.readme.tmpl
@@ -96,14 +96,14 @@ Released under the [MIT License](LICENSE.txt).
<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
-pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
-[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
-[doc]: https://godoc.org/go.uber.org/zap
-[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master
-[ci]: https://travis-ci.com/uber-go/zap
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/zap
[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
-[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
index 3b99bf0ac8..1793b08c89 100644
--- a/vendor/go.uber.org/zap/CHANGELOG.md
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -1,4 +1,108 @@
# Changelog
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## 1.21.0 (7 Feb 2022)
+
+Enhancements:
+* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
+* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
+ string.
+
+Bugfixes:
+* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.
+
+Other changes:
+* [#1052][]: Improve encoding performance when the `AddCaller` and
+ `AddStacktrace` options are used together.
+
+[#1047]: https://github.com/uber-go/zap/pull/1047
+[#1048]: https://github.com/uber-go/zap/pull/1048
+[#1052]: https://github.com/uber-go/zap/pull/1052
+[#1058]: https://github.com/uber-go/zap/pull/1058
+
+Thanks to @aerosol and @Techassi for their contributions to this release.
+
+## 1.20.0 (4 Jan 2022)
+
+Enhancements:
+* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
+ characters between log statements.
+* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
+ encoding of reflected log fields.
+
+Bugfixes:
+* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
+* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
+ methods when the methods return.
+* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.
+
+Other changes:
+* [#1028][]: Drop support for Go < 1.15.
+
+[#554]: https://github.com/uber-go/zap/pull/554
+[#989]: https://github.com/uber-go/zap/pull/989
+[#1011]: https://github.com/uber-go/zap/pull/1011
+[#1017]: https://github.com/uber-go/zap/pull/1017
+[#1028]: https://github.com/uber-go/zap/pull/1028
+[#1033]: https://github.com/uber-go/zap/pull/1033
+[#1039]: https://github.com/uber-go/zap/pull/1039
+
+Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release.
+
+## 1.19.1 (8 Sep 2021)
+
+Bugfixes:
+* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
+* [#1003][]: JSON: Fix inaccurate precision when encoding float32.
+
+[#1001]: https://github.com/uber-go/zap/pull/1001
+[#1003]: https://github.com/uber-go/zap/pull/1003
+
+## 1.19.0 (9 Aug 2021)
+
+Enhancements:
+* [#975][]: Avoid panicking in Sampler core if the level is out of bounds.
+* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields
+ better.
+
+[#975]: https://github.com/uber-go/zap/pull/975
+[#984]: https://github.com/uber-go/zap/pull/984
+
+Thanks to @lancoLiu and @thockin for their contributions to this release.
+
+## 1.18.1 (28 Jun 2021)
+
+Bugfixes:
+* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`.
+
+[#974]: https://github.com/uber-go/zap/pull/974
+
+## 1.18.0 (28 Jun 2021)
+
+Enhancements:
+* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers
+ messages in-memory and flushes them periodically.
+* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`.
+* [#897][]: Add `zap.WithClock` option to control the source of time via the
+ new `zapcore.Clock` interface.
+* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w`
+ methods don't match expectations.
+* [#943][]: Add support for filtering by level or arbitrary matcher function to
+ `zaptest/observer`.
+* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's
+ `buffer.Buffer`.
+
+Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee
+for their contributions to this release.
+
+[#691]: https://github.com/uber-go/zap/pull/691
+[#897]: https://github.com/uber-go/zap/pull/897
+[#943]: https://github.com/uber-go/zap/pull/943
+[#949]: https://github.com/uber-go/zap/pull/949
+[#961]: https://github.com/uber-go/zap/pull/961
+[#971]: https://github.com/uber-go/zap/pull/971
## 1.17.0 (25 May 2021)
diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md
index 1e64d6cffc..9c9dfe1ed7 100644
--- a/vendor/go.uber.org/zap/README.md
+++ b/vendor/go.uber.org/zap/README.md
@@ -66,38 +66,38 @@ Log a message and 10 fields:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 862 ns/op | +0% | 5 allocs/op
-| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op
-| zerolog | 4021 ns/op | +366% | 76 allocs/op
-| go-kit | 4542 ns/op | +427% | 105 allocs/op
-| apex/log | 26785 ns/op | +3007% | 115 allocs/op
-| logrus | 29501 ns/op | +3322% | 125 allocs/op
-| log15 | 29906 ns/op | +3369% | 122 allocs/op
+| :zap: zap | 2900 ns/op | +0% | 5 allocs/op
+| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op
+| zerolog | 10639 ns/op | +267% | 32 allocs/op
+| go-kit | 14434 ns/op | +398% | 59 allocs/op
+| logrus | 17104 ns/op | +490% | 81 allocs/op
+| apex/log | 32424 ns/op | +1018% | 66 allocs/op
+| log15 | 33579 ns/op | +1058% | 76 allocs/op
Log a message with a logger that already has 10 fields of context:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 126 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op
-| zerolog | 88 ns/op | -30% | 0 allocs/op
-| go-kit | 5087 ns/op | +3937% | 103 allocs/op
-| log15 | 18548 ns/op | +14621% | 73 allocs/op
-| apex/log | 26012 ns/op | +20544% | 104 allocs/op
-| logrus | 27236 ns/op | +21516% | 113 allocs/op
+| :zap: zap | 373 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op
+| zerolog | 288 ns/op | -23% | 0 allocs/op
+| go-kit | 11785 ns/op | +3060% | 58 allocs/op
+| logrus | 19629 ns/op | +5162% | 70 allocs/op
+| log15 | 21866 ns/op | +5762% | 72 allocs/op
+| apex/log | 30890 ns/op | +8182% | 55 allocs/op
Log a static string, without any context or `printf`-style templating:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 118 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op
-| zerolog | 93 ns/op | -21% | 0 allocs/op
-| go-kit | 280 ns/op | +137% | 11 allocs/op
-| standard library | 499 ns/op | +323% | 2 allocs/op
-| apex/log | 1990 ns/op | +1586% | 10 allocs/op
-| logrus | 3129 ns/op | +2552% | 24 allocs/op
-| log15 | 3887 ns/op | +3194% | 23 allocs/op
+| :zap: zap | 381 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op
+| zerolog | 369 ns/op | -3% | 0 allocs/op
+| standard library | 385 ns/op | +1% | 2 allocs/op
+| go-kit | 606 ns/op | +59% | 11 allocs/op
+| logrus | 1730 ns/op | +354% | 25 allocs/op
+| apex/log | 1998 ns/op | +424% | 7 allocs/op
+| log15 | 4546 ns/op | +1093% | 22 allocs/op
## Development Status: Stable
diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go
index 3f4b86e081..9e929cd98e 100644
--- a/vendor/go.uber.org/zap/buffer/buffer.go
+++ b/vendor/go.uber.org/zap/buffer/buffer.go
@@ -106,6 +106,24 @@ func (b *Buffer) Write(bs []byte) (int, error) {
return len(bs), nil
}
+// WriteByte writes a single byte to the Buffer.
+//
+// The returned error is always nil; the signature is kept compatible
+// with bytes.Buffer and bufio.Writer.
+func (b *Buffer) WriteByte(v byte) error {
+ b.AppendByte(v)
+ return nil
+}
+
+// WriteString writes a string to the Buffer.
+//
+// The returned error is always nil; the signature is kept compatible
+// with bytes.Buffer and bufio.Writer.
+func (b *Buffer) WriteString(s string) (int, error) {
+ b.AppendString(s)
+ return len(s), nil
+}
+
// TrimNewline trims any final "\n" byte from the end of the buffer.
func (b *Buffer) TrimNewline() {
if i := len(b.bs) - 1; i >= 0 {
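With WriteByte and WriteString added above, *buffer.Buffer satisfies io.ByteWriter and io.StringWriter and can be passed to stdlib helpers directly. A sketch against the vendored zap/buffer package:

    package main

    import (
        "fmt"
        "io"

        "go.uber.org/zap/buffer"
    )

    func main() {
        pool := buffer.NewPool()
        buf := pool.Get()
        defer buf.Free()

        io.WriteString(buf, "hello") // dispatches to the new WriteString
        buf.WriteByte('!')
        fmt.Println(buf.String()) // hello!
    }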
diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go
index c1ac0507cd..3cb46c9e0a 100644
--- a/vendor/go.uber.org/zap/global.go
+++ b/vendor/go.uber.org/zap/global.go
@@ -31,6 +31,7 @@ import (
)
const (
+ _stdLogDefaultDepth = 1
_loggerWriterDepth = 2
_programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
"https://github.com/uber-go/zap/issues/new and reference this error: %v"
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
index 3567a9a1e6..8f86c430f0 100644
--- a/vendor/go.uber.org/zap/level.go
+++ b/vendor/go.uber.org/zap/level.go
@@ -86,6 +86,23 @@ func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
return a
}
+// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseAtomicLevel(text string) (AtomicLevel, error) {
+ a := NewAtomicLevel()
+ l, err := zapcore.ParseLevel(text)
+ if err != nil {
+ return a, err
+ }
+
+ a.SetLevel(l)
+ return a, nil
+}
+
// Enabled implements the zapcore.LevelEnabler interface, which allows the
// AtomicLevel to be used in place of traditional static levels.
func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
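ParseAtomicLevel above is a convenience for wiring text configuration (flags, env vars) into a live level. A sketch:

    package main

    import (
        "fmt"
        "log"

        "go.uber.org/zap"
    )

    func main() {
        lvl, err := zap.ParseAtomicLevel("warn") // e.g. from a flag or env var
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(lvl.Level())     // warn
        lvl.SetLevel(zap.DebugLevel) // adjustable at runtime
    }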
diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
index 553f258e74..087c742228 100644
--- a/vendor/go.uber.org/zap/logger.go
+++ b/vendor/go.uber.org/zap/logger.go
@@ -24,10 +24,9 @@ import (
"fmt"
"io/ioutil"
"os"
- "runtime"
"strings"
- "time"
+ "go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/zapcore"
)
@@ -51,6 +50,8 @@ type Logger struct {
addStack zapcore.LevelEnabler
callerSkip int
+
+ clock zapcore.Clock
}
// New constructs a new Logger from the provided zapcore.Core and Options. If
@@ -71,6 +72,7 @@ func New(core zapcore.Core, options ...Option) *Logger {
core: core,
errorOutput: zapcore.Lock(os.Stderr),
addStack: zapcore.FatalLevel + 1,
+ clock: zapcore.DefaultClock,
}
return log.WithOptions(options...)
}
@@ -85,6 +87,7 @@ func NewNop() *Logger {
core: zapcore.NewNopCore(),
errorOutput: zapcore.AddSync(ioutil.Discard),
addStack: zapcore.FatalLevel + 1,
+ clock: zapcore.DefaultClock,
}
}
@@ -256,8 +259,10 @@ func (log *Logger) clone() *Logger {
}
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
- // check must always be called directly by a method in the Logger interface
- // (e.g., Check, Info, Fatal).
+ // Logger.check must always be called directly by a method in the
+ // Logger interface (e.g., Check, Info, Fatal).
+ // This skips Logger.check and the Info/Fatal/Check/etc. method that
+ // called it.
const callerSkipOffset = 2
// Check the level first to reduce the cost of disabled log calls.
@@ -270,7 +275,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// log message will actually be written somewhere.
ent := zapcore.Entry{
LoggerName: log.name,
- Time: time.Now(),
+ Time: log.clock.Now(),
Level: lvl,
Message: msg,
}
@@ -304,42 +309,55 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Thread the error output through to the CheckedEntry.
ce.ErrorOutput = log.errorOutput
- if log.addCaller {
- frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
- if !defined {
- fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC())
+
+ addStack := log.addStack.Enabled(ce.Level)
+ if !log.addCaller && !addStack {
+ return ce
+ }
+
+ // Adding the caller or stack trace requires capturing the callers of
+ // this function. We'll share information between these two.
+ stackDepth := stacktraceFirst
+ if addStack {
+ stackDepth = stacktraceFull
+ }
+ stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth)
+ defer stack.Free()
+
+ if stack.Count() == 0 {
+ if log.addCaller {
+ fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
log.errorOutput.Sync()
}
+ return ce
+ }
- ce.Entry.Caller = zapcore.EntryCaller{
- Defined: defined,
+ frame, more := stack.Next()
+
+ if log.addCaller {
+ ce.Caller = zapcore.EntryCaller{
+ Defined: frame.PC != 0,
PC: frame.PC,
File: frame.File,
Line: frame.Line,
Function: frame.Function,
}
}
- if log.addStack.Enabled(ce.Entry.Level) {
- ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String
- }
- return ce
-}
+ if addStack {
+ buffer := bufferpool.Get()
+ defer buffer.Free()
-// getCallerFrame gets caller frame. The argument skip is the number of stack
-// frames to ascend, with 0 identifying the caller of getCallerFrame. The
-// boolean ok is false if it was not possible to recover the information.
-//
-// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
-func getCallerFrame(skip int) (frame runtime.Frame, ok bool) {
- const skipOffset = 2 // skip getCallerFrame and Callers
-
- pc := make([]uintptr, 1)
- numFrames := runtime.Callers(skip+skipOffset, pc)
- if numFrames < 1 {
- return
+ stackfmt := newStackFormatter(buffer)
+
+ // We've already extracted the first frame, so format that
+ // separately and defer to stackfmt for the rest.
+ stackfmt.FormatFrame(frame)
+ if more {
+ stackfmt.FormatStack(stack)
+ }
+ ce.Stack = buffer.String()
}
- frame, _ = runtime.CallersFrames(pc).Next()
- return frame, frame.PC != 0
+ return ce
}
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
index 0135c20923..e9e66161f5 100644
--- a/vendor/go.uber.org/zap/options.go
+++ b/vendor/go.uber.org/zap/options.go
@@ -138,3 +138,11 @@ func OnFatal(action zapcore.CheckWriteAction) Option {
log.onFatal = action
})
}
+
+// WithClock specifies the clock used by the logger to determine the current
+// time for logged entries. Defaults to the system clock with time.Now.
+func WithClock(clock zapcore.Clock) Option {
+ return optionFunc(func(log *Logger) {
+ log.clock = clock
+ })
+}
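WithClock accepts any implementation of zapcore.Clock (Now plus NewTicker). A sketch of a fixed clock for deterministic timestamps in tests; the constantClock type is hypothetical, not part of zap:

    package main

    import (
        "time"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    // constantClock is a hypothetical zapcore.Clock that always reports the
    // same instant, which makes log output reproducible in tests.
    type constantClock time.Time

    var _ zapcore.Clock = constantClock{}

    func (c constantClock) Now() time.Time { return time.Time(c) }
    func (c constantClock) NewTicker(d time.Duration) *time.Ticker {
        return time.NewTicker(d)
    }

    func main() {
        date := time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC)
        logger, _ := zap.NewProduction(zap.WithClock(constantClock(date)))
        defer logger.Sync()
        logger.Info("timestamp comes from the fake clock")
    }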
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
index 0cf8c1ddff..3d187fa566 100644
--- a/vendor/go.uber.org/zap/stacktrace.go
+++ b/vendor/go.uber.org/zap/stacktrace.go
@@ -24,62 +24,153 @@ import (
"runtime"
"sync"
+ "go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
)
-var (
- _stacktracePool = sync.Pool{
- New: func() interface{} {
- return newProgramCounters(64)
- },
- }
+var _stacktracePool = sync.Pool{
+ New: func() interface{} {
+ return &stacktrace{
+ storage: make([]uintptr, 64),
+ }
+ },
+}
+
+type stacktrace struct {
+ pcs []uintptr // program counters; always a subslice of storage
+ frames *runtime.Frames
+
+ // The size of pcs varies depending on requirements:
+	// it will be one if only the first frame was requested,
+ // and otherwise it will reflect the depth of the call stack.
+ //
+ // storage decouples the slice we need (pcs) from the slice we pool.
+ // We will always allocate a reasonably large storage, but we'll use
+ // only as much of it as we need.
+ storage []uintptr
+}
+
+// stacktraceDepth specifies how deep of a stack trace should be captured.
+type stacktraceDepth int
+
+const (
+ // stacktraceFirst captures only the first frame.
+ stacktraceFirst stacktraceDepth = iota
+
+ // stacktraceFull captures the entire call stack, allocating more
+ // storage for it if needed.
+ stacktraceFull
)
-func takeStacktrace(skip int) string {
- buffer := bufferpool.Get()
- defer buffer.Free()
- programCounters := _stacktracePool.Get().(*programCounters)
- defer _stacktracePool.Put(programCounters)
-
- var numFrames int
- for {
- // Skip the call to runtime.Callers and takeStacktrace so that the
- // program counters start at the caller of takeStacktrace.
- numFrames = runtime.Callers(skip+2, programCounters.pcs)
- if numFrames < len(programCounters.pcs) {
- break
- }
- // Don't put the too-short counter slice back into the pool; this lets
- // the pool adjust if we consistently take deep stacktraces.
- programCounters = newProgramCounters(len(programCounters.pcs) * 2)
+// captureStacktrace captures a stack trace of the specified depth, skipping
+// the provided number of frames. skip=0 identifies the caller of
+// captureStacktrace.
+//
+// The caller must call Free on the returned stacktrace after using it.
+func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
+ stack := _stacktracePool.Get().(*stacktrace)
+
+ switch depth {
+ case stacktraceFirst:
+ stack.pcs = stack.storage[:1]
+ case stacktraceFull:
+ stack.pcs = stack.storage
}
- i := 0
- frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
+ // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
+ // itself. +2 to skip captureStacktrace and runtime.Callers.
+ numFrames := runtime.Callers(
+ skip+2,
+ stack.pcs,
+ )
- // Note: On the last iteration, frames.Next() returns false, with a valid
- // frame, but we ignore this frame. The last frame is a a runtime frame which
- // adds noise, since it's only either runtime.main or runtime.goexit.
- for frame, more := frames.Next(); more; frame, more = frames.Next() {
- if i != 0 {
- buffer.AppendByte('\n')
+ // runtime.Callers truncates the recorded stacktrace if there is no
+ // room in the provided slice. For the full stack trace, keep expanding
+ // storage until there are fewer frames than there is room.
+ if depth == stacktraceFull {
+ pcs := stack.pcs
+ for numFrames == len(pcs) {
+ pcs = make([]uintptr, len(pcs)*2)
+ numFrames = runtime.Callers(skip+2, pcs)
}
- i++
- buffer.AppendString(frame.Function)
- buffer.AppendByte('\n')
- buffer.AppendByte('\t')
- buffer.AppendString(frame.File)
- buffer.AppendByte(':')
- buffer.AppendInt(int64(frame.Line))
+
+ // Discard old storage instead of returning it to the pool.
+ // This will adjust the pool size over time if stack traces are
+ // consistently very deep.
+ stack.storage = pcs
+ stack.pcs = pcs[:numFrames]
+ } else {
+ stack.pcs = stack.pcs[:numFrames]
}
+ stack.frames = runtime.CallersFrames(stack.pcs)
+ return stack
+}
+
+// Free releases resources associated with this stacktrace
+// and returns it back to the pool.
+func (st *stacktrace) Free() {
+ st.frames = nil
+ st.pcs = nil
+ _stacktracePool.Put(st)
+}
+
+// Count reports the total number of frames in this stacktrace.
+// Count DOES NOT change as Next is called.
+func (st *stacktrace) Count() int {
+ return len(st.pcs)
+}
+
+// Next returns the next frame in the stack trace,
+// and a boolean indicating whether there are more after it.
+func (st *stacktrace) Next() (_ runtime.Frame, more bool) {
+ return st.frames.Next()
+}
+
+func takeStacktrace(skip int) string {
+ stack := captureStacktrace(skip+1, stacktraceFull)
+ defer stack.Free()
+
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+
+ stackfmt := newStackFormatter(buffer)
+ stackfmt.FormatStack(stack)
return buffer.String()
}
-type programCounters struct {
- pcs []uintptr
+// stackFormatter formats a stack trace into a readable string representation.
+type stackFormatter struct {
+ b *buffer.Buffer
+	nonEmpty bool // whether we've written at least one frame already
+}
+
+// newStackFormatter builds a new stackFormatter.
+func newStackFormatter(b *buffer.Buffer) stackFormatter {
+ return stackFormatter{b: b}
+}
+
+// FormatStack formats all remaining frames in the provided stacktrace -- minus
+// the final runtime.main/runtime.goexit frame.
+func (sf *stackFormatter) FormatStack(stack *stacktrace) {
+ // Note: On the last iteration, frames.Next() returns false, with a valid
+	// frame, but we ignore this frame. The last frame is a runtime frame which
+ // adds noise, since it's only either runtime.main or runtime.goexit.
+ for frame, more := stack.Next(); more; frame, more = stack.Next() {
+ sf.FormatFrame(frame)
+ }
}
-func newProgramCounters(size int) *programCounters {
- return &programCounters{make([]uintptr, size)}
+// FormatFrame formats the given frame.
+func (sf *stackFormatter) FormatFrame(frame runtime.Frame) {
+ if sf.nonEmpty {
+ sf.b.AppendByte('\n')
+ }
+ sf.nonEmpty = true
+ sf.b.AppendString(frame.Function)
+ sf.b.AppendByte('\n')
+ sf.b.AppendByte('\t')
+ sf.b.AppendString(frame.File)
+ sf.b.AppendByte(':')
+ sf.b.AppendInt(int64(frame.Line))
}
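The skip arithmetic above (skip+2 to step over runtime.Callers and captureStacktrace itself) is easy to get wrong. The new helpers are unexported, but the same convention can be reproduced standalone; a sketch, where firstFrame is a hypothetical stand-in for the stacktraceFirst path:

```go
package main

import (
	"fmt"
	"runtime"
)

// firstFrame mirrors the skip arithmetic used by captureStacktrace:
// runtime.Callers treats skip=0 as runtime.Callers itself, so we add 2
// to step over both runtime.Callers and firstFrame, making skip=0 mean
// "the caller of firstFrame".
func firstFrame(skip int) (runtime.Frame, bool) {
	pcs := make([]uintptr, 1)
	n := runtime.Callers(skip+2, pcs)
	if n < 1 {
		return runtime.Frame{}, false
	}
	frame, _ := runtime.CallersFrames(pcs).Next()
	return frame, frame.PC != 0
}

func main() {
	if f, ok := firstFrame(0); ok {
		// Prints main.main and its file:line, in zap's stack format.
		fmt.Printf("%s\n\t%s:%d\n", f.Function, f.File, f.Line)
	}
}
```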
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
index 4084dada79..0b9651981a 100644
--- a/vendor/go.uber.org/zap/sugar.go
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -266,7 +266,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
// Make sure this element isn't a dangling key.
if i == len(args)-1 {
- s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i]))
+ s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
break
}
@@ -287,7 +287,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
// If we encountered any invalid key-value pairs, log an error.
if len(invalid) > 0 {
- s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid))
+ s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid))
}
return fields
}
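With DPanic downgraded to Error, malformed key-value arguments no longer panic under a development logger; they surface as error entries instead. A small sketch of the behavior change:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	sugar := logger.Sugar()

	// A dangling key: before this change, sweetenFields called DPanic,
	// which panics in development mode. Now the mistake is reported as
	// an ERROR entry and the remaining fields are still logged.
	sugar.Infow("processing request", "requestID")
}
```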
diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
new file mode 100644
index 0000000000..ef2f7d9637
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
@@ -0,0 +1,188 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bufio"
+ "sync"
+ "time"
+
+ "go.uber.org/multierr"
+)
+
+const (
+ // _defaultBufferSize specifies the default size used by Buffer.
+ _defaultBufferSize = 256 * 1024 // 256 kB
+
+ // _defaultFlushInterval specifies the default flush interval for
+ // Buffer.
+ _defaultFlushInterval = 30 * time.Second
+)
+
+// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before
+// flushing them to a wrapped WriteSyncer after reaching some limit, or at some
+// fixed interval--whichever comes first.
+//
+// BufferedWriteSyncer is safe for concurrent use. You don't need to use
+// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
+type BufferedWriteSyncer struct {
+ // WS is the WriteSyncer around which BufferedWriteSyncer will buffer
+ // writes.
+ //
+ // This field is required.
+ WS WriteSyncer
+
+	// Size specifies the maximum amount of data the writer will buffer
+ // before flushing.
+ //
+ // Defaults to 256 kB if unspecified.
+ Size int
+
+ // FlushInterval specifies how often the writer should flush data if
+ // there have been no writes.
+ //
+ // Defaults to 30 seconds if unspecified.
+ FlushInterval time.Duration
+
+ // Clock, if specified, provides control of the source of time for the
+ // writer.
+ //
+ // Defaults to the system clock.
+ Clock Clock
+
+ // unexported fields for state
+ mu sync.Mutex
+ initialized bool // whether initialize() has run
+ stopped bool // whether Stop() has run
+ writer *bufio.Writer
+ ticker *time.Ticker
+ stop chan struct{} // closed when flushLoop should stop
+ done chan struct{} // closed when flushLoop has stopped
+}
+
+func (s *BufferedWriteSyncer) initialize() {
+ size := s.Size
+ if size == 0 {
+ size = _defaultBufferSize
+ }
+
+ flushInterval := s.FlushInterval
+ if flushInterval == 0 {
+ flushInterval = _defaultFlushInterval
+ }
+
+ if s.Clock == nil {
+ s.Clock = DefaultClock
+ }
+
+ s.ticker = s.Clock.NewTicker(flushInterval)
+ s.writer = bufio.NewWriterSize(s.WS, size)
+ s.stop = make(chan struct{})
+ s.done = make(chan struct{})
+ s.initialized = true
+ go s.flushLoop()
+}
+
+// Write writes log data into the buffer directly. Multiple Write calls are
+// batched, and the buffered data is flushed to disk when the buffer is full
+// or when the flush interval elapses, whichever comes first.
+func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if !s.initialized {
+ s.initialize()
+ }
+
+ // To avoid partial writes from being flushed, we manually flush the existing buffer if:
+ // * The current write doesn't fit into the buffer fully, and
+ // * The buffer is not empty (since bufio will not split large writes when the buffer is empty)
+ if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 {
+ if err := s.writer.Flush(); err != nil {
+ return 0, err
+ }
+ }
+
+ return s.writer.Write(bs)
+}
+
+// Sync flushes buffered log data into disk directly.
+func (s *BufferedWriteSyncer) Sync() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var err error
+ if s.initialized {
+ err = s.writer.Flush()
+ }
+
+ return multierr.Append(err, s.WS.Sync())
+}
+
+// flushLoop flushes the buffer at the configured interval until Stop is
+// called.
+func (s *BufferedWriteSyncer) flushLoop() {
+ defer close(s.done)
+
+ for {
+ select {
+ case <-s.ticker.C:
+			// We ignore the error here because the underlying bufio
+			// writer stores any errors, and we return any error from
+			// Sync() as part of the close.
+ _ = s.Sync()
+ case <-s.stop:
+ return
+ }
+ }
+}
+
+// Stop closes the buffer, cleans up background goroutines, and flushes
+// remaining unwritten data.
+func (s *BufferedWriteSyncer) Stop() (err error) {
+ var stopped bool
+
+ // Critical section.
+ func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if !s.initialized {
+ return
+ }
+
+ stopped = s.stopped
+ if stopped {
+ return
+ }
+ s.stopped = true
+
+ s.ticker.Stop()
+ close(s.stop) // tell flushLoop to stop
+ <-s.done // and wait until it has
+ }()
+
+ // Don't call Sync on consecutive Stops.
+ if !stopped {
+ err = s.Sync()
+ }
+
+ return err
+}
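A minimal sketch of wiring BufferedWriteSyncer into a core, assuming stdout as the wrapped sink; Stop must be called to flush any remaining data and shut down the background flush goroutine:

```go
package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	ws := &zapcore.BufferedWriteSyncer{
		WS:            zapcore.AddSync(os.Stdout),
		Size:          4 * 1024,        // flush once 4 kB are buffered...
		FlushInterval: 5 * time.Second, // ...or every 5 seconds, whichever is first
	}
	defer ws.Stop() // flush remaining data and stop the flush goroutine

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws,
		zapcore.InfoLevel,
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("buffered write")
}
```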
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
new file mode 100644
index 0000000000..422fd82a6b
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/clock.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "time"
+
+// DefaultClock is the default clock used by Zap in operations that require
+// time. This clock uses the system clock for all operations.
+var DefaultClock = systemClock{}
+
+// Clock is a source of time for logged entries.
+type Clock interface {
+ // Now returns the current local time.
+ Now() time.Time
+
+	// NewTicker returns a *time.Ticker that holds a channel
+	// that delivers the clock's "ticks".
+ NewTicker(time.Duration) *time.Ticker
+}
+
+// systemClock implements the default Clock, which uses the system time.
+type systemClock struct{}
+
+func (systemClock) Now() time.Time {
+ return time.Now()
+}
+
+func (systemClock) NewTicker(duration time.Duration) *time.Ticker {
+ return time.NewTicker(duration)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
index 2307af404c..1aa5dc3646 100644
--- a/vendor/go.uber.org/zap/zapcore/console_encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -125,11 +125,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
line.AppendString(ent.Stack)
}
- if c.LineEnding != "" {
- line.AppendString(c.LineEnding)
- } else {
- line.AppendString(DefaultLineEnding)
- }
+ line.AppendString(c.LineEnding)
return line, nil
}
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
index 6601ca166c..6e5fd56511 100644
--- a/vendor/go.uber.org/zap/zapcore/encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -22,6 +22,7 @@ package zapcore
import (
"encoding/json"
+ "io"
"time"
"go.uber.org/zap/buffer"
@@ -312,14 +313,15 @@ func (e *NameEncoder) UnmarshalText(text []byte) error {
type EncoderConfig struct {
// Set the keys used for each log entry. If any key is empty, that portion
// of the entry is omitted.
- MessageKey string `json:"messageKey" yaml:"messageKey"`
- LevelKey string `json:"levelKey" yaml:"levelKey"`
- TimeKey string `json:"timeKey" yaml:"timeKey"`
- NameKey string `json:"nameKey" yaml:"nameKey"`
- CallerKey string `json:"callerKey" yaml:"callerKey"`
- FunctionKey string `json:"functionKey" yaml:"functionKey"`
- StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
- LineEnding string `json:"lineEnding" yaml:"lineEnding"`
+ MessageKey string `json:"messageKey" yaml:"messageKey"`
+ LevelKey string `json:"levelKey" yaml:"levelKey"`
+ TimeKey string `json:"timeKey" yaml:"timeKey"`
+ NameKey string `json:"nameKey" yaml:"nameKey"`
+ CallerKey string `json:"callerKey" yaml:"callerKey"`
+ FunctionKey string `json:"functionKey" yaml:"functionKey"`
+ StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
+ SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"`
+ LineEnding string `json:"lineEnding" yaml:"lineEnding"`
// Configure the primitive representations of common complex types. For
// example, some users may want all time.Times serialized as floating-point
// seconds since epoch, while others may prefer ISO8601 strings.
@@ -330,6 +332,9 @@ type EncoderConfig struct {
// Unlike the other primitive type encoders, EncodeName is optional. The
// zero value falls back to FullNameEncoder.
EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
+ // Configure the encoder for interface{} type objects.
+	// If not provided, objects are encoded using a json.Encoder.
+ NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
// Configures the field separator used by the console encoder. Defaults
// to tab.
ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
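Of the two new knobs, SkipLineEnding suppresses the trailing newline entirely, which previously required misusing LineEnding; NewReflectedEncoder is exercised further down alongside the reflected_encoder.go change. A sketch of the former, assuming the production encoder config as a base:

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionEncoderConfig()
	// Resolved once at construction (see newJSONEncoder below): with
	// SkipLineEnding set, LineEnding is forced to "" and entries carry
	// no trailing newline, useful when the sink adds its own framing.
	cfg.SkipLineEnding = true

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(cfg),
		zapcore.AddSync(os.Stdout),
		zapcore.InfoLevel,
	)
	zap.New(core).Info("no trailing newline after this entry")
}
```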
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
index 4aa8b4f90b..0885505b75 100644
--- a/vendor/go.uber.org/zap/zapcore/entry.go
+++ b/vendor/go.uber.org/zap/zapcore/entry.go
@@ -208,7 +208,7 @@ func (ce *CheckedEntry) Write(fields ...Field) {
// If the entry is dirty, log an internal error; because the
// CheckedEntry is being used after it was returned to the pool,
// the message may be an amalgamation from multiple call sites.
- fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry)
+ fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
ce.ErrorOutput.Sync()
}
return
@@ -219,11 +219,9 @@ func (ce *CheckedEntry) Write(fields ...Field) {
for i := range ce.cores {
err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
}
- if ce.ErrorOutput != nil {
- if err != nil {
- fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err)
- ce.ErrorOutput.Sync()
- }
+ if err != nil && ce.ErrorOutput != nil {
+ fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
+ ce.ErrorOutput.Sync()
}
should, msg := ce.should, ce.Message
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
index f2a07d7864..74919b0ccb 100644
--- a/vendor/go.uber.org/zap/zapcore/error.go
+++ b/vendor/go.uber.org/zap/zapcore/error.go
@@ -83,7 +83,7 @@ type errorGroup interface {
Errors() []error
}
-// Note that errArry and errArrayElem are very similar to the version
+// Note that errArray and errArrayElem are very similar to the version
// implemented in the top-level error.go file. We can't re-use this because
// that would require exporting errArray as part of the zapcore API.
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
index 5cf7d917e9..c5d751b821 100644
--- a/vendor/go.uber.org/zap/zapcore/json_encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -22,7 +22,6 @@ package zapcore
import (
"encoding/base64"
- "encoding/json"
"math"
"sync"
"time"
@@ -64,7 +63,7 @@ type jsonEncoder struct {
// for encoding generic values by reflection
reflectBuf *buffer.Buffer
- reflectEnc *json.Encoder
+ reflectEnc ReflectedEncoder
}
// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
@@ -82,6 +81,17 @@ func NewJSONEncoder(cfg EncoderConfig) Encoder {
}
func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
+ if cfg.SkipLineEnding {
+ cfg.LineEnding = ""
+ } else if cfg.LineEnding == "" {
+ cfg.LineEnding = DefaultLineEnding
+ }
+
+ // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default
+ if cfg.NewReflectedEncoder == nil {
+ cfg.NewReflectedEncoder = defaultReflectedEncoder
+ }
+
return &jsonEncoder{
EncoderConfig: &cfg,
buf: bufferpool.Get(),
@@ -118,6 +128,11 @@ func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
enc.AppendComplex128(val)
}
+func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
+ enc.addKey(key)
+ enc.AppendComplex64(val)
+}
+
func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
enc.addKey(key)
enc.AppendDuration(val)
@@ -128,6 +143,11 @@ func (enc *jsonEncoder) AddFloat64(key string, val float64) {
enc.AppendFloat64(val)
}
+func (enc *jsonEncoder) AddFloat32(key string, val float32) {
+ enc.addKey(key)
+ enc.AppendFloat32(val)
+}
+
func (enc *jsonEncoder) AddInt64(key string, val int64) {
enc.addKey(key)
enc.AppendInt64(val)
@@ -136,10 +156,7 @@ func (enc *jsonEncoder) AddInt64(key string, val int64) {
func (enc *jsonEncoder) resetReflectBuf() {
if enc.reflectBuf == nil {
enc.reflectBuf = bufferpool.Get()
- enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
-
- // For consistency with our custom JSON encoder.
- enc.reflectEnc.SetEscapeHTML(false)
+ enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
} else {
enc.reflectBuf.Reset()
}
@@ -201,10 +218,16 @@ func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
}
func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
+ // Close ONLY new openNamespaces that are created during
+ // AppendObject().
+ old := enc.openNamespaces
+ enc.openNamespaces = 0
enc.addElementSeparator()
enc.buf.AppendByte('{')
err := obj.MarshalLogObject(enc)
enc.buf.AppendByte('}')
+ enc.closeOpenNamespaces()
+ enc.openNamespaces = old
return err
}
@@ -220,16 +243,23 @@ func (enc *jsonEncoder) AppendByteString(val []byte) {
enc.buf.AppendByte('"')
}
-func (enc *jsonEncoder) AppendComplex128(val complex128) {
+// appendComplex appends the encoded form of the provided complex128 value.
+// precision specifies the encoding precision for the real and imaginary
+// components of the complex number.
+func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
enc.addElementSeparator()
// Cast to a platform-independent, fixed-size type.
r, i := float64(real(val)), float64(imag(val))
enc.buf.AppendByte('"')
// Because we're always in a quoted string, we can use strconv without
// special-casing NaN and +/-Inf.
- enc.buf.AppendFloat(r, 64)
- enc.buf.AppendByte('+')
- enc.buf.AppendFloat(i, 64)
+ enc.buf.AppendFloat(r, precision)
+	// If the imaginary part is less than 0, the minus (-) sign is
+	// added automatically by AppendFloat.
+ if i >= 0 {
+ enc.buf.AppendByte('+')
+ }
+ enc.buf.AppendFloat(i, precision)
enc.buf.AppendByte('i')
enc.buf.AppendByte('"')
}
@@ -292,29 +322,28 @@ func (enc *jsonEncoder) AppendUint64(val uint64) {
enc.buf.AppendUint(val)
}
-func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
-func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) }
-func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
-func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
-func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
-func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) }
+func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) }
+func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
+func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
+func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) Clone() Encoder {
clone := enc.clone()
@@ -335,7 +364,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final := enc.clone()
final.buf.AppendByte('{')
- if final.LevelKey != "" {
+ if final.LevelKey != "" && final.EncodeLevel != nil {
final.addKey(final.LevelKey)
cur := final.buf.Len()
final.EncodeLevel(ent.Level, final)
@@ -396,11 +425,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final.AddString(final.StacktraceKey, ent.Stack)
}
final.buf.AppendByte('}')
- if final.LineEnding != "" {
- final.buf.AppendString(final.LineEnding)
- } else {
- final.buf.AppendString(DefaultLineEnding)
- }
+ final.buf.AppendString(final.LineEnding)
ret := final.buf
putJSONEncoder(final)
@@ -415,6 +440,7 @@ func (enc *jsonEncoder) closeOpenNamespaces() {
for i := 0; i < enc.openNamespaces; i++ {
enc.buf.AppendByte('}')
}
+ enc.openNamespaces = 0
}
func (enc *jsonEncoder) addKey(key string) {
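The appendComplex change also fixes sign handling: the old code unconditionally wrote '+' between the real and imaginary parts, so a negative imaginary component rendered as "1+-2i". A sketch of the field-level effect:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewProduction() // JSON encoder underneath
	defer logger.Sync()

	// With the appendComplex fix this encodes as "1-2i"; the old
	// unconditional '+' produced "1+-2i" for negative imaginary parts.
	logger.Info("complex field", zap.Complex128("c", complex(1, -2)))
}
```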
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
index e575c9f432..56e88dc0c8 100644
--- a/vendor/go.uber.org/zap/zapcore/level.go
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -55,6 +55,18 @@ const (
_maxLevel = FatalLevel
)
+// ParseLevel parses a level based on the lower-case or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid, an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseLevel(text string) (Level, error) {
+ var level Level
+ err := level.UnmarshalText([]byte(text))
+ return level, err
+}
+
// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
switch l {
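ParseLevel is a thin wrapper over Level.UnmarshalText, so it accepts the same spellings that UnmarshalText does ("debug", "DEBUG", and so on). A sketch:

```go
package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	// Handy for turning a flag or config value into a Level.
	lvl, err := zapcore.ParseLevel("warn") // "WARN" works too
	if err != nil {
		panic(err)
	}
	fmt.Println(lvl) // warn
}
```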
diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
index d3ab9af933..8746360eca 100644
--- a/vendor/go.uber.org/zap/global_prego112.go
+++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,9 +18,24 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// See #682 for more information.
-// +build !go1.12
+package zapcore
-package zap
+import (
+ "encoding/json"
+ "io"
+)
-const _stdLogDefaultDepth = 2
+// ReflectedEncoder serializes log fields that can't be serialized with Zap's
+// JSON encoder. These have the ReflectType field type.
+// Use EncoderConfig.NewReflectedEncoder to set this.
+type ReflectedEncoder interface {
+ // Encode encodes and writes to the underlying data stream.
+ Encode(interface{}) error
+}
+
+func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
+ enc := json.NewEncoder(w)
+ // For consistency with our custom JSON encoder.
+ enc.SetEscapeHTML(false)
+ return enc
+}
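Any type with an Encode(interface{}) error method satisfies ReflectedEncoder, including *json.Encoder. A sketch that swaps in indented JSON for Reflect-typed fields; the indentation choice is illustrative, not anything zap prescribes:

```go
package main

import (
	"encoding/json"
	"io"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionEncoderConfig()
	// Replace the default reflected encoder with an indented one.
	// *json.Encoder satisfies zapcore.ReflectedEncoder as-is.
	cfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
		enc := json.NewEncoder(w)
		enc.SetEscapeHTML(false) // match zap's custom JSON encoder
		enc.SetIndent("", "  ")
		return enc
	}
	_ = zapcore.NewJSONEncoder(cfg)
}
```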
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
index 25f10ca1d7..8c116049d3 100644
--- a/vendor/go.uber.org/zap/zapcore/sampler.go
+++ b/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -133,10 +133,21 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
// each tick. If more Entries with the same level and message are seen during
// the same interval, every Mth message is logged and the rest are dropped.
//
+// For example,
+//
+// core = NewSamplerWithOptions(core, time.Second, 10, 5)
+//
+// This will log the first 10 log entries with the same level and message
+// in a one-second interval as-is. Following that, it will allow through
+// every 5th log entry with the same level and message in that interval.
+//
+// If thereafter is zero, the Core will drop all log entries after the first N
+// in that interval.
+//
// Sampler can be configured to report sampling decisions with the SamplerHook
// option.
//
-// Keep in mind that zap's sampling implementation is optimized for speed over
+// Keep in mind that Zap's sampling implementation is optimized for speed over
// absolute precision; under load, each tick may be slightly over- or
// under-sampled.
func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
@@ -197,12 +208,14 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
return ce
}
- counter := s.counts.get(ent.Level, ent.Message)
- n := counter.IncCheckReset(ent.Time, s.tick)
- if n > s.first && (n-s.first)%s.thereafter != 0 {
- s.hook(ent, LogDropped)
- return ce
+ if ent.Level >= _minLevel && ent.Level <= _maxLevel {
+ counter := s.counts.get(ent.Level, ent.Message)
+ n := counter.IncCheckReset(ent.Time, s.tick)
+ if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
+ s.hook(ent, LogDropped)
+ return ce
+ }
+ s.hook(ent, LogSampled)
}
- s.hook(ent, LogSampled)
return s.Core.Check(ent, ce)
}
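The new guard also makes thereafter=0 meaningful: previously (n-s.first)%s.thereafter would divide by zero, whereas now everything past the first N entries in a tick is dropped. A sketch combining that mode with a SamplerHook to count drops:

```go
package main

import (
	"os"
	"sync/atomic"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped int64
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stdout),
		zapcore.InfoLevel,
	)
	// First 10 identical entries per second pass; with thereafter=0 the
	// rest of the interval is dropped instead of panicking on mod-zero.
	sampled := zapcore.NewSamplerWithOptions(base, time.Second, 10, 0,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				atomic.AddInt64(&dropped, 1)
			}
		}),
	)
	logger := zap.New(sampled)
	for i := 0; i < 100; i++ {
		logger.Info("hot loop message")
	}
	logger.Sync() // dropped is now 90
}
```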
diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go
new file mode 100644
index 0000000000..75248fd16e
--- /dev/null
+++ b/vendor/google.golang.org/api/googleapi/googleapi.go
@@ -0,0 +1,468 @@
+// Copyright 2011 Google LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package googleapi contains the common code shared by all Google API
+// libraries.
+package googleapi // import "google.golang.org/api/googleapi"
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "google.golang.org/api/internal/third_party/uritemplates"
+)
+
+// ContentTyper is an interface for Readers which know (or would like
+// to override) their Content-Type. If a media body doesn't implement
+// ContentTyper, the type is sniffed from the content using
+// http.DetectContentType.
+type ContentTyper interface {
+ ContentType() string
+}
+
+// A SizeReaderAt is a ReaderAt with a Size method.
+// An io.SectionReader implements SizeReaderAt.
+type SizeReaderAt interface {
+ io.ReaderAt
+ Size() int64
+}
+
+// ServerResponse is embedded in each Do response and
+// provides the HTTP status code and header sent by the server.
+type ServerResponse struct {
+ // HTTPStatusCode is the server's response status code. When using a
+ // resource method's Do call, this will always be in the 2xx range.
+ HTTPStatusCode int
+ // Header contains the response header fields from the server.
+ Header http.Header
+}
+
+const (
+ // Version defines the gax version being used. This is typically sent
+ // in an HTTP header to services.
+ Version = "0.5"
+
+ // UserAgent is the header string used to identify this package.
+ UserAgent = "google-api-go-client/" + Version
+
+ // DefaultUploadChunkSize is the default chunk size to use for resumable
+ // uploads if not specified by the user.
+ DefaultUploadChunkSize = 16 * 1024 * 1024
+
+ // MinUploadChunkSize is the minimum chunk size that can be used for
+	// resumable uploads. All user-specified chunk sizes must be a multiple of
+ // this value.
+ MinUploadChunkSize = 256 * 1024
+)
+
+// Error contains an error response from the server.
+type Error struct {
+ // Code is the HTTP response status code and will always be populated.
+ Code int `json:"code"`
+ // Message is the server response message and is only populated when
+ // explicitly referenced by the JSON server response.
+ Message string `json:"message"`
+ // Details provide more context to an error.
+ Details []interface{} `json:"details"`
+ // Body is the raw response returned by the server.
+ // It is often but not always JSON, depending on how the request fails.
+ Body string
+ // Header contains the response header fields from the server.
+ Header http.Header
+
+ Errors []ErrorItem
+}
+
+// ErrorItem is a detailed error code & message from the Google API frontend.
+type ErrorItem struct {
+ // Reason is the typed error code. For example: "some_example".
+ Reason string `json:"reason"`
+ // Message is the human-readable description of the error.
+ Message string `json:"message"`
+}
+
+func (e *Error) Error() string {
+ if len(e.Errors) == 0 && e.Message == "" {
+ return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body)
+ }
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code)
+ if e.Message != "" {
+ fmt.Fprintf(&buf, "%s", e.Message)
+ }
+ if len(e.Details) > 0 {
+ var detailBuf bytes.Buffer
+ enc := json.NewEncoder(&detailBuf)
+ enc.SetIndent("", " ")
+ if err := enc.Encode(e.Details); err == nil {
+ fmt.Fprint(&buf, "\nDetails:")
+ fmt.Fprintf(&buf, "\n%s", detailBuf.String())
+
+ }
+ }
+ if len(e.Errors) == 0 {
+ return strings.TrimSpace(buf.String())
+ }
+ if len(e.Errors) == 1 && e.Errors[0].Message == e.Message {
+ fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason)
+ return buf.String()
+ }
+ fmt.Fprintln(&buf, "\nMore details:")
+ for _, v := range e.Errors {
+ fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message)
+ }
+ return buf.String()
+}
+
+type errorReply struct {
+ Error *Error `json:"error"`
+}
+
+// CheckResponse returns an error (of type *Error) if the response
+// status code is not 2xx.
+func CheckResponse(res *http.Response) error {
+ if res.StatusCode >= 200 && res.StatusCode <= 299 {
+ return nil
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err == nil {
+ jerr := new(errorReply)
+ err = json.Unmarshal(slurp, jerr)
+ if err == nil && jerr.Error != nil {
+ if jerr.Error.Code == 0 {
+ jerr.Error.Code = res.StatusCode
+ }
+ jerr.Error.Body = string(slurp)
+ jerr.Error.Header = res.Header
+ return jerr.Error
+ }
+ }
+ return &Error{
+ Code: res.StatusCode,
+ Body: string(slurp),
+ Header: res.Header,
+ }
+}
+
+// IsNotModified reports whether err is the result of the
+// server replying with http.StatusNotModified.
+// Such error values are sometimes returned by "Do" methods
+// on calls when If-None-Match is used.
+func IsNotModified(err error) bool {
+ if err == nil {
+ return false
+ }
+ ae, ok := err.(*Error)
+ return ok && ae.Code == http.StatusNotModified
+}
+
+// CheckMediaResponse returns an error (of type *Error) if the response
+// status code is not 2xx. Unlike CheckResponse it does not assume the
+// body is a JSON error document.
+// It is the caller's responsibility to close res.Body.
+func CheckMediaResponse(res *http.Response) error {
+ if res.StatusCode >= 200 && res.StatusCode <= 299 {
+ return nil
+ }
+ slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
+ return &Error{
+ Code: res.StatusCode,
+ Body: string(slurp),
+ }
+}
+
+// MarshalStyle defines whether to marshal JSON with a {"data": ...} wrapper.
+type MarshalStyle bool
+
+// WithDataWrapper marshals JSON with a {"data": ...} wrapper.
+var WithDataWrapper = MarshalStyle(true)
+
+// WithoutDataWrapper marshals JSON without a {"data": ...} wrapper.
+var WithoutDataWrapper = MarshalStyle(false)
+
+func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
+ buf := new(bytes.Buffer)
+ if wrap {
+ buf.Write([]byte(`{"data": `))
+ }
+ err := json.NewEncoder(buf).Encode(v)
+ if err != nil {
+ return nil, err
+ }
+ if wrap {
+ buf.Write([]byte(`}`))
+ }
+ return buf, nil
+}
+
+// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
+// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
+// The remaining usable pieces of resumable uploads are exposed in each auto-generated API.
+type ProgressUpdater func(current, total int64)
+
+// MediaOption defines the interface for setting media options.
+type MediaOption interface {
+ setOptions(o *MediaOptions)
+}
+
+type contentTypeOption string
+
+func (ct contentTypeOption) setOptions(o *MediaOptions) {
+ o.ContentType = string(ct)
+ if o.ContentType == "" {
+ o.ForceEmptyContentType = true
+ }
+}
+
+// ContentType returns a MediaOption which sets the Content-Type header for media uploads.
+// If ctype is empty, the Content-Type header will be omitted.
+func ContentType(ctype string) MediaOption {
+ return contentTypeOption(ctype)
+}
+
+type chunkSizeOption int
+
+func (cs chunkSizeOption) setOptions(o *MediaOptions) {
+ size := int(cs)
+ if size%MinUploadChunkSize != 0 {
+ size += MinUploadChunkSize - (size % MinUploadChunkSize)
+ }
+ o.ChunkSize = size
+}
+
+// ChunkSize returns a MediaOption which sets the chunk size for media uploads.
+// size will be rounded up to the nearest multiple of 256K.
+// Media which contains fewer than size bytes will be uploaded in a single request.
+// Media which contains size bytes or more will be uploaded in separate chunks.
+// If size is zero, media will be uploaded in a single request.
+func ChunkSize(size int) MediaOption {
+ return chunkSizeOption(size)
+}
+
+type chunkRetryDeadlineOption time.Duration
+
+func (cd chunkRetryDeadlineOption) setOptions(o *MediaOptions) {
+ o.ChunkRetryDeadline = time.Duration(cd)
+}
+
+// ChunkRetryDeadline returns a MediaOption which sets a per-chunk retry
+// deadline. If a single chunk has been attempting to upload for longer than
+// this time and the request fails, it will no longer be retried, and the error
+// will be returned to the caller.
+// This is only applicable for files which are large enough to require
+// a multi-chunk resumable upload.
+// The default value is 32s.
+// To set a deadline on the entire upload, use context timeout or cancellation.
+func ChunkRetryDeadline(deadline time.Duration) MediaOption {
+ return chunkRetryDeadlineOption(deadline)
+}
+
+// MediaOptions stores options for customizing media upload. It is not used by developers directly.
+type MediaOptions struct {
+ ContentType string
+ ForceEmptyContentType bool
+ ChunkSize int
+ ChunkRetryDeadline time.Duration
+}
+
+// ProcessMediaOptions stores options from opts in a MediaOptions.
+// It is not used by developers directly.
+func ProcessMediaOptions(opts []MediaOption) *MediaOptions {
+ mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize}
+ for _, o := range opts {
+ o.setOptions(mo)
+ }
+ return mo
+}
+
+// ResolveRelative resolves relatives such as "http://www.golang.org/" and
+// "topics/myproject/mytopic" into a single string, such as
+// "http://www.golang.org/topics/myproject/mytopic". It strips all parent
+// references (e.g. ../..) as well as anything after the host
+// (e.g. /bar/gaz gets stripped out of foo.com/bar/gaz).
+//
+// ResolveRelative panics if either basestr or relstr is not able to be parsed.
+func ResolveRelative(basestr, relstr string) string {
+ u, err := url.Parse(basestr)
+ if err != nil {
+ panic(fmt.Sprintf("failed to parse %q", basestr))
+ }
+ afterColonPath := ""
+ if i := strings.IndexRune(relstr, ':'); i > 0 {
+ afterColonPath = relstr[i+1:]
+ relstr = relstr[:i]
+ }
+ rel, err := url.Parse(relstr)
+ if err != nil {
+ panic(fmt.Sprintf("failed to parse %q", relstr))
+ }
+ u = u.ResolveReference(rel)
+ us := u.String()
+ if afterColonPath != "" {
+ us = fmt.Sprintf("%s:%s", us, afterColonPath)
+ }
+ us = strings.Replace(us, "%7B", "{", -1)
+ us = strings.Replace(us, "%7D", "}", -1)
+ us = strings.Replace(us, "%2A", "*", -1)
+ return us
+}
+
+// Expand substitutes any {encoded} strings in the URL passed in using
+// the map supplied.
+//
+// This calls SetOpaque to avoid encoding of the parameters in the URL path.
+func Expand(u *url.URL, expansions map[string]string) {
+ escaped, unescaped, err := uritemplates.Expand(u.Path, expansions)
+ if err == nil {
+ u.Path = unescaped
+ u.RawPath = escaped
+ }
+}
+
+// CloseBody is used to close res.Body.
+// Prior to calling Close, it also tries to Read a small amount to see an EOF.
+// Not seeing an EOF can prevent HTTP Transports from reusing connections.
+func CloseBody(res *http.Response) {
+ if res == nil || res.Body == nil {
+ return
+ }
+ // Justification for 3 byte reads: two for up to "\r\n" after
+ // a JSON/XML document, and then 1 to see EOF if we haven't yet.
+ // TODO(bradfitz): detect Go 1.3+ and skip these reads.
+ // See https://codereview.appspot.com/58240043
+ // and https://codereview.appspot.com/49570044
+ buf := make([]byte, 1)
+ for i := 0; i < 3; i++ {
+ _, err := res.Body.Read(buf)
+ if err != nil {
+ break
+ }
+ }
+ res.Body.Close()
+
+}
+
+// VariantType returns the type name of the given variant.
+// If the map doesn't contain the named key or the value is not a string, "" is returned.
+// This is used to support "variant" APIs that can return one of a number of different types.
+func VariantType(t map[string]interface{}) string {
+ s, _ := t["type"].(string)
+ return s
+}
+
+// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'.
+// This is used to support "variant" APIs that can return one of a number of different types.
+// It reports whether the conversion was successful.
+func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
+ var buf bytes.Buffer
+ err := json.NewEncoder(&buf).Encode(v)
+ if err != nil {
+ return false
+ }
+ return json.Unmarshal(buf.Bytes(), dst) == nil
+}
+
+// A Field names a field to be retrieved with a partial response.
+// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance
+//
+// Partial responses can dramatically reduce the amount of data that must be sent to your application.
+// In order to request partial responses, you can specify the full list of fields
+// that your application needs by adding the Fields option to your request.
+//
+// Field strings use camelCase with leading lower-case characters to identify fields within the response.
+//
+// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields,
+// you could request just those fields like this:
+//
+// svc.Events.List().Fields("nextPageToken", "items/id").Do()
+//
+// or if you were also interested in each Item's "Updated" field, you can combine them like this:
+//
+// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do()
+//
+// Another way to find field names is through the Google API explorer:
+// https://developers.google.com/apis-explorer/#p/
+type Field string
+
+// CombineFields combines fields into a single string.
+func CombineFields(s []Field) string {
+ r := make([]string, len(s))
+ for i, v := range s {
+ r[i] = string(v)
+ }
+ return strings.Join(r, ",")
+}
+
+// A CallOption is an optional argument to an API call.
+// It should be treated as an opaque value by users of Google APIs.
+//
+// A CallOption is something that configures an API call in a way that is
+// not specific to that API; for instance, controlling the quota user for
+// an API call is common across many APIs, and is thus a CallOption.
+type CallOption interface {
+ Get() (key, value string)
+}
+
+// A MultiCallOption is an option argument to an API call and can be passed
+// anywhere a CallOption is accepted. It additionally supports returning a slice
+// of values for a given key.
+type MultiCallOption interface {
+ CallOption
+ GetMulti() (key string, value []string)
+}
+
+// QuotaUser returns a CallOption that will set the quota user for a call.
+// The quota user can be used by server-side applications to control accounting.
+// It can be an arbitrary string up to 40 characters, and will override UserIP
+// if both are provided.
+func QuotaUser(u string) CallOption { return quotaUser(u) }
+
+type quotaUser string
+
+func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) }
+
+// UserIP returns a CallOption that will set the "userIp" parameter of a call.
+// This should be the IP address of the originating request.
+func UserIP(ip string) CallOption { return userIP(ip) }
+
+type userIP string
+
+func (i userIP) Get() (string, string) { return "userIp", string(i) }
+
+// Trace returns a CallOption that enables diagnostic tracing for a call.
+// traceToken is an ID supplied by Google support.
+func Trace(traceToken string) CallOption { return traceTok(traceToken) }
+
+type traceTok string
+
+func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) }
+
+type queryParameter struct {
+ key string
+ values []string
+}
+
+// QueryParameter allows setting the value(s) of an arbitrary key.
+func QueryParameter(key string, values ...string) CallOption {
+ return queryParameter{key: key, values: append([]string{}, values...)}
+}
+
+// Get will never actually be called -- GetMulti will.
+func (q queryParameter) Get() (string, string) {
+ return "", ""
+}
+
+// GetMulti returns the key and the values associated with that key.
+func (q queryParameter) GetMulti() (string, []string) {
+ return q.key, q.values
+}
+
+// TODO: Fields too
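A sketch of the usual CheckResponse flow; the bucket URL here is illustrative only:

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi"
)

func main() {
	resp, err := http.Get("https://www.googleapis.com/storage/v1/b/no-such-bucket")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// CheckResponse returns a *googleapi.Error for non-2xx statuses,
	// with Code, Message, Body, and Header populated from the response.
	if err := googleapi.CheckResponse(resp); err != nil {
		if apiErr, ok := err.(*googleapi.Error); ok {
			fmt.Println(apiErr.Code, apiErr.Message)
		}
	}
}
```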
diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go
new file mode 100644
index 0000000000..fabf74d50d
--- /dev/null
+++ b/vendor/google.golang.org/api/googleapi/types.go
@@ -0,0 +1,202 @@
+// Copyright 2013 Google LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package googleapi
+
+import (
+ "encoding/json"
+ "errors"
+ "strconv"
+)
+
+// Int64s is a slice of int64s that marshal as quoted strings in JSON.
+type Int64s []int64
+
+func (q *Int64s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, int64(v))
+ }
+ return nil
+}
+
+// Int32s is a slice of int32s that marshal as quoted strings in JSON.
+type Int32s []int32
+
+func (q *Int32s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, int32(v))
+ }
+ return nil
+}
+
+// Uint64s is a slice of uint64s that marshal as quoted strings in JSON.
+type Uint64s []uint64
+
+func (q *Uint64s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, uint64(v))
+ }
+ return nil
+}
+
+// Uint32s is a slice of uint32s that marshal as quoted strings in JSON.
+type Uint32s []uint32
+
+func (q *Uint32s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, uint32(v))
+ }
+ return nil
+}
+
+// Float64s is a slice of float64s that marshal as quoted strings in JSON.
+type Float64s []float64
+
+func (q *Float64s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, float64(v))
+ }
+ return nil
+}
+
+func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) {
+ dst := make([]byte, 0, 2+n*10) // somewhat arbitrary
+ dst = append(dst, '[')
+ for i := 0; i < n; i++ {
+ if i > 0 {
+ dst = append(dst, ',')
+ }
+ dst = append(dst, '"')
+ dst = fn(dst, i)
+ dst = append(dst, '"')
+ }
+ dst = append(dst, ']')
+ return dst, nil
+}
+
+func (q Int64s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(q), func(dst []byte, i int) []byte {
+ return strconv.AppendInt(dst, q[i], 10)
+ })
+}
+
+func (q Int32s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(q), func(dst []byte, i int) []byte {
+ return strconv.AppendInt(dst, int64(q[i]), 10)
+ })
+}
+
+func (q Uint64s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(q), func(dst []byte, i int) []byte {
+ return strconv.AppendUint(dst, q[i], 10)
+ })
+}
+
+func (q Uint32s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(q), func(dst []byte, i int) []byte {
+ return strconv.AppendUint(dst, uint64(q[i]), 10)
+ })
+}
+
+func (q Float64s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(q), func(dst []byte, i int) []byte {
+ return strconv.AppendFloat(dst, q[i], 'g', -1, 64)
+ })
+}
+
+// RawMessage is a raw encoded JSON value.
+// It is identical to json.RawMessage, except it does not suffer from
+// https://golang.org/issue/14493.
+type RawMessage []byte
+
+// MarshalJSON returns m.
+func (m RawMessage) MarshalJSON() ([]byte, error) {
+ return m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+ if m == nil {
+ return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer")
+ }
+ *m = append((*m)[:0], data...)
+ return nil
+}
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string { return &v }
diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go
index 855604b75d..32d52413b3 100644
--- a/vendor/google.golang.org/api/internal/creds.go
+++ b/vendor/google.golang.org/api/internal/creds.go
@@ -31,6 +31,9 @@ func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) {
}
func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) {
+ if ds.InternalCredentials != nil {
+ return ds.InternalCredentials, nil
+ }
if ds.Credentials != nil {
return ds.Credentials, nil
}
@@ -67,11 +70,12 @@ const (
//
// - A self-signed JWT flow will be executed if the following conditions are
// met:
-// (1) At least one of the following is true:
-// (a) No scope is provided
-// (b) Scope for self-signed JWT flow is enabled
-// (c) Audiences are explicitly provided by users
-// (2) No service account impersontation
+//
+// (1) At least one of the following is true:
+// (a) No scope is provided
+// (b) Scope for self-signed JWT flow is enabled
+// (c) Audiences are explicitly provided by users
+//	(2) No service account impersonation
//
// - Otherwise, executes standard OAuth 2.0 flow
// More details: google.aip.dev/auth/4111
diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
index 89c7bc86fa..76efdb2277 100644
--- a/vendor/google.golang.org/api/internal/settings.go
+++ b/vendor/google.golang.org/api/internal/settings.go
@@ -19,32 +19,34 @@ import (
// DialSettings holds information needed to establish a connection with a
// Google API service.
type DialSettings struct {
- Endpoint string
- DefaultEndpoint string
- DefaultMTLSEndpoint string
- Scopes []string
- DefaultScopes []string
- EnableJwtWithScope bool
- TokenSource oauth2.TokenSource
- Credentials *google.Credentials
- CredentialsFile string // if set, Token Source is ignored.
- CredentialsJSON []byte
- UserAgent string
- APIKey string
- Audiences []string
- DefaultAudience string
- HTTPClient *http.Client
- GRPCDialOpts []grpc.DialOption
- GRPCConn *grpc.ClientConn
- GRPCConnPool ConnPool
- GRPCConnPoolSize int
- NoAuth bool
- TelemetryDisabled bool
- ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
- CustomClaims map[string]interface{}
- SkipValidation bool
- ImpersonationConfig *impersonate.Config
- EnableDirectPath bool
+ Endpoint string
+ DefaultEndpoint string
+ DefaultMTLSEndpoint string
+ Scopes []string
+ DefaultScopes []string
+ EnableJwtWithScope bool
+ TokenSource oauth2.TokenSource
+ Credentials *google.Credentials
+ CredentialsFile string // if set, Token Source is ignored.
+ CredentialsJSON []byte
+ InternalCredentials *google.Credentials
+ UserAgent string
+ APIKey string
+ Audiences []string
+ DefaultAudience string
+ HTTPClient *http.Client
+ GRPCDialOpts []grpc.DialOption
+ GRPCConn *grpc.ClientConn
+ GRPCConnPool ConnPool
+ GRPCConnPoolSize int
+ NoAuth bool
+ TelemetryDisabled bool
+ ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
+ CustomClaims map[string]interface{}
+ SkipValidation bool
+ ImpersonationConfig *impersonate.Config
+ EnableDirectPath bool
+ AllowNonDefaultServiceAccount bool
// Google API system parameters. For more information please read:
// https://cloud.google.com/apis/docs/system-parameters
diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE
new file mode 100644
index 0000000000..7109c6ef93
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 Joshua Tacoma. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA
new file mode 100644
index 0000000000..c7f86fcd5f
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA
@@ -0,0 +1,14 @@
+name: "uritemplates"
+description:
+ "Package uritemplates is a level 4 implementation of RFC 6570 (URI "
+ "Template, http://tools.ietf.org/html/rfc6570)."
+
+third_party {
+ url {
+ type: GIT
+ value: "https://github.com/jtacoma/uritemplates"
+ }
+ version: "0.1"
+ last_upgrade_date { year: 2014 month: 8 day: 18 }
+ license_type: NOTICE
+}
diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go
new file mode 100644
index 0000000000..8c27d19d75
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go
@@ -0,0 +1,248 @@
+// Copyright 2013 Joshua Tacoma. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uritemplates is a level 3 implementation of RFC 6570 (URI
+// Template, http://tools.ietf.org/html/rfc6570).
+// uritemplates does not support composite values (in Go: slices or maps)
+// and so does not qualify as a level 4 implementation.
+package uritemplates
+
+import (
+ "bytes"
+ "errors"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
+ reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
+ validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
+ hex = []byte("0123456789ABCDEF")
+)
+
+func pctEncode(src []byte) []byte {
+ dst := make([]byte, len(src)*3)
+ for i, b := range src {
+ buf := dst[i*3 : i*3+3]
+ buf[0] = 0x25
+ buf[1] = hex[b/16]
+ buf[2] = hex[b%16]
+ }
+ return dst
+}
+
+// pairWriter is a convenience struct which allows escaped and unescaped
+// versions of the template to be written in parallel.
+type pairWriter struct {
+ escaped, unescaped bytes.Buffer
+}
+
+// Write writes the provided string directly without any escaping.
+func (w *pairWriter) Write(s string) {
+ w.escaped.WriteString(s)
+ w.unescaped.WriteString(s)
+}
+
+// Escape writes the provided string, escaping the string for the
+// escaped output.
+func (w *pairWriter) Escape(s string, allowReserved bool) {
+ w.unescaped.WriteString(s)
+ if allowReserved {
+ w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode))
+ } else {
+ w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
+ }
+}
+
+// Escaped returns the escaped string.
+func (w *pairWriter) Escaped() string {
+ return w.escaped.String()
+}
+
+// Unescaped returns the unescaped string.
+func (w *pairWriter) Unescaped() string {
+ return w.unescaped.String()
+}
+
+// A uriTemplate is a parsed representation of a URI template.
+type uriTemplate struct {
+ raw string
+ parts []templatePart
+}
+
+// parse parses a URI template string into a uriTemplate object.
+func parse(rawTemplate string) (*uriTemplate, error) {
+ split := strings.Split(rawTemplate, "{")
+ parts := make([]templatePart, len(split)*2-1)
+ for i, s := range split {
+ if i == 0 {
+ if strings.Contains(s, "}") {
+ return nil, errors.New("unexpected }")
+ }
+ parts[i].raw = s
+ continue
+ }
+ subsplit := strings.Split(s, "}")
+ if len(subsplit) != 2 {
+ return nil, errors.New("malformed template")
+ }
+ expression := subsplit[0]
+ var err error
+ parts[i*2-1], err = parseExpression(expression)
+ if err != nil {
+ return nil, err
+ }
+ parts[i*2].raw = subsplit[1]
+ }
+ return &uriTemplate{
+ raw: rawTemplate,
+ parts: parts,
+ }, nil
+}
+
+type templatePart struct {
+ raw string
+ terms []templateTerm
+ first string
+ sep string
+ named bool
+ ifemp string
+ allowReserved bool
+}
+
+type templateTerm struct {
+ name string
+ explode bool
+ truncate int
+}
+
+func parseExpression(expression string) (result templatePart, err error) {
+ switch expression[0] {
+ case '+':
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ case '.':
+ result.first = "."
+ result.sep = "."
+ expression = expression[1:]
+ case '/':
+ result.first = "/"
+ result.sep = "/"
+ expression = expression[1:]
+ case ';':
+ result.first = ";"
+ result.sep = ";"
+ result.named = true
+ expression = expression[1:]
+ case '?':
+ result.first = "?"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '&':
+ result.first = "&"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '#':
+ result.first = "#"
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ default:
+ result.sep = ","
+ }
+ rawterms := strings.Split(expression, ",")
+ result.terms = make([]templateTerm, len(rawterms))
+ for i, raw := range rawterms {
+ result.terms[i], err = parseTerm(raw)
+ if err != nil {
+ break
+ }
+ }
+ return result, err
+}
+
+func parseTerm(term string) (result templateTerm, err error) {
+ // TODO(djd): Remove "*" suffix parsing once we check that no APIs have
+ // mistakenly used that attribute.
+ if strings.HasSuffix(term, "*") {
+ result.explode = true
+ term = term[:len(term)-1]
+ }
+ split := strings.Split(term, ":")
+ if len(split) == 1 {
+ result.name = term
+ } else if len(split) == 2 {
+ result.name = split[0]
+ var parsed int64
+ parsed, err = strconv.ParseInt(split[1], 10, 0)
+ result.truncate = int(parsed)
+ } else {
+ err = errors.New("multiple colons in same term")
+ }
+ if !validname.MatchString(result.name) {
+ err = errors.New("not a valid name: " + result.name)
+ }
+ if result.explode && result.truncate > 0 {
+ err = errors.New("both explode and prefix modifiers on same term")
+ }
+ return result, err
+}
+
+// Expand expands a URI template with a set of values to produce the
+// resultant URI. Two forms of the result are returned: one with all the
+// elements escaped, and one with the elements unescaped.
+func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) {
+ var w pairWriter
+ for _, p := range t.parts {
+ p.expand(&w, values)
+ }
+ return w.Escaped(), w.Unescaped()
+}
+
+func (tp *templatePart) expand(w *pairWriter, values map[string]string) {
+ if len(tp.raw) > 0 {
+ w.Write(tp.raw)
+ return
+ }
+ var first = true
+ for _, term := range tp.terms {
+ value, exists := values[term.name]
+ if !exists {
+ continue
+ }
+ if first {
+ w.Write(tp.first)
+ first = false
+ } else {
+ w.Write(tp.sep)
+ }
+ tp.expandString(w, term, value)
+ }
+}
+
+func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) {
+ if tp.named {
+ w.Write(name)
+ if empty {
+ w.Write(tp.ifemp)
+ } else {
+ w.Write("=")
+ }
+ }
+}
+
+func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) {
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ tp.expandName(w, t.name, len(s) == 0)
+ w.Escape(s, tp.allowReserved)
+}
diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go
new file mode 100644
index 0000000000..2e70b81543
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go
@@ -0,0 +1,17 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uritemplates
+
+// Expand parses then expands a URI template with a set of values to produce
+// the resultant URI. Two forms of the result are returned: one with all the
+// elements escaped, and one with the elements unescaped.
+func Expand(path string, values map[string]string) (escaped, unescaped string, err error) {
+ template, err := parse(path)
+ if err != nil {
+ return "", "", err
+ }
+ escaped, unescaped = template.Expand(values)
+ return escaped, unescaped, nil
+}
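A usage sketch of the exported entry point (illustrative only: the package sits under internal/, so it is not importable outside this module):

package main

import (
	"fmt"

	"google.golang.org/api/internal/third_party/uritemplates"
)

func main() {
	// Simple expansion percent-encodes; the "+" operator allows reserved
	// characters such as "/" to pass through unescaped.
	escaped, unescaped, err := uritemplates.Expand(
		"users/{user}/objects/{+path}",
		map[string]string{"user": "sean gabriel", "path": "a/b/c"},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(escaped)   // users/sean%20gabriel/objects/a/b/c
	fmt.Println(unescaped) // users/sean gabriel/objects/a/b/c
}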
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
new file mode 100644
index 0000000000..b5a685e705
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -0,0 +1,8 @@
+// Copyright 2022 Google LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+// Version is the current tagged release of the library.
+const Version = "0.93.0"
diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go
deleted file mode 100644
index d06f918b0e..0000000000
--- a/vendor/google.golang.org/api/option/credentials_go19.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2018 Google LLC.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.9
-
-package option
-
-import (
- "golang.org/x/oauth2/google"
- "google.golang.org/api/internal"
-)
-
-type withCreds google.Credentials
-
-func (w *withCreds) Apply(o *internal.DialSettings) {
- o.Credentials = (*google.Credentials)(w)
-}
-
-// WithCredentials returns a ClientOption that authenticates API calls.
-func WithCredentials(creds *google.Credentials) ClientOption {
- return (*withCreds)(creds)
-}
diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go
deleted file mode 100644
index 0ce107a624..0000000000
--- a/vendor/google.golang.org/api/option/credentials_notgo19.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2018 Google LLC.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.9
-
-package option
-
-import (
- "golang.org/x/oauth2/google"
- "google.golang.org/api/internal"
-)
-
-type withCreds google.DefaultCredentials
-
-func (w *withCreds) Apply(o *internal.DialSettings) {
- o.Credentials = (*google.DefaultCredentials)(w)
-}
-
-func WithCredentials(creds *google.DefaultCredentials) ClientOption {
- return (*withCreds)(creds)
-}
diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go
index ed0b7aaf13..343a5a965e 100644
--- a/vendor/google.golang.org/api/option/internaloption/internaloption.go
+++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go
@@ -6,6 +6,7 @@
package internaloption
import (
+ "golang.org/x/oauth2/google"
"google.golang.org/api/internal"
"google.golang.org/api/option"
)
@@ -66,6 +67,21 @@ func (e enableDirectPath) Apply(o *internal.DialSettings) {
o.EnableDirectPath = bool(e)
}
+// AllowNonDefaultServiceAccount returns a ClientOption that overrides the default
+// requirement for using the default service account for DirectPath.
+//
+// It should only be used internally by generated clients.
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+func AllowNonDefaultServiceAccount(nd bool) option.ClientOption {
+ return allowNonDefaultServiceAccount(nd)
+}
+
+type allowNonDefaultServiceAccount bool
+
+func (a allowNonDefaultServiceAccount) Apply(o *internal.DialSettings) {
+ o.AllowNonDefaultServiceAccount = bool(a)
+}
+
// WithDefaultAudience returns a ClientOption that specifies a default audience
// to be used as the audience field ("aud") for the JWT token authentication.
//
@@ -106,3 +122,15 @@ type enableJwtWithScope bool
func (w enableJwtWithScope) Apply(o *internal.DialSettings) {
o.EnableJwtWithScope = bool(w)
}
+
+// WithCredentials returns a client option to specify credentials which will be used to authenticate API calls.
+// This credential takes precedence over all other credential options.
+func WithCredentials(creds *google.Credentials) option.ClientOption {
+ return (*withCreds)(creds)
+}
+
+type withCreds google.Credentials
+
+func (w *withCreds) Apply(o *internal.DialSettings) {
+ o.InternalCredentials = (*google.Credentials)(w)
+}
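A sketch of how a generated client might combine the options added here (the option slice and audience URL are illustrative; only constructors defined in this file are used):

package foo

import (
	"golang.org/x/oauth2/google"
	"google.golang.org/api/option"
	"google.golang.org/api/option/internaloption"
)

// defaultClientOptions assembles internal-only options for a hypothetical
// generated client.
func defaultClientOptions(creds *google.Credentials) []option.ClientOption {
	return []option.ClientOption{
		internaloption.WithDefaultAudience("https://foo.googleapis.com/"),
		internaloption.EnableDirectPath(true),
		internaloption.AllowNonDefaultServiceAccount(true),
		// Unlike option.WithCredentials, this lands in InternalCredentials
		// and takes precedence over all other credential options.
		internaloption.WithCredentials(creds),
	}
}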
diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go
index 9ff697e0b8..27ba9eab01 100644
--- a/vendor/google.golang.org/api/option/option.go
+++ b/vendor/google.golang.org/api/option/option.go
@@ -10,6 +10,7 @@ import (
"net/http"
"golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
"google.golang.org/api/internal"
"google.golang.org/api/internal/impersonate"
"google.golang.org/grpc"
@@ -144,8 +145,6 @@ func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
// connections that requests will be balanced between.
-//
-// This is an EXPERIMENTAL API and may be changed or removed in the future.
func WithGRPCConnectionPool(size int) ClientOption {
return withGRPCConnectionPool(size)
}
@@ -288,10 +287,10 @@ func (w withClientCertSource) Apply(o *internal.DialSettings) {
// service account SA2 while using delegate service accounts DSA1 and DSA2,
// the following must be true:
//
-// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on
-// DSA1.
-// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2.
-// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2.
+// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on
+// DSA1.
+// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2.
+// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2.
//
// The resulting impersonated credential will either have the default scopes of
// the client being instantiated or the scopes from WithScopes if provided.
@@ -328,3 +327,14 @@ func (i impersonateServiceAccount) Apply(o *internal.DialSettings) {
o.ImpersonationConfig.Delegates = make([]string, len(i.delegates))
copy(o.ImpersonationConfig.Delegates, i.delegates)
}
+
+type withCreds google.Credentials
+
+func (w *withCreds) Apply(o *internal.DialSettings) {
+ o.Credentials = (*google.Credentials)(w)
+}
+
+// WithCredentials returns a ClientOption that authenticates API calls.
+func WithCredentials(creds *google.Credentials) ClientOption {
+ return (*withCreds)(creds)
+}
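With the go1.9 build-tag split deleted above (the module now requires a modern Go), WithCredentials lives here unconditionally. A usage sketch with Application Default Credentials:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	// Pass to any client constructor that accepts option.ClientOption.
	opt := option.WithCredentials(creds)
	_ = opt
}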
diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/transport/cert/default_cert.go
index 04aefec0af..21d0251531 100644
--- a/vendor/google.golang.org/api/transport/cert/default_cert.go
+++ b/vendor/google.golang.org/api/transport/cert/default_cert.go
@@ -14,32 +14,19 @@ package cert
import (
"crypto/tls"
- "crypto/x509"
- "encoding/json"
"errors"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "os/user"
- "path/filepath"
"sync"
- "time"
-)
-
-const (
- metadataPath = ".secureConnect"
- metadataFile = "context_aware_metadata.json"
)
// defaultCertData holds all the variables pertaining to
// the default certificate source created by DefaultSource.
+//
+// A singleton model is used to allow the source to be reused
+// by the transport layer.
type defaultCertData struct {
- once sync.Once
- source Source
- err error
- cachedCertMutex sync.Mutex
- cachedCert *tls.Certificate
+ once sync.Once
+ source Source
+ err error
}
var (
@@ -49,93 +36,23 @@ var (
// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate.
type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
-// DefaultSource returns a certificate source that execs the command specified
-// in the file at ~/.secureConnect/context_aware_metadata.json
+// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable.
+var errSourceUnavailable = errors.New("certificate source is unavailable")
+
+// DefaultSource returns a certificate source using the preferred EnterpriseCertificateProxySource.
+// If EnterpriseCertificateProxySource is not available, it falls back to the legacy SecureConnectSource.
//
-// If that file does not exist, a nil source is returned.
+// If neither source is available (due to missing configurations), a nil Source and a nil error are
+// returned to indicate that a default certificate source is unavailable.
func DefaultSource() (Source, error) {
defaultCert.once.Do(func() {
- defaultCert.source, defaultCert.err = newSecureConnectSource()
+ defaultCert.source, defaultCert.err = NewEnterpriseCertificateProxySource("")
+ if errors.Is(defaultCert.err, errSourceUnavailable) {
+ defaultCert.source, defaultCert.err = NewSecureConnectSource("")
+ if errors.Is(defaultCert.err, errSourceUnavailable) {
+ defaultCert.source, defaultCert.err = nil, nil
+ }
+ }
})
return defaultCert.source, defaultCert.err
}
-
-type secureConnectSource struct {
- metadata secureConnectMetadata
-}
-
-type secureConnectMetadata struct {
- Cmd []string `json:"cert_provider_command"`
-}
-
-// newSecureConnectSource creates a secureConnectSource by reading the well-known file.
-func newSecureConnectSource() (Source, error) {
- user, err := user.Current()
- if err != nil {
- // Ignore.
- return nil, nil
- }
- filename := filepath.Join(user.HomeDir, metadataPath, metadataFile)
- file, err := ioutil.ReadFile(filename)
- if os.IsNotExist(err) {
- // Ignore.
- return nil, nil
- }
- if err != nil {
- return nil, err
- }
-
- var metadata secureConnectMetadata
- if err := json.Unmarshal(file, &metadata); err != nil {
- return nil, fmt.Errorf("cert: could not parse JSON in %q: %v", filename, err)
- }
- if err := validateMetadata(metadata); err != nil {
- return nil, fmt.Errorf("cert: invalid config in %q: %v", filename, err)
- }
- return (&secureConnectSource{
- metadata: metadata,
- }).getClientCertificate, nil
-}
-
-func validateMetadata(metadata secureConnectMetadata) error {
- if len(metadata.Cmd) == 0 {
- return errors.New("empty cert_provider_command")
- }
- return nil
-}
-
-func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- defaultCert.cachedCertMutex.Lock()
- defer defaultCert.cachedCertMutex.Unlock()
- if defaultCert.cachedCert != nil && !isCertificateExpired(defaultCert.cachedCert) {
- return defaultCert.cachedCert, nil
- }
- // Expand OS environment variables in the cert provider command such as "$HOME".
- for i := 0; i < len(s.metadata.Cmd); i++ {
- s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i])
- }
- command := s.metadata.Cmd
- data, err := exec.Command(command[0], command[1:]...).Output()
- if err != nil {
- // TODO(cbro): read stderr for error message? Might contain sensitive info.
- return nil, err
- }
- cert, err := tls.X509KeyPair(data, data)
- if err != nil {
- return nil, err
- }
- defaultCert.cachedCert = &cert
- return &cert, nil
-}
-
-// isCertificateExpired returns true if the given cert is expired or invalid.
-func isCertificateExpired(cert *tls.Certificate) bool {
- if len(cert.Certificate) == 0 {
- return true
- }
- parsed, err := x509.ParseCertificate(cert.Certificate[0])
- if err != nil {
- return true
- }
- return time.Now().After(parsed.NotAfter)
-}
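A usage sketch from the caller's side of the new fallback chain — DefaultSource may now return (nil, nil) when neither provider is configured, so both return values must be checked (the tls.Config wiring is illustrative):

package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/api/transport/cert"
)

func main() {
	src, err := cert.DefaultSource()
	if err != nil {
		log.Fatal(err) // a real misconfiguration, not mere absence
	}
	cfg := &tls.Config{}
	if src != nil {
		// cert.Source has the exact signature GetClientCertificate expects.
		cfg.GetClientCertificate = src
	}
	_ = cfg
}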
diff --git a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go b/vendor/google.golang.org/api/transport/cert/enterprise_cert.go
new file mode 100644
index 0000000000..eaa52e07c0
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/cert/enterprise_cert.go
@@ -0,0 +1,56 @@
+// Copyright 2022 Google LLC.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cert contains certificate tools for Google API clients.
+// This package is intended to be used with crypto/tls.Config.GetClientCertificate.
+//
+// The certificates can be used to satisfy Google's Endpoint Validation.
+// See https://cloud.google.com/endpoint-verification/docs/overview
+//
+// This package is not intended for use by end developers. Use the
+// google.golang.org/api/option package to configure API clients.
+package cert
+
+import (
+ "crypto/tls"
+ "errors"
+ "os"
+
+ "github.com/googleapis/enterprise-certificate-proxy/client"
+)
+
+type ecpSource struct {
+ key *client.Key
+}
+
+// NewEnterpriseCertificateProxySource creates a certificate source
+// using the Enterprise Certificate Proxy client, which delegates
+// certificate-related operations to an OS-specific "signer binary"
+// that communicates with the native keystore (e.g., the Keychain on macOS).
+//
+// The configFilePath points to a config file containing relevant parameters
+// such as the certificate issuer and the location of the signer binary.
+// If configFilePath is empty, the client will attempt to load the config from
+// a well-known gcloud location.
+func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) {
+ key, err := client.Cred(configFilePath)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ // Config file missing means Enterprise Certificate Proxy is not supported.
+ return nil, errSourceUnavailable
+ }
+ return nil, err
+ }
+
+ return (&ecpSource{
+ key: key,
+ }).getClientCertificate, nil
+}
+
+func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ var cert tls.Certificate
+ cert.PrivateKey = s.key
+ cert.Certificate = s.key.CertificateChain()
+ return &cert, nil
+}
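The assignment of s.key to cert.PrivateKey above relies on the proxy key acting as a TLS-capable private key: crypto/tls requires PrivateKey to implement crypto.Signer, so every handshake signature is delegated to the OS keystore via the signer binary. A compile-time sketch making that assumption explicit:

package cert

import (
	"crypto"

	"github.com/googleapis/enterprise-certificate-proxy/client"
)

// Assumption made explicit: client.Key must satisfy crypto.Signer for the
// certificate returned by getClientCertificate to work in a TLS handshake.
var _ crypto.Signer = (*client.Key)(nil)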
diff --git a/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go b/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go
new file mode 100644
index 0000000000..5913cab801
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go
@@ -0,0 +1,123 @@
+// Copyright 2022 Google LLC.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cert contains certificate tools for Google API clients.
+// This package is intended to be used with crypto/tls.Config.GetClientCertificate.
+//
+// The certificates can be used to satisfy Google's Endpoint Validation.
+// See https://cloud.google.com/endpoint-verification/docs/overview
+//
+// This package is not intended for use by end developers. Use the
+// google.golang.org/api/option package to configure API clients.
+package cert
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "os/user"
+ "path/filepath"
+ "sync"
+ "time"
+)
+
+const (
+ metadataPath = ".secureConnect"
+ metadataFile = "context_aware_metadata.json"
+)
+
+type secureConnectSource struct {
+ metadata secureConnectMetadata
+
+ // Cache the cert to avoid executing helper command repeatedly.
+ cachedCertMutex sync.Mutex
+ cachedCert *tls.Certificate
+}
+
+type secureConnectMetadata struct {
+ Cmd []string `json:"cert_provider_command"`
+}
+
+// NewSecureConnectSource creates a certificate source using
+// the Secure Connect Helper and its associated metadata file.
+//
+// The configFilePath points to the location of the context aware metadata file.
+// If configFilePath is empty, use the default context aware metadata location.
+func NewSecureConnectSource(configFilePath string) (Source, error) {
+ if configFilePath == "" {
+ user, err := user.Current()
+ if err != nil {
+ // Error locating the default config means Secure Connect is not supported.
+ return nil, errSourceUnavailable
+ }
+ configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile)
+ }
+
+ file, err := ioutil.ReadFile(configFilePath)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ // Config file missing means Secure Connect is not supported.
+ return nil, errSourceUnavailable
+ }
+ return nil, err
+ }
+
+ var metadata secureConnectMetadata
+ if err := json.Unmarshal(file, &metadata); err != nil {
+ return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err)
+ }
+ if err := validateMetadata(metadata); err != nil {
+ return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err)
+ }
+ return (&secureConnectSource{
+ metadata: metadata,
+ }).getClientCertificate, nil
+}
+
+func validateMetadata(metadata secureConnectMetadata) error {
+ if len(metadata.Cmd) == 0 {
+ return errors.New("empty cert_provider_command")
+ }
+ return nil
+}
+
+func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ s.cachedCertMutex.Lock()
+ defer s.cachedCertMutex.Unlock()
+ if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) {
+ return s.cachedCert, nil
+ }
+ // Expand OS environment variables in the cert provider command such as "$HOME".
+ for i := 0; i < len(s.metadata.Cmd); i++ {
+ s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i])
+ }
+ command := s.metadata.Cmd
+ data, err := exec.Command(command[0], command[1:]...).Output()
+ if err != nil {
+ return nil, err
+ }
+ cert, err := tls.X509KeyPair(data, data)
+ if err != nil {
+ return nil, err
+ }
+ s.cachedCert = &cert
+ return &cert, nil
+}
+
+// isCertificateExpired returns true if the given cert is expired or invalid.
+func isCertificateExpired(cert *tls.Certificate) bool {
+ if len(cert.Certificate) == 0 {
+ return true
+ }
+ parsed, err := x509.ParseCertificate(cert.Certificate[0])
+ if err != nil {
+ return true
+ }
+ return time.Now().After(parsed.NotAfter)
+}
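A sketch of the metadata file this source reads (the helper command shown is hypothetical; "$HOME" is expanded by os.ExpandEnv at exec time, as above):

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// metadata mirrors secureConnectMetadata above.
type metadata struct {
	Cmd []string `json:"cert_provider_command"`
}

func main() {
	// Typical contents of ~/.secureConnect/context_aware_metadata.json:
	raw := []byte(`{"cert_provider_command": ["$HOME/bin/cert-helper", "--print-pem"]}`)
	var m metadata
	if err := json.Unmarshal(raw, &m); err != nil {
		log.Fatal(err)
	}
	fmt.Println(m.Cmd) // [$HOME/bin/cert-helper --print-pem]
}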
diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go
index 727a5beff1..c86f56507f 100644
--- a/vendor/google.golang.org/api/transport/grpc/dial.go
+++ b/vendor/google.golang.org/api/transport/grpc/dial.go
@@ -12,6 +12,8 @@ import (
"crypto/tls"
"errors"
"log"
+ "net"
+ "os"
"strings"
"cloud.google.com/go/compute/metadata"
@@ -29,6 +31,12 @@ import (
_ "google.golang.org/grpc/balancer/grpclb"
)
+// Check env to disable DirectPath traffic.
+const disableDirectPath = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH"
+
+// Check env to decide if using google-c2p resolver for DirectPath traffic.
+const enableDirectPathXds = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS"
+
// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
var appengineDialerHook func(context.Context) grpc.DialOption
@@ -134,23 +142,31 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
o.QuotaProject = internal.QuotaProjectFromCreds(creds)
}
- // Attempt Direct Path only if:
- // * The endpoint is a host:port (or dns:///host:port).
- // * Credentials are obtained via GCE metadata server, using the default
- // service account.
- if o.EnableDirectPath && checkDirectPathEndPoint(endpoint) && isTokenSourceDirectPathCompatible(creds.TokenSource) && metadata.OnGCE() {
- if !strings.HasPrefix(endpoint, "dns:///") {
- endpoint = "dns:///" + endpoint
- }
+ // Attempt Direct Path:
+ if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() {
grpcOpts = []grpc.DialOption{
- grpc.WithCredentialsBundle(
- grpcgoogle.NewComputeEngineCredentials(),
- ),
- // For now all DirectPath go clients will be using the following lb config, but in future
- // when different services need different configs, then we should change this to a
- // per-service config.
- grpc.WithDisableServiceConfig(),
- grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`),
+ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{oauth.TokenSource{creds.TokenSource}}))}
+ if timeoutDialerOption != nil {
+ grpcOpts = append(grpcOpts, timeoutDialerOption)
+ }
+ // Check if google-c2p resolver is enabled for DirectPath
+ if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") {
+ // google-c2p resolver target must not have a port number
+ if addr, _, err := net.SplitHostPort(endpoint); err == nil {
+ endpoint = "google-c2p-experimental:///" + addr
+ } else {
+ endpoint = "google-c2p-experimental:///" + endpoint
+ }
+ } else {
+ if !strings.HasPrefix(endpoint, "dns:///") {
+ endpoint = "dns:///" + endpoint
+ }
+ grpcOpts = append(grpcOpts,
+ // For now all DirectPath go clients will be using the following lb config, but in future
+ // when different services need different configs, then we should change this to a
+ // per-service config.
+ grpc.WithDisableServiceConfig(),
+ grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`))
}
// TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor.
} else {
@@ -183,14 +199,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
}
- // TODO(weiranf): This socketopt dialer will be used by default at some
- // point when isDirectPathEnabled will default to true, we guard it by
- // the Directpath env var for now once we can introspect user defined
- // dialer (https://github.com/grpc/grpc-go/issues/2795).
- if timeoutDialerOption != nil && o.EnableDirectPath && checkDirectPathEndPoint(endpoint) && metadata.OnGCE() {
- grpcOpts = append(grpcOpts, timeoutDialerOption)
- }
-
return grpc.DialContext(ctx, endpoint, grpcOpts...)
}
@@ -228,7 +236,20 @@ func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string)
return metadata, nil
}
-func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource) bool {
+func isDirectPathEnabled(endpoint string, o *internal.DialSettings) bool {
+ if !o.EnableDirectPath {
+ return false
+ }
+ if !checkDirectPathEndPoint(endpoint) {
+ return false
+ }
+ if strings.EqualFold(os.Getenv(disableDirectPath), "true") {
+ return false
+ }
+ return true
+}
+
+func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource, o *internal.DialSettings) bool {
if ts == nil {
return false
}
@@ -239,6 +260,9 @@ func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource) bool {
if tok == nil {
return false
}
+ if o.AllowNonDefaultServiceAccount {
+ return true
+ }
if source, _ := tok.Extra("oauth2.google.tokenSource").(string); source != "compute-metadata" {
return false
}
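A sketch isolating the endpoint rewrite that runs when GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS is "true" — the google-c2p resolver target must not carry a port, so one is stripped when present:

package main

import (
	"fmt"
	"net"
)

func c2pTarget(endpoint string) string {
	if addr, _, err := net.SplitHostPort(endpoint); err == nil {
		return "google-c2p-experimental:///" + addr
	}
	return "google-c2p-experimental:///" + endpoint
}

func main() {
	fmt.Println(c2pTarget("storage.googleapis.com:443"))
	// google-c2p-experimental:///storage.googleapis.com
}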
diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go
index 2c6aef2264..fd3dc0565d 100644
--- a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go
+++ b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build appengine
// +build appengine
package grpc
diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go
index 0e4f388968..4bf9e82172 100644
--- a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go
+++ b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build go1.11 && linux
// +build go1.11,linux
package grpc
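These two hunks pair the legacy // +build lines with the //go:build syntax introduced in Go 1.17; keeping both lets older toolchains keep honoring the constraint. The canonical layout, as it now appears at the top of dial_socketopt.go:

//go:build go1.11 && linux
// +build go1.11,linux

package grpc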
diff --git a/vendor/google.golang.org/api/transport/internal/dca/dca.go b/vendor/google.golang.org/api/transport/internal/dca/dca.go
index 071586e944..78004f0475 100644
--- a/vendor/google.golang.org/api/transport/internal/dca/dca.go
+++ b/vendor/google.golang.org/api/transport/internal/dca/dca.go
@@ -6,17 +6,17 @@
// Authentication according to https://google.aip.dev/auth/4114
//
// The overall logic for DCA is as follows:
-// 1. If both endpoint override and client certificate are specified, use them as is.
-// 2. If user does not specify client certificate, we will attempt to use default
-// client certificate.
-// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if
-// client certificate is available and defaultEndpoint otherwise.
+// 1. If both endpoint override and client certificate are specified, use them as is.
+// 2. If user does not specify client certificate, we will attempt to use default
+// client certificate.
+// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if
+// client certificate is available and defaultEndpoint otherwise.
//
// Implications of the above logic:
-// 1. If the user specifies a non-mTLS endpoint override but client certificate is
-// available, we will pass along the cert anyway and let the server decide what to do.
-// 2. If the user specifies an mTLS endpoint override but client certificate is not
-// available, we will not fail-fast, but let backend throw error when connecting.
+// 1. If the user specifies a non-mTLS endpoint override but client certificate is
+// available, we will pass along the cert anyway and let the server decide what to do.
+// 2. If the user specifies an mTLS endpoint override but client certificate is not
+// available, we will not fail-fast, but let backend throw error when connecting.
//
// We would like to avoid introducing client-side logic that parses whether the
// endpoint override is an mTLS URL, since the URL pattern may change at any time.
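A hedged sketch of the selection rules in the comment above (hypothetical helper; the real logic threads these inputs through DialSettings):

package dca

// chooseEndpoint condenses rules 1-3: an explicit override always wins, and
// absent one, the presence of a client certificate selects the mTLS endpoint.
func chooseEndpoint(override, defaultEndpoint, defaultMTLSEndpoint string, haveClientCert bool) string {
	if override != "" {
		return override // rule 1: user-specified endpoint, cert passed along as-is
	}
	if haveClientCert {
		return defaultMTLSEndpoint // rule 3: cert available, prefer mTLS endpoint
	}
	return defaultEndpoint
}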
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
new file mode 100644
index 0000000000..1258803152
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
@@ -0,0 +1,336 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.12.2
+// source: google/rpc/code.proto
+
+package code
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The canonical error codes for gRPC APIs.
+//
+//
+// Sometimes multiple error codes may apply. Services should return
+// the most specific error code that applies. For example, prefer
+// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
+// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
+type Code int32
+
+const (
+ // Not an error; returned on success
+ //
+ // HTTP Mapping: 200 OK
+ Code_OK Code = 0
+ // The operation was cancelled, typically by the caller.
+ //
+ // HTTP Mapping: 499 Client Closed Request
+ Code_CANCELLED Code = 1
+ // Unknown error. For example, this error may be returned when
+ // a `Status` value received from another address space belongs to
+ // an error space that is not known in this address space. Also
+ // errors raised by APIs that do not return enough error information
+ // may be converted to this error.
+ //
+ // HTTP Mapping: 500 Internal Server Error
+ Code_UNKNOWN Code = 2
+ // The client specified an invalid argument. Note that this differs
+ // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments
+ // that are problematic regardless of the state of the system
+ // (e.g., a malformed file name).
+ //
+ // HTTP Mapping: 400 Bad Request
+ Code_INVALID_ARGUMENT Code = 3
+ // The deadline expired before the operation could complete. For operations
+ // that change the state of the system, this error may be returned
+ // even if the operation has completed successfully. For example, a
+ // successful response from a server could have been delayed long
+ // enough for the deadline to expire.
+ //
+ // HTTP Mapping: 504 Gateway Timeout
+ Code_DEADLINE_EXCEEDED Code = 4
+ // Some requested entity (e.g., file or directory) was not found.
+ //
+ // Note to server developers: if a request is denied for an entire class
+ // of users, such as gradual feature rollout or undocumented whitelist,
+ // `NOT_FOUND` may be used. If a request is denied for some users within
+ // a class of users, such as user-based access control, `PERMISSION_DENIED`
+ // must be used.
+ //
+ // HTTP Mapping: 404 Not Found
+ Code_NOT_FOUND Code = 5
+ // The entity that a client attempted to create (e.g., file or directory)
+ // already exists.
+ //
+ // HTTP Mapping: 409 Conflict
+ Code_ALREADY_EXISTS Code = 6
+ // The caller does not have permission to execute the specified
+ // operation. `PERMISSION_DENIED` must not be used for rejections
+ // caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
+ // instead for those errors). `PERMISSION_DENIED` must not be
+ // used if the caller can not be identified (use `UNAUTHENTICATED`
+ // instead for those errors). This error code does not imply the
+ // request is valid or the requested entity exists or satisfies
+ // other pre-conditions.
+ //
+ // HTTP Mapping: 403 Forbidden
+ Code_PERMISSION_DENIED Code = 7
+ // The request does not have valid authentication credentials for the
+ // operation.
+ //
+ // HTTP Mapping: 401 Unauthorized
+ Code_UNAUTHENTICATED Code = 16
+ // Some resource has been exhausted, perhaps a per-user quota, or
+ // perhaps the entire file system is out of space.
+ //
+ // HTTP Mapping: 429 Too Many Requests
+ Code_RESOURCE_EXHAUSTED Code = 8
+ // The operation was rejected because the system is not in a state
+ // required for the operation's execution. For example, the directory
+ // to be deleted is non-empty, an rmdir operation is applied to
+ // a non-directory, etc.
+ //
+ // Service implementors can use the following guidelines to decide
+ // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
+ // (a) Use `UNAVAILABLE` if the client can retry just the failing call.
+ // (b) Use `ABORTED` if the client should retry at a higher level
+ // (e.g., when a client-specified test-and-set fails, indicating the
+ // client should restart a read-modify-write sequence).
+ // (c) Use `FAILED_PRECONDITION` if the client should not retry until
+ // the system state has been explicitly fixed. E.g., if an "rmdir"
+ // fails because the directory is non-empty, `FAILED_PRECONDITION`
+ // should be returned since the client should not retry unless
+ // the files are deleted from the directory.
+ //
+ // HTTP Mapping: 400 Bad Request
+ Code_FAILED_PRECONDITION Code = 9
+ // The operation was aborted, typically due to a concurrency issue such as
+ // a sequencer check failure or transaction abort.
+ //
+ // See the guidelines above for deciding between `FAILED_PRECONDITION`,
+ // `ABORTED`, and `UNAVAILABLE`.
+ //
+ // HTTP Mapping: 409 Conflict
+ Code_ABORTED Code = 10
+ // The operation was attempted past the valid range. E.g., seeking or
+ // reading past end-of-file.
+ //
+ // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
+ // be fixed if the system state changes. For example, a 32-bit file
+ // system will generate `INVALID_ARGUMENT` if asked to read at an
+ // offset that is not in the range [0,2^32-1], but it will generate
+ // `OUT_OF_RANGE` if asked to read from an offset past the current
+ // file size.
+ //
+ // There is a fair bit of overlap between `FAILED_PRECONDITION` and
+ // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific
+ // error) when it applies so that callers who are iterating through
+ // a space can easily look for an `OUT_OF_RANGE` error to detect when
+ // they are done.
+ //
+ // HTTP Mapping: 400 Bad Request
+ Code_OUT_OF_RANGE Code = 11
+ // The operation is not implemented or is not supported/enabled in this
+ // service.
+ //
+ // HTTP Mapping: 501 Not Implemented
+ Code_UNIMPLEMENTED Code = 12
+ // Internal errors. This means that some invariants expected by the
+ // underlying system have been broken. This error code is reserved
+ // for serious errors.
+ //
+ // HTTP Mapping: 500 Internal Server Error
+ Code_INTERNAL Code = 13
+ // The service is currently unavailable. This is most likely a
+ // transient condition, which can be corrected by retrying with
+ // a backoff. Note that it is not always safe to retry
+ // non-idempotent operations.
+ //
+ // See the guidelines above for deciding between `FAILED_PRECONDITION`,
+ // `ABORTED`, and `UNAVAILABLE`.
+ //
+ // HTTP Mapping: 503 Service Unavailable
+ Code_UNAVAILABLE Code = 14
+ // Unrecoverable data loss or corruption.
+ //
+ // HTTP Mapping: 500 Internal Server Error
+ Code_DATA_LOSS Code = 15
+)
+
+// Enum value maps for Code.
+var (
+ Code_name = map[int32]string{
+ 0: "OK",
+ 1: "CANCELLED",
+ 2: "UNKNOWN",
+ 3: "INVALID_ARGUMENT",
+ 4: "DEADLINE_EXCEEDED",
+ 5: "NOT_FOUND",
+ 6: "ALREADY_EXISTS",
+ 7: "PERMISSION_DENIED",
+ 16: "UNAUTHENTICATED",
+ 8: "RESOURCE_EXHAUSTED",
+ 9: "FAILED_PRECONDITION",
+ 10: "ABORTED",
+ 11: "OUT_OF_RANGE",
+ 12: "UNIMPLEMENTED",
+ 13: "INTERNAL",
+ 14: "UNAVAILABLE",
+ 15: "DATA_LOSS",
+ }
+ Code_value = map[string]int32{
+ "OK": 0,
+ "CANCELLED": 1,
+ "UNKNOWN": 2,
+ "INVALID_ARGUMENT": 3,
+ "DEADLINE_EXCEEDED": 4,
+ "NOT_FOUND": 5,
+ "ALREADY_EXISTS": 6,
+ "PERMISSION_DENIED": 7,
+ "UNAUTHENTICATED": 16,
+ "RESOURCE_EXHAUSTED": 8,
+ "FAILED_PRECONDITION": 9,
+ "ABORTED": 10,
+ "OUT_OF_RANGE": 11,
+ "UNIMPLEMENTED": 12,
+ "INTERNAL": 13,
+ "UNAVAILABLE": 14,
+ "DATA_LOSS": 15,
+ }
+)
+
+func (x Code) Enum() *Code {
+ p := new(Code)
+ *p = x
+ return p
+}
+
+func (x Code) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Code) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_rpc_code_proto_enumTypes[0].Descriptor()
+}
+
+func (Code) Type() protoreflect.EnumType {
+ return &file_google_rpc_code_proto_enumTypes[0]
+}
+
+func (x Code) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Code.Descriptor instead.
+func (Code) EnumDescriptor() ([]byte, []int) {
+ return file_google_rpc_code_proto_rawDescGZIP(), []int{0}
+}
+
+var File_google_rpc_code_proto protoreflect.FileDescriptor
+
+var file_google_rpc_code_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x72, 0x70, 0x63, 0x2a, 0xb7, 0x02, 0x0a, 0x04, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x06, 0x0a, 0x02,
+ 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45,
+ 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02,
+ 0x12, 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55,
+ 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49,
+ 0x4e, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a,
+ 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e,
+ 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06,
+ 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44,
+ 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54,
+ 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x12, 0x16, 0x0a, 0x12,
+ 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54,
+ 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50,
+ 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a,
+ 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55,
+ 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d,
+ 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12,
+ 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a,
+ 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d,
+ 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x42, 0x58, 0x0a,
+ 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42,
+ 0x09, 0x43, 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
+ 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3b, 0x63, 0x6f, 0x64,
+ 0x65, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_rpc_code_proto_rawDescOnce sync.Once
+ file_google_rpc_code_proto_rawDescData = file_google_rpc_code_proto_rawDesc
+)
+
+func file_google_rpc_code_proto_rawDescGZIP() []byte {
+ file_google_rpc_code_proto_rawDescOnce.Do(func() {
+ file_google_rpc_code_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_code_proto_rawDescData)
+ })
+ return file_google_rpc_code_proto_rawDescData
+}
+
+var file_google_rpc_code_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_rpc_code_proto_goTypes = []interface{}{
+ (Code)(0), // 0: google.rpc.Code
+}
+var file_google_rpc_code_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_rpc_code_proto_init() }
+func file_google_rpc_code_proto_init() {
+ if File_google_rpc_code_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_rpc_code_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_rpc_code_proto_goTypes,
+ DependencyIndexes: file_google_rpc_code_proto_depIdxs,
+ EnumInfos: file_google_rpc_code_proto_enumTypes,
+ }.Build()
+ File_google_rpc_code_proto = out.File
+ file_google_rpc_code_proto_rawDesc = nil
+ file_google_rpc_code_proto_goTypes = nil
+ file_google_rpc_code_proto_depIdxs = nil
+}
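A sketch translating a subset of google.rpc.Code values to the HTTP statuses documented in the enum comments above (the helper is illustrative, not part of the generated package; CANCELLED's 499 has no net/http constant and is omitted):

package main

import (
	"fmt"
	"net/http"

	"google.golang.org/genproto/googleapis/rpc/code"
)

func httpStatus(c code.Code) int {
	switch c {
	case code.Code_OK:
		return http.StatusOK
	case code.Code_INVALID_ARGUMENT, code.Code_FAILED_PRECONDITION, code.Code_OUT_OF_RANGE:
		return http.StatusBadRequest
	case code.Code_UNAUTHENTICATED:
		return http.StatusUnauthorized
	case code.Code_PERMISSION_DENIED:
		return http.StatusForbidden
	case code.Code_NOT_FOUND:
		return http.StatusNotFound
	case code.Code_ALREADY_EXISTS, code.Code_ABORTED:
		return http.StatusConflict
	case code.Code_RESOURCE_EXHAUSTED:
		return http.StatusTooManyRequests
	case code.Code_DEADLINE_EXCEEDED:
		return http.StatusGatewayTimeout
	case code.Code_UNIMPLEMENTED:
		return http.StatusNotImplemented
	case code.Code_UNAVAILABLE:
		return http.StatusServiceUnavailable
	default:
		return http.StatusInternalServerError // UNKNOWN, INTERNAL, DATA_LOSS
	}
}

func main() {
	fmt.Println(httpStatus(code.Code_NOT_FOUND)) // 404
}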
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
new file mode 100644
index 0000000000..1c7b93ec16
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
@@ -0,0 +1,1278 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.12.2
+// source: google/rpc/error_details.proto
+
+package errdetails
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Describes when the clients can retry a failed request. Clients could ignore
+// the recommendation here or retry when this information is missing from error
+// responses.
+//
+// It's always recommended that clients should use exponential backoff when
+// retrying.
+//
+// Clients should wait until `retry_delay` amount of time has passed since
+// receiving the error response before retrying. If retrying requests also
+// fail, clients should use an exponential backoff scheme to gradually increase
+// the delay between retries based on `retry_delay`, until either a maximum
+// number of retries have been reached or a maximum retry delay cap has been
+// reached.
+type RetryInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Clients should wait at least this long between retrying the same request.
+ RetryDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"`
+}
+
+func (x *RetryInfo) Reset() {
+ *x = RetryInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RetryInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RetryInfo) ProtoMessage() {}
+
+func (x *RetryInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead.
+func (*RetryInfo) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *RetryInfo) GetRetryDelay() *durationpb.Duration {
+ if x != nil {
+ return x.RetryDelay
+ }
+ return nil
+}
+
+// Describes additional debugging info.
+type DebugInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The stack trace entries indicating where the error occurred.
+ StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"`
+ // Additional debugging information provided by the server.
+ Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
+}
+
+func (x *DebugInfo) Reset() {
+ *x = DebugInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DebugInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DebugInfo) ProtoMessage() {}
+
+func (x *DebugInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead.
+func (*DebugInfo) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *DebugInfo) GetStackEntries() []string {
+ if x != nil {
+ return x.StackEntries
+ }
+ return nil
+}
+
+func (x *DebugInfo) GetDetail() string {
+ if x != nil {
+ return x.Detail
+ }
+ return ""
+}
+
+// Describes how a quota check failed.
+//
+// For example if a daily limit was exceeded for the calling project,
+// a service could respond with a QuotaFailure detail containing the project
+// id and the description of the quota limit that was exceeded. If the
+// calling project hasn't enabled the service in the developer console, then
+// a service could respond with the project id and set `service_disabled`
+// to true.
+//
+// Also see RetryInfo and Help types for other details about handling a
+// quota failure.
+type QuotaFailure struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Describes all quota violations.
+ Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"`
+}
+
+func (x *QuotaFailure) Reset() {
+ *x = QuotaFailure{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QuotaFailure) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuotaFailure) ProtoMessage() {}
+
+func (x *QuotaFailure) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead.
+func (*QuotaFailure) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation {
+ if x != nil {
+ return x.Violations
+ }
+ return nil
+}
+
+// Describes the cause of the error with structured details.
+//
+// Example of an error when contacting the "pubsub.googleapis.com" API when it
+// is not enabled:
+//
+// { "reason": "API_DISABLED"
+// "domain": "googleapis.com"
+// "metadata": {
+// "resource": "projects/123",
+// "service": "pubsub.googleapis.com"
+// }
+// }
+//
+// This response indicates that the pubsub.googleapis.com API is not enabled.
+//
+// Example of an error that is returned when attempting to create a Spanner
+// instance in a region that is out of stock:
+//
+// { "reason": "STOCKOUT"
+// "domain": "spanner.googleapis.com",
+// "metadata": {
+// "availableRegions": "us-central1,us-east2"
+// }
+// }
+type ErrorInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The reason of the error. This is a constant value that identifies the
+ // proximate cause of the error. Error reasons are unique within a particular
+ // domain of errors. This should be at most 63 characters and match
+ // /[A-Z0-9_]+/.
+ Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"`
+ // The logical grouping to which the "reason" belongs. The error domain
+ // is typically the registered service name of the tool or product that
+ // generates the error. Example: "pubsub.googleapis.com". If the error is
+ // generated by some common infrastructure, the error domain must be a
+ // globally unique value that identifies the infrastructure. For Google API
+ // infrastructure, the error domain is "googleapis.com".
+ Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"`
+ // Additional structured details about this error.
+ //
+ // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in
+ // length. When identifying the current value of an exceeded limit, the units
+ // should be contained in the key, not the value. For example, rather than
+ // {"instanceLimit": "100/request"}, should be returned as,
+ // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of
+ // instances that can be created in a single (batch) request.
+ Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *ErrorInfo) Reset() {
+ *x = ErrorInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ErrorInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorInfo) ProtoMessage() {}
+
+func (x *ErrorInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead.
+func (*ErrorInfo) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ErrorInfo) GetReason() string {
+ if x != nil {
+ return x.Reason
+ }
+ return ""
+}
+
+func (x *ErrorInfo) GetDomain() string {
+ if x != nil {
+ return x.Domain
+ }
+ return ""
+}
+
+func (x *ErrorInfo) GetMetadata() map[string]string {
+ if x != nil {
+ return x.Metadata
+ }
+ return nil
+}
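+// Editorial sketch, not part of the generated file: a client might recover
+// ErrorInfo from a returned error like this, assuming the
+// google.golang.org/grpc/status package and the standard log package:
+//
+//    st := status.Convert(err)
+//    for _, d := range st.Details() {
+//        if info, ok := d.(*ErrorInfo); ok {
+//            log.Printf("reason=%s domain=%s metadata=%v",
+//                info.GetReason(), info.GetDomain(), info.GetMetadata())
+//        }
+//    }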
+
+// Describes what preconditions have failed.
+//
+// For example, if an RPC failed because it required the Terms of Service to be
+// acknowledged, it could list the terms of service violation in the
+// PreconditionFailure message.
+type PreconditionFailure struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Describes all precondition violations.
+ Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"`
+}
+
+func (x *PreconditionFailure) Reset() {
+ *x = PreconditionFailure{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PreconditionFailure) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreconditionFailure) ProtoMessage() {}
+
+func (x *PreconditionFailure) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreconditionFailure.ProtoReflect.Descriptor instead.
+func (*PreconditionFailure) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation {
+ if x != nil {
+ return x.Violations
+ }
+ return nil
+}
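+// Editorial sketch, not part of the generated file: the Terms of Service
+// example above could be expressed as follows (all values hypothetical), and
+// then attached to a FailedPrecondition status via status.WithDetails, as in
+// the QuotaFailure sketch earlier:
+//
+//    pf := &PreconditionFailure{
+//        Violations: []*PreconditionFailure_Violation{{
+//            Type:        "TOS",
+//            Subject:     "google.com/cloud",
+//            Description: "Terms of service not accepted",
+//        }},
+//    }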
+
+// Describes violations in a client request. This error type focuses on the
+// syntactic aspects of the request.
+type BadRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Describes all violations in a client request.
+ FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"`
+}
+
+func (x *BadRequest) Reset() {
+ *x = BadRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BadRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BadRequest) ProtoMessage() {}
+
+func (x *BadRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BadRequest.ProtoReflect.Descriptor instead.
+func (*BadRequest) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation {
+ if x != nil {
+ return x.FieldViolations
+ }
+ return nil
+}
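+// Editorial sketch, not part of the generated file: a client might surface
+// field violations from a BadRequest detail like this, e.g. to flag invalid
+// form fields; assumes google.golang.org/grpc/status and the fmt package:
+//
+//    for _, d := range status.Convert(err).Details() {
+//        if br, ok := d.(*BadRequest); ok {
+//            for _, v := range br.GetFieldViolations() {
+//                fmt.Printf("field %q: %s\n", v.GetField(), v.GetDescription())
+//            }
+//        }
+//    }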
+
+// Contains metadata about the request that clients can attach when filing a bug
+// or providing other forms of feedback.
+type RequestInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An opaque string that should only be interpreted by the service generating
+ // it. For example, it can be used to identify requests in the service's logs.
+ RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
+ // Any data that was used to serve this request. For example, an encrypted
+ // stack trace that can be sent back to the service provider for debugging.
+ ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"`
+}
+
+func (x *RequestInfo) Reset() {
+ *x = RequestInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RequestInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RequestInfo) ProtoMessage() {}
+
+func (x *RequestInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RequestInfo.ProtoReflect.Descriptor instead.
+func (*RequestInfo) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *RequestInfo) GetRequestId() string {
+ if x != nil {
+ return x.RequestId
+ }
+ return ""
+}
+
+func (x *RequestInfo) GetServingData() string {
+ if x != nil {
+ return x.ServingData
+ }
+ return ""
+}
+
+// Describes the resource that is being accessed.
+type ResourceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A name for the type of resource being accessed, e.g. "sql table",
+ // "cloud storage bucket", "file", "Google calendar", or the type URL
+ // of the resource, e.g. "type.googleapis.com/google.pubsub.v1.Topic".
+ ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
+ // The name of the resource being accessed. For example, a shared calendar
+ // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
+ // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
+ ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
+ // The owner of the resource (optional).
+ // For example, "user:<owner email>" or "project:<Google developer project
+ // id>".
+ Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"`
+ // Describes what error is encountered when accessing this resource.
+ // For example, updating a cloud project may require the `writer` permission
+ // on the developer console project.
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *ResourceInfo) Reset() {
+ *x = ResourceInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceInfo) ProtoMessage() {}
+
+func (x *ResourceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceInfo.ProtoReflect.Descriptor instead.
+func (*ResourceInfo) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ResourceInfo) GetResourceType() string {
+ if x != nil {
+ return x.ResourceType
+ }
+ return ""
+}
+
+func (x *ResourceInfo) GetResourceName() string {
+ if x != nil {
+ return x.ResourceName
+ }
+ return ""
+}
+
+func (x *ResourceInfo) GetOwner() string {
+ if x != nil {
+ return x.Owner
+ }
+ return ""
+}
+
+func (x *ResourceInfo) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
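+// Editorial sketch, not part of the generated file: a ResourceInfo that might
+// accompany a PERMISSION_DENIED status; all values are hypothetical:
+//
+//    ri := &ResourceInfo{
+//        ResourceType: "type.googleapis.com/google.pubsub.v1.Topic",
+//        ResourceName: "projects/example/topics/example-topic",
+//        Description:  "missing pubsub.topics.publish permission",
+//    }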
+
+// Provides links to documentation or for performing an out-of-band action.
+//
+// For example, if a quota check failed with an error indicating the calling
+// project hasn't enabled the accessed service, this can contain a URL pointing
+// directly to the right place in the developer console to flip the bit.
+type Help struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // URL(s) pointing to additional information on handling the current error.
+ Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"`
+}
+
+func (x *Help) Reset() {
+ *x = Help{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Help) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Help) ProtoMessage() {}
+
+func (x *Help) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Help.ProtoReflect.Descriptor instead.
+func (*Help) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Help) GetLinks() []*Help_Link {
+ if x != nil {
+ return x.Links
+ }
+ return nil
+}
+
+// Provides a localized error message, safe to return to the user, that can
+// be attached to an RPC error.
+type LocalizedMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The locale used following the specification defined at
+ // http://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+ // Examples are: "en-US", "fr-CH", "es-MX".
+ Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"`
+ // The localized error message in the above locale.
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (x *LocalizedMessage) Reset() {
+ *x = LocalizedMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LocalizedMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LocalizedMessage) ProtoMessage() {}
+
+func (x *LocalizedMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LocalizedMessage.ProtoReflect.Descriptor instead.
+func (*LocalizedMessage) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *LocalizedMessage) GetLocale() string {
+ if x != nil {
+ return x.Locale
+ }
+ return ""
+}
+
+func (x *LocalizedMessage) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
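+// Editorial sketch, not part of the generated file: Help and LocalizedMessage
+// often travel together on user-facing errors; the link values below are
+// hypothetical:
+//
+//    help := &Help{Links: []*Help_Link{{
+//        Description: "Enable the API in the developer console",
+//        Url:         "https://console.example.com/apis",
+//    }}}
+//    msg := &LocalizedMessage{
+//        Locale:  "en-US",
+//        Message: "The API is disabled for this project.",
+//    }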
+
+// A message type used to describe a single quota violation. For example, a
+// daily quota or a custom quota that was exceeded.
+type QuotaFailure_Violation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The subject on which the quota check failed.
+ // For example, "clientip:<ip address of client>" or "project:<Google
+ // developer project id>".
+ Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"`
+ // A description of how the quota check failed. Clients can use this
+ // description to learn more about the quota configuration in the service's
+ // public documentation, or to find the relevant quota limit to adjust in
+ // the developer console.
+ //
+ // For example: "Service disabled" or "Daily Limit for read operations
+ // exceeded".
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *QuotaFailure_Violation) Reset() {
+ *x = QuotaFailure_Violation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QuotaFailure_Violation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuotaFailure_Violation) ProtoMessage() {}
+
+func (x *QuotaFailure_Violation) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuotaFailure_Violation.ProtoReflect.Descriptor instead.
+func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *QuotaFailure_Violation) GetSubject() string {
+ if x != nil {
+ return x.Subject
+ }
+ return ""
+}
+
+func (x *QuotaFailure_Violation) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+// A message type used to describe a single precondition failure.
+type PreconditionFailure_Violation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The type of PreconditionFailure. We recommend using a service-specific
+ // enum type to define the supported precondition violation subjects. For
+ // example, "TOS" for "Terms of Service violation".
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // The subject, relative to the type, that failed.
+ // For example, "google.com/cloud" relative to the "TOS" type would indicate
+ // which terms of service is being referenced.
+ Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"`
+ // A description of how the precondition failed. Developers can use this
+ // description to understand how to fix the failure.
+ //
+ // For example: "Terms of service not accepted".
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *PreconditionFailure_Violation) Reset() {
+ *x = PreconditionFailure_Violation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PreconditionFailure_Violation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreconditionFailure_Violation) ProtoMessage() {}
+
+func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreconditionFailure_Violation.ProtoReflect.Descriptor instead.
+func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *PreconditionFailure_Violation) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *PreconditionFailure_Violation) GetSubject() string {
+ if x != nil {
+ return x.Subject
+ }
+ return ""
+}
+
+func (x *PreconditionFailure_Violation) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+// A message type used to describe a single bad request field.
+type BadRequest_FieldViolation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A path leading to a field in the request body. The value will be a
+ // sequence of dot-separated identifiers that identify a protocol buffer
+ // field. E.g., "field_violations.field" would identify this field.
+ Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
+ // A description of why the request element is bad.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *BadRequest_FieldViolation) Reset() {
+ *x = BadRequest_FieldViolation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BadRequest_FieldViolation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BadRequest_FieldViolation) ProtoMessage() {}
+
+func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BadRequest_FieldViolation.ProtoReflect.Descriptor instead.
+func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5, 0}
+}
+
+func (x *BadRequest_FieldViolation) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *BadRequest_FieldViolation) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+// Describes a URL link.
+type Help_Link struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Describes what the link offers.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // The URL of the link.
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+}
+
+func (x *Help_Link) Reset() {
+ *x = Help_Link{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Help_Link) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Help_Link) ProtoMessage() {}
+
+func (x *Help_Link) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Help_Link.ProtoReflect.Descriptor instead.
+func (*Help_Link) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8, 0}
+}
+
+func (x *Help_Link) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Help_Link) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+var File_google_rpc_error_details_proto protoreflect.FileDescriptor
+
+var file_google_rpc_error_details_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x47, 0x0a, 0x09,
+ 0x52, 0x65, 0x74, 0x72, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x72, 0x65, 0x74,
+ 0x72, 0x79, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79,
+ 0x44, 0x65, 0x6c, 0x61, 0x79, 0x22, 0x48, 0x0a, 0x09, 0x44, 0x65, 0x62, 0x75, 0x67, 0x49, 0x6e,
+ 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72,
+ 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b,
+ 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69,
+ 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22,
+ 0x9b, 0x01, 0x0a, 0x0c, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65,
+ 0x12, 0x42, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70,
+ 0x63, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56,
+ 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x01,
+ 0x0a, 0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x72,
+ 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61,
+ 0x73, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x3f, 0x0a, 0x08, 0x6d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72,
+ 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72,
+ 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72,
+ 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
+ 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46,
+ 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09,
+ 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a,
+ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+ 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61,
+ 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e,
+ 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64,
+ 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49,
+ 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61,
+ 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e,
+ 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70,
+ 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c,
+ 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a,
+ 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63,
+ 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42,
+ 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70,
+ 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+ 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70,
+ 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72,
+ 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_rpc_error_details_proto_rawDescOnce sync.Once
+ file_google_rpc_error_details_proto_rawDescData = file_google_rpc_error_details_proto_rawDesc
+)
+
+func file_google_rpc_error_details_proto_rawDescGZIP() []byte {
+ file_google_rpc_error_details_proto_rawDescOnce.Do(func() {
+ file_google_rpc_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_error_details_proto_rawDescData)
+ })
+ return file_google_rpc_error_details_proto_rawDescData
+}
+
+var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_google_rpc_error_details_proto_goTypes = []interface{}{
+ (*RetryInfo)(nil), // 0: google.rpc.RetryInfo
+ (*DebugInfo)(nil), // 1: google.rpc.DebugInfo
+ (*QuotaFailure)(nil), // 2: google.rpc.QuotaFailure
+ (*ErrorInfo)(nil), // 3: google.rpc.ErrorInfo
+ (*PreconditionFailure)(nil), // 4: google.rpc.PreconditionFailure
+ (*BadRequest)(nil), // 5: google.rpc.BadRequest
+ (*RequestInfo)(nil), // 6: google.rpc.RequestInfo
+ (*ResourceInfo)(nil), // 7: google.rpc.ResourceInfo
+ (*Help)(nil), // 8: google.rpc.Help
+ (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage
+ (*QuotaFailure_Violation)(nil), // 10: google.rpc.QuotaFailure.Violation
+ nil, // 11: google.rpc.ErrorInfo.MetadataEntry
+ (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation
+ (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation
+ (*Help_Link)(nil), // 14: google.rpc.Help.Link
+ (*durationpb.Duration)(nil), // 15: google.protobuf.Duration
+}
+var file_google_rpc_error_details_proto_depIdxs = []int32{
+ 15, // 0: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration
+ 10, // 1: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation
+ 11, // 2: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry
+ 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
+ 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
+ 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_google_rpc_error_details_proto_init() }
+func file_google_rpc_error_details_proto_init() {
+ if File_google_rpc_error_details_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RetryInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DebugInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QuotaFailure); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ErrorInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PreconditionFailure); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BadRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RequestInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Help); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LocalizedMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QuotaFailure_Violation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PreconditionFailure_Violation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BadRequest_FieldViolation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Help_Link); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_rpc_error_details_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 15,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_rpc_error_details_proto_goTypes,
+ DependencyIndexes: file_google_rpc_error_details_proto_depIdxs,
+ MessageInfos: file_google_rpc_error_details_proto_msgTypes,
+ }.Build()
+ File_google_rpc_error_details_proto = out.File
+ file_google_rpc_error_details_proto_rawDesc = nil
+ file_google_rpc_error_details_proto_goTypes = nil
+ file_google_rpc_error_details_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index a67074a3ad..e8dfc828aa 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -45,6 +45,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
scStates: make(map[balancer.SubConn]connectivity.State),
csEvltr: &balancer.ConnectivityStateEvaluator{},
config: bb.config,
+ state: connectivity.Connecting,
}
// Initialize picker to a picker that always returns
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
@@ -134,6 +135,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
b.ResolverError(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState
}
+
+ b.regeneratePicker()
+ b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
return nil
}
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index de6d41c238..0d21f2210b 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -146,6 +146,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
cc.ctx, cc.cancel = context.WithCancel(context.Background())
+ for _, opt := range extraDialOptions {
+ opt.apply(&cc.dopts)
+ }
+
for _, opt := range opts {
opt.apply(&cc.dopts)
}
diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go
index b8c2e8f920..e32edc0421 100644
--- a/vendor/google.golang.org/grpc/credentials/google/xds.go
+++ b/vendor/google.golang.org/grpc/credentials/google/xds.go
@@ -21,6 +21,7 @@ package google
import (
"context"
"net"
+ "net/url"
"strings"
"google.golang.org/grpc/credentials"
@@ -28,12 +29,16 @@ import (
)
const cfeClusterNamePrefix = "google_cfe_"
+const cfeClusterResourceNamePrefix = "/envoy.config.cluster.v3.Cluster/google_cfe_"
+const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com"
// clusterTransportCreds is a combo of TLS + ALTS.
//
// On the client, ClientHandshake picks TLS or ALTS based on address attributes.
// - if attributes has cluster name
-// - if cluster name has prefix "google_cfe_", use TLS
+// - if cluster name has prefix "google_cfe_", or
+// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_",
+// use TLS
// - otherwise, use ALTS
// - else, do TLS
//
@@ -50,18 +55,49 @@ func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clust
}
}
-func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+// clusterName returns the xDS cluster name stored in the attributes in the
+// context.
+func clusterName(ctx context.Context) string {
chi := credentials.ClientHandshakeInfoFromContext(ctx)
if chi.Attributes == nil {
- return c.tls.ClientHandshake(ctx, authority, rawConn)
+ return ""
+ }
+ cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes)
+ return cluster
+}
+
+// isDirectPathCluster returns true if the cluster in the context is a
+// directpath cluster, meaning ALTS should be used.
+func isDirectPathCluster(ctx context.Context) bool {
+ cluster := clusterName(ctx)
+ if cluster == "" {
+ // No cluster; not xDS; use TLS.
+ return false
+ }
+ if strings.HasPrefix(cluster, cfeClusterNamePrefix) {
+ // xDS cluster prefixed by "google_cfe_"; use TLS.
+ return false
}
- cn, ok := internal.GetXDSHandshakeClusterName(chi.Attributes)
- if !ok || strings.HasPrefix(cn, cfeClusterNamePrefix) {
- return c.tls.ClientHandshake(ctx, authority, rawConn)
+ if !strings.HasPrefix(cluster, "xdstp:") {
+ // Other xDS cluster name; use ALTS.
+ return true
+ }
+ u, err := url.Parse(cluster)
+ if err != nil {
+ // Shouldn't happen, but assume ALTS.
+ return true
+ }
+ // If authority AND path match our CFE checks, use TLS; otherwise use ALTS.
+ return u.Host != cfeClusterAuthorityName || !strings.HasPrefix(u.Path, cfeClusterResourceNamePrefix)
+}
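+// Editorial note, not part of the patch: a worked example of the checks
+// above, using a hypothetical directpath-style cluster name:
+//
+//    u, _ := url.Parse("xdstp://traffic-director-c2p.xds.googleapis.com" +
+//        "/envoy.config.cluster.v3.Cluster/google_cfe_foo")
+//    // u.Host == "traffic-director-c2p.xds.googleapis.com" (the CFE authority)
+//    // u.Path has prefix "/envoy.config.cluster.v3.Cluster/google_cfe_"
+//    // => isDirectPathCluster returns false, so TLS is used.
+//
+// Any other authority or resource prefix makes isDirectPathCluster return
+// true, selecting ALTS.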
+
+func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+ if isDirectPathCluster(ctx) {
+ // The attributes carry a directpath (non-CFE) cluster name, so this is
+ // a backend address; use ALTS.
+ return c.alts.ClientHandshake(ctx, authority, rawConn)
}
- // If attributes have cluster name, and cluster name is not cfe, it's a
- // backend address, use ALTS.
- return c.alts.ClientHandshake(ctx, authority, rawConn)
+ return c.tls.ClientHandshake(ctx, authority, rawConn)
}
func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index f2f605a17c..75d01ba777 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -35,6 +35,15 @@ import (
"google.golang.org/grpc/stats"
)
+func init() {
+ internal.AddExtraDialOptions = func(opt ...DialOption) {
+ extraDialOptions = append(extraDialOptions, opt...)
+ }
+ internal.ClearExtraDialOptions = func() {
+ extraDialOptions = nil
+ }
+}
+
// dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial.
type dialOptions struct {
@@ -70,6 +79,8 @@ type DialOption interface {
apply(*dialOptions)
}
+var extraDialOptions []DialOption
+
// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
//
@@ -380,7 +391,7 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
// all the RPCs and underlying network connections in this ClientConn.
func WithStatsHandler(h stats.Handler) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.copts.StatsHandler = h
+ o.copts.StatsHandlers = append(o.copts.StatsHandlers, h)
})
}
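+// Editorial note, not part of the patch: with copts.StatsHandlers now a
+// slice, repeated WithStatsHandler options accumulate instead of overwriting
+// one another; a sketch with hypothetical handler values:
+//
+//    conn, err := grpc.Dial(target,
+//        grpc.WithStatsHandler(metricsHandler), // both handlers are invoked
+//        grpc.WithStatsHandler(tracingHandler), // for every conn and RPC
+//    )
+//
+// Options registered through internal.AddExtraDialOptions are applied before
+// the user-provided ones in DialContext, so user options take priority.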
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
index 7ba8f4d183..08666f62a7 100644
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -193,6 +193,8 @@ func (gsb *Balancer) ExitIdle() {
ei.ExitIdle()
return
}
+ gsb.mu.Lock()
+ defer gsb.mu.Unlock()
for sc := range balToUpdate.subconns {
sc.Connect()
}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
index 0a25ce43f3..e3dfe204f9 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -42,14 +42,14 @@ var binLogger Logger
var grpclogLogger = grpclog.Component("binarylog")
-// SetLogger sets the binarg logger.
+// SetLogger sets the binary logger.
//
// Only call this at init time.
func SetLogger(l Logger) {
binLogger = l
}
-// GetLogger gets the binarg logger.
+// GetLogger gets the binary logger.
//
// Only call this at init time.
func GetLogger() Logger {
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 7d996e51b5..55aaeea8b4 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -77,7 +77,7 @@ var (
// environment variable
// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
// "true".
- XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true")
+ XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false")
// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
// which can be disabled by setting the environment variable
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 6d355b0b01..83018be7c7 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -63,6 +63,76 @@ var (
// xDS-enabled server invokes this method on a grpc.Server when a particular
// listener moves to "not-serving" mode.
DrainServerTransports interface{} // func(*grpc.Server, string)
+ // AddExtraServerOptions adds an array of ServerOption that will be
+ // effective globally for newly created servers. The priority will be: 1.
+ // user-provided; 2. this method; 3. default values.
+ AddExtraServerOptions interface{} // func(opt ...ServerOption)
+ // ClearExtraServerOptions clears the array of extra ServerOption. This
+ // method is useful in testing and benchmarking.
+ ClearExtraServerOptions func()
+ // AddExtraDialOptions adds an array of DialOption that will be effective
+ // globally for newly created client channels. The priority will be: 1.
+ // user-provided; 2. this method; 3. default values.
+ AddExtraDialOptions interface{} // func(opt ...DialOption)
+ // ClearExtraDialOptions clears the array of extra DialOption. This
+ // method is useful in testing and benchmarking.
+ ClearExtraDialOptions func()
+
+ // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
+ // the provided xds bootstrap config instead of the global configuration from
+ // the supported environment variables. The resolver.Builder is meant to be
+ // used in conjunction with the grpc.WithResolvers DialOption.
+ //
+ // Testing Only
+ //
+ // This function should ONLY be used for testing and may not work with some
+ // other features, including the CSDS service.
+ NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error)
+
+ // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
+ // Specifier Plugin for testing purposes, regardless of the XDSRLS environment
+ // variable.
+ //
+ // TODO: Remove this function once the RLS env var is removed.
+ RegisterRLSClusterSpecifierPluginForTesting func()
+
+ // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
+ // Specifier Plugin for testing purposes. This is needed because there is no way
+ // to unregister the RLS Cluster Specifier Plugin after registering it solely
+ // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
+ //
+ // TODO: Remove this function once the RLS env var is removed.
+ UnregisterRLSClusterSpecifierPluginForTesting func()
+
+ // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
+ // purposes, regardless of the RBAC environment variable.
+ //
+ // TODO: Remove this function once the RBAC env var is removed.
+ RegisterRBACHTTPFilterForTesting func()
+
+ // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
+ // testing purposes. This is needed because there is no way to unregister the
+ // HTTP Filter after registering it solely for testing purposes using
+ // RegisterRBACHTTPFilterForTesting().
+ //
+ // TODO: Remove this function once the RBAC env var is removed.
+ UnregisterRBACHTTPFilterForTesting func()
+
+ // RegisterOutlierDetectionBalancerForTesting registers the Outlier
+ // Detection Balancer for testing purposes, regardless of the Outlier
+ // Detection environment variable.
+ //
+ // TODO: Remove this function once the Outlier Detection env var is removed.
+ RegisterOutlierDetectionBalancerForTesting func()
+
+ // UnregisterOutlierDetectionBalancerForTesting unregisters the Outlier
+ // Detection Balancer for testing purposes. This is needed because there is
+ // no way to unregister the Outlier Detection Balancer after registering it
+ // solely for testing purposes using
+ // RegisterOutlierDetectionBalancerForTesting().
+ //
+ // TODO: Remove this function once the Outlier Detection env var is removed.
+ UnregisterOutlierDetectionBalancerForTesting func()
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 1c3459c2b4..090120925b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -49,7 +49,7 @@ import (
// NewServerHandlerTransport returns a ServerTransport handling gRPC
// from inside an http.Handler. It requires that the http Server
// supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
if r.ProtoMajor != 2 {
return nil, errors.New("gRPC requires HTTP/2")
}
@@ -138,7 +138,7 @@ type serverHandlerTransport struct {
// TODO make sure this is consistent across handler_server and http2_server
contentSubtype string
- stats stats.Handler
+ stats []stats.Handler
}
func (ht *serverHandlerTransport) Close() {
@@ -228,10 +228,10 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
})
if err == nil { // transport has not been closed
- if ht.stats != nil {
- // Note: The trailer fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
- ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+ // Note: The trailer fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ for _, sh := range ht.stats {
+ sh.HandleRPC(s.Context(), &stats.OutTrailer{
Trailer: s.trailer.Copy(),
})
}
@@ -314,10 +314,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
})
if err == nil {
- if ht.stats != nil {
+ for _, sh := range ht.stats {
// Note: The header fields are compressed with hpack after this call returns.
// No WireLength field is set here.
- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
+ sh.HandleRPC(s.Context(), &stats.OutHeader{
Header: md.Copy(),
Compression: s.sendCompress,
})
@@ -369,14 +369,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
}
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
s.ctx = peer.NewContext(ctx, pr)
- if ht.stats != nil {
- s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
+ for _, sh := range ht.stats {
+ s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
inHeader := &stats.InHeader{
FullMethod: s.method,
RemoteAddr: ht.RemoteAddr(),
Compression: s.recvCompress,
}
- ht.stats.HandleRPC(s.ctx, inHeader)
+ sh.HandleRPC(s.ctx, inHeader)
}
s.trReader = &transportReader{
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 24ca59084b..be371c6e0f 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -90,7 +90,7 @@ type http2Client struct {
kp keepalive.ClientParameters
keepaliveEnabled bool
- statsHandler stats.Handler
+ statsHandlers []stats.Handler
initialWindowSize int32
@@ -311,7 +311,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
isSecure: isSecure,
perRPCCreds: perRPCCreds,
kp: kp,
- statsHandler: opts.StatsHandler,
+ statsHandlers: opts.StatsHandlers,
initialWindowSize: initialWindowSize,
onPrefaceReceipt: onPrefaceReceipt,
nextID: 1,
@@ -341,15 +341,15 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
updateFlowControl: t.updateFlowControl,
}
}
- if t.statsHandler != nil {
- t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
+ for _, sh := range t.statsHandlers {
+ t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
connBegin := &stats.ConnBegin{
Client: true,
}
- t.statsHandler.HandleConn(t.ctx, connBegin)
+ sh.HandleConn(t.ctx, connBegin)
}
t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
if err != nil {
@@ -773,24 +773,27 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
}
}
- if t.statsHandler != nil {
+ if len(t.statsHandlers) != 0 {
header, ok := metadata.FromOutgoingContext(ctx)
if ok {
header.Set("user-agent", t.userAgent)
} else {
header = metadata.Pairs("user-agent", t.userAgent)
}
- // Note: The header fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
- outHeader := &stats.OutHeader{
- Client: true,
- FullMethod: callHdr.Method,
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- Compression: callHdr.SendCompress,
- Header: header,
+ for _, sh := range t.statsHandlers {
+ // Note: The header fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ // Note: Create a new stats object for each handler so handlers do not share and mutate the same object.
+ outHeader := &stats.OutHeader{
+ Client: true,
+ FullMethod: callHdr.Method,
+ RemoteAddr: t.remoteAddr,
+ LocalAddr: t.localAddr,
+ Compression: callHdr.SendCompress,
+ Header: header,
+ }
+ sh.HandleRPC(s.ctx, outHeader)
}
- t.statsHandler.HandleRPC(s.ctx, outHeader)
}
return s, nil
}
@@ -916,11 +919,11 @@ func (t *http2Client) Close(err error) {
for _, s := range streams {
t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
}
- if t.statsHandler != nil {
+ for _, sh := range t.statsHandlers {
connEnd := &stats.ConnEnd{
Client: true,
}
- t.statsHandler.HandleConn(t.ctx, connEnd)
+ sh.HandleConn(t.ctx, connEnd)
}
}
@@ -1432,7 +1435,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
close(s.headerChan)
}
- if t.statsHandler != nil {
+ for _, sh := range t.statsHandlers {
if isHeader {
inHeader := &stats.InHeader{
Client: true,
@@ -1440,14 +1443,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
Header: metadata.MD(mdata).Copy(),
Compression: s.recvCompress,
}
- t.statsHandler.HandleRPC(s.ctx, inHeader)
+ sh.HandleRPC(s.ctx, inHeader)
} else {
inTrailer := &stats.InTrailer{
Client: true,
WireLength: int(frame.Header().Length),
Trailer: metadata.MD(mdata).Copy(),
}
- t.statsHandler.HandleRPC(s.ctx, inTrailer)
+ sh.HandleRPC(s.ctx, inTrailer)
}
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 45d7bd145e..2b0fde334c 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -82,7 +82,7 @@ type http2Server struct {
// updates, reset streams, and various settings) to the controller.
controlBuf *controlBuffer
fc *trInFlow
- stats stats.Handler
+ stats []stats.Handler
// Keepalive and max-age parameters for the server.
kp keepalive.ServerParameters
// Keepalive enforcement policy.
@@ -257,7 +257,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
fc: &trInFlow{limit: uint32(icwz)},
state: reachable,
activeStreams: make(map[uint32]*Stream),
- stats: config.StatsHandler,
+ stats: config.StatsHandlers,
kp: kp,
idle: time.Now(),
kep: kep,
@@ -272,13 +272,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
updateFlowControl: t.updateFlowControl,
}
}
- if t.stats != nil {
- t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
+ for _, sh := range t.stats {
+ t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
connBegin := &stats.ConnBegin{}
- t.stats.HandleConn(t.ctx, connBegin)
+ sh.HandleConn(t.ctx, connBegin)
}
t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
if err != nil {
@@ -570,8 +570,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
t.adjustWindow(s, uint32(n))
}
s.ctx = traceCtx(s.ctx, s.method)
- if t.stats != nil {
- s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
+ for _, sh := range t.stats {
+ s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
inHeader := &stats.InHeader{
FullMethod: s.method,
RemoteAddr: t.remoteAddr,
@@ -580,7 +580,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
WireLength: int(frame.Header().Length),
Header: metadata.MD(mdata).Copy(),
}
- t.stats.HandleRPC(s.ctx, inHeader)
+ sh.HandleRPC(s.ctx, inHeader)
}
s.ctxDone = s.ctx.Done()
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
@@ -996,14 +996,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
t.closeStream(s, true, http2.ErrCodeInternal, false)
return ErrHeaderListSizeLimitViolation
}
- if t.stats != nil {
+ for _, sh := range t.stats {
// Note: Headers are compressed with hpack after this call returns.
// No WireLength field is set here.
outHeader := &stats.OutHeader{
Header: s.header.Copy(),
Compression: s.sendCompress,
}
- t.stats.HandleRPC(s.Context(), outHeader)
+ sh.HandleRPC(s.Context(), outHeader)
}
return nil
}
@@ -1064,10 +1064,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
// Send a RST_STREAM after the trailers if the client has not already half-closed.
rst := s.getState() == streamActive
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
- if t.stats != nil {
+ for _, sh := range t.stats {
// Note: The trailer fields are compressed with hpack after this call returns.
// No WireLength field is set here.
- t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+ sh.HandleRPC(s.Context(), &stats.OutTrailer{
Trailer: s.trailer.Copy(),
})
}
@@ -1222,9 +1222,9 @@ func (t *http2Server) Close() {
for _, s := range streams {
s.cancel()
}
- if t.stats != nil {
+ for _, sh := range t.stats {
connEnd := &stats.ConnEnd{}
- t.stats.HandleConn(t.ctx, connEnd)
+ sh.HandleConn(t.ctx, connEnd)
}
}
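
Note the ordering in the server-side hunks above: the connection context is threaded through each handler's TagConn in turn, so a later handler sees (and can build on) tags attached by earlier ones. A toy sketch of that chaining, with a plain function type standing in for stats.Handler:

package main

import (
	"context"
	"fmt"
)

type ctxKey string

// tagConn stands in for stats.Handler.TagConn in this sketch.
type tagConn func(context.Context) context.Context

func main() {
	handlers := []tagConn{
		func(ctx context.Context) context.Context { return context.WithValue(ctx, ctxKey("trace"), "t-1") },
		func(ctx context.Context) context.Context { return context.WithValue(ctx, ctxKey("tenant"), "acme") },
	}
	ctx := context.Background()
	for _, tag := range handlers {
		ctx = tag(ctx) // mirrors: t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{...})
	}
	fmt.Println(ctx.Value(ctxKey("trace")), ctx.Value(ctxKey("tenant"))) // t-1 acme
}
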
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index d8247bcdf6..b775130686 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -322,8 +322,6 @@ type bufWriter struct {
batchSize int
conn net.Conn
err error
-
- onFlush func()
}
func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
@@ -360,9 +358,6 @@ func (w *bufWriter) Flush() error {
if w.offset == 0 {
return nil
}
- if w.onFlush != nil {
- w.onFlush()
- }
_, w.err = w.conn.Write(w.buf[:w.offset])
w.offset = 0
return w.err
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index a9ce717f16..6c3ba85159 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -523,7 +523,7 @@ type ServerConfig struct {
ConnectionTimeout time.Duration
Credentials credentials.TransportCredentials
InTapHandle tap.ServerInHandle
- StatsHandler stats.Handler
+ StatsHandlers []stats.Handler
KeepaliveParams keepalive.ServerParameters
KeepalivePolicy keepalive.EnforcementPolicy
InitialWindowSize int32
@@ -553,8 +553,8 @@ type ConnectOptions struct {
CredsBundle credentials.Bundle
// KeepaliveParams stores the keepalive parameters.
KeepaliveParams keepalive.ClientParameters
- // StatsHandler stores the handler for stats.
- StatsHandler stats.Handler
+ // StatsHandlers stores the handlers for stats.
+ StatsHandlers []stats.Handler
// InitialWindowSize sets the initial window size for a stream.
InitialWindowSize int32
// InitialConnWindowSize sets the initial window size for a connection.
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
index 978b89f37a..99db79fafc 100644
--- a/vendor/google.golang.org/grpc/regenerate.sh
+++ b/vendor/google.golang.org/grpc/regenerate.sh
@@ -68,7 +68,6 @@ SOURCES=(
${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
- ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto
${WORKDIR}/grpc-proto/grpc/testing/*.proto
${WORKDIR}/grpc-proto/grpc/core/*.proto
)
@@ -80,8 +79,7 @@ SOURCES=(
# Note that the protos listed here are all for testing purposes. All protos to
# be used externally should have a go_package option (and they don't need to be
# listed here).
-OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\
-Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
+OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
@@ -121,9 +119,6 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/
# see grpc_testing_not_regenerate/README.md for details.
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
-# grpc/service_config/service_config.proto does not have a go_package option.
-mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config
-
# grpc/testing does not have a go_package option.
mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/
mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
index e87ecd0eeb..efcb7f3efd 100644
--- a/vendor/google.golang.org/grpc/resolver/map.go
+++ b/vendor/google.golang.org/grpc/resolver/map.go
@@ -28,25 +28,40 @@ type addressMapEntry struct {
// Multiple accesses may not be performed concurrently. Must be created via
// NewAddressMap; do not construct directly.
type AddressMap struct {
- m map[string]addressMapEntryList
+ // The underlying map is keyed by an Address with fields that we don't care
+ // about being set to their zero values. The only fields that we care about
+ // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
+ // distinguish between addresses with same `Addr` and `ServerName`, but
+ // different `Attributes`, we cannot store the `Attributes` in the map key.
+ //
+ // The comparison operation for structs works as follows:
+ // Struct values are comparable if all their fields are comparable. Two
+ // struct values are equal if their corresponding non-blank fields are equal.
+ //
+ // Each map value is a slice of entries whose addresses match the key in
+ // their `Addr` and `ServerName` fields; each entry carries the value
+ // associated with its full address.
+ m map[Address]addressMapEntryList
+}
+
+func toMapKey(addr *Address) Address {
+ return Address{Addr: addr.Addr, ServerName: addr.ServerName}
}
type addressMapEntryList []*addressMapEntry
// NewAddressMap creates a new AddressMap.
func NewAddressMap() *AddressMap {
- return &AddressMap{m: make(map[string]addressMapEntryList)}
+ return &AddressMap{m: make(map[Address]addressMapEntryList)}
}
// find returns the index of addr in the addressMapEntry slice, or -1 if not
// present.
func (l addressMapEntryList) find(addr Address) int {
- if len(l) == 0 {
- return -1
- }
for i, entry := range l {
- if entry.addr.ServerName == addr.ServerName &&
- entry.addr.Attributes.Equal(addr.Attributes) {
+ // Attributes are the only thing to match on here, since `Addr` and
+ // `ServerName` are already equal.
+ if entry.addr.Attributes.Equal(addr.Attributes) {
return i
}
}
@@ -55,7 +70,8 @@ func (l addressMapEntryList) find(addr Address) int {
// Get returns the value for the address in the map, if present.
func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
- entryList := a.m[addr.Addr]
+ addrKey := toMapKey(&addr)
+ entryList := a.m[addrKey]
if entry := entryList.find(addr); entry != -1 {
return entryList[entry].value, true
}
@@ -64,17 +80,19 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
// Set updates or adds the value to the address in the map.
func (a *AddressMap) Set(addr Address, value interface{}) {
- entryList := a.m[addr.Addr]
+ addrKey := toMapKey(&addr)
+ entryList := a.m[addrKey]
if entry := entryList.find(addr); entry != -1 {
- a.m[addr.Addr][entry].value = value
+ entryList[entry].value = value
return
}
- a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value})
+ a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value})
}
// Delete removes addr from the map.
func (a *AddressMap) Delete(addr Address) {
- entryList := a.m[addr.Addr]
+ addrKey := toMapKey(&addr)
+ entryList := a.m[addrKey]
entry := entryList.find(addr)
if entry == -1 {
return
@@ -85,7 +103,7 @@ func (a *AddressMap) Delete(addr Address) {
copy(entryList[entry:], entryList[entry+1:])
entryList = entryList[:len(entryList)-1]
}
- a.m[addr.Addr] = entryList
+ a.m[addrKey] = entryList
}
// Len returns the number of entries in the map.
@@ -107,3 +125,14 @@ func (a *AddressMap) Keys() []Address {
}
return ret
}
+
+// Values returns a slice of all current map values.
+func (a *AddressMap) Values() []interface{} {
+ ret := make([]interface{}, 0, a.Len())
+ for _, entryList := range a.m {
+ for _, entry := range entryList {
+ ret = append(ret, entry.value)
+ }
+ }
+ return ret
+}
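
The map rework above leans on Go's struct-key semantics: struct values are valid map keys when all their fields are comparable, and two keys are equal when their corresponding fields are equal. Attributes stays out of the key because its equality is semantic (Attributes.Equal), not Go's shallow ==; the per-key entry list disambiguates on it instead. A pared-down sketch with a hypothetical stand-in type:

package main

import "fmt"

// Address is a pared-down stand-in for resolver.Address; only the fields
// used in the map key are shown. Attributes is omitted: its equality is
// checked with Equal inside the entry list, not by the map lookup.
type Address struct {
	Addr       string
	ServerName string
}

func toMapKey(addr *Address) Address {
	return Address{Addr: addr.Addr, ServerName: addr.ServerName}
}

func main() {
	m := make(map[Address][]string)
	a := &Address{Addr: "10.0.0.1:443", ServerName: "backend"}
	b := &Address{Addr: "10.0.0.1:443", ServerName: "backend"}
	m[toMapKey(a)] = append(m[toMapKey(a)], "first")
	m[toMapKey(b)] = append(m[toMapKey(b)], "second")
	fmt.Println(len(m), m[toMapKey(a)]) // 1 [first second]: equal fields, one key
}
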
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 65de84b300..b54f5bb572 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -73,6 +73,12 @@ func init() {
internal.DrainServerTransports = func(srv *Server, addr string) {
srv.drainServerTransports(addr)
}
+ internal.AddExtraServerOptions = func(opt ...ServerOption) {
+ extraServerOptions = opt
+ }
+ internal.ClearExtraServerOptions = func() {
+ extraServerOptions = nil
+ }
}
var statusOK = status.New(codes.OK, "")
@@ -150,7 +156,7 @@ type serverOptions struct {
chainUnaryInts []UnaryServerInterceptor
chainStreamInts []StreamServerInterceptor
inTapHandle tap.ServerInHandle
- statsHandler stats.Handler
+ statsHandlers []stats.Handler
maxConcurrentStreams uint32
maxReceiveMessageSize int
maxSendMessageSize int
@@ -174,6 +180,7 @@ var defaultServerOptions = serverOptions{
writeBufferSize: defaultWriteBufSize,
readBufferSize: defaultReadBufSize,
}
+var extraServerOptions []ServerOption
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption interface {
@@ -435,7 +442,7 @@ func InTapHandle(h tap.ServerInHandle) ServerOption {
// StatsHandler returns a ServerOption that sets the stats handler for the server.
func StatsHandler(h stats.Handler) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.statsHandler = h
+ o.statsHandlers = append(o.statsHandlers, h)
})
}
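
Because the option now appends instead of assigning, stacking grpc.StatsHandler registers every handler rather than keeping only the last one. A hypothetical usage sketch (noopHandler is illustrative, not a grpc type):

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// noopHandler is a do-nothing stats.Handler used only to show registration.
type noopHandler struct{}

func (noopHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (noopHandler) HandleRPC(context.Context, stats.RPCStats)                         {}
func (noopHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (noopHandler) HandleConn(context.Context, stats.ConnStats)                       {}

func main() {
	// Both handlers take effect; before this change the second option would
	// have overwritten the first.
	srv := grpc.NewServer(
		grpc.StatsHandler(noopHandler{}),
		grpc.StatsHandler(noopHandler{}),
	)
	defer srv.Stop()
}
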
@@ -560,6 +567,9 @@ func (s *Server) stopServerWorkers() {
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
opts := defaultServerOptions
+ for _, o := range extraServerOptions {
+ o.apply(&opts)
+ }
for _, o := range opt {
o.apply(&opts)
}
@@ -867,7 +877,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
ConnectionTimeout: s.opts.connectionTimeout,
Credentials: s.opts.creds,
InTapHandle: s.opts.inTapHandle,
- StatsHandler: s.opts.statsHandler,
+ StatsHandlers: s.opts.statsHandlers,
KeepaliveParams: s.opts.keepaliveParams,
KeepalivePolicy: s.opts.keepalivePolicy,
InitialWindowSize: s.opts.initialWindowSize,
@@ -963,7 +973,7 @@ var _ http.Handler = (*Server)(nil)
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
+ st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@@ -1076,8 +1086,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
}
err = t.Write(stream, hdr, payload, opts)
- if err == nil && s.opts.statsHandler != nil {
- s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
+ if err == nil {
+ for _, sh := range s.opts.statsHandlers {
+ sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
+ }
}
return err
}
@@ -1124,13 +1136,13 @@ func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerIn
}
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
- sh := s.opts.statsHandler
- if sh != nil || trInfo != nil || channelz.IsOn() {
+ shs := s.opts.statsHandlers
+ if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
if channelz.IsOn() {
s.incrCallsStarted()
}
var statsBegin *stats.Begin
- if sh != nil {
+ for _, sh := range shs {
beginTime := time.Now()
statsBegin = &stats.Begin{
BeginTime: beginTime,
@@ -1161,7 +1173,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
trInfo.tr.Finish()
}
- if sh != nil {
+ for _, sh := range shs {
end := &stats.End{
BeginTime: statsBegin.BeginTime,
EndTime: time.Now(),
@@ -1243,7 +1255,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
var payInfo *payloadInfo
- if sh != nil || binlog != nil {
+ if len(shs) != 0 || binlog != nil {
payInfo = &payloadInfo{}
}
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
@@ -1260,7 +1272,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
- if sh != nil {
+ for _, sh := range shs {
sh.HandleRPC(stream.Context(), &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
@@ -1418,16 +1430,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
if channelz.IsOn() {
s.incrCallsStarted()
}
- sh := s.opts.statsHandler
+ shs := s.opts.statsHandlers
var statsBegin *stats.Begin
- if sh != nil {
+ if len(shs) != 0 {
beginTime := time.Now()
statsBegin = &stats.Begin{
BeginTime: beginTime,
IsClientStream: sd.ClientStreams,
IsServerStream: sd.ServerStreams,
}
- sh.HandleRPC(stream.Context(), statsBegin)
+ for _, sh := range shs {
+ sh.HandleRPC(stream.Context(), statsBegin)
+ }
}
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
ss := &serverStream{
@@ -1439,10 +1453,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
trInfo: trInfo,
- statsHandler: sh,
+ statsHandler: shs,
}
- if sh != nil || trInfo != nil || channelz.IsOn() {
+ if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
// See comment in processUnaryRPC on defers.
defer func() {
if trInfo != nil {
@@ -1456,7 +1470,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
ss.mu.Unlock()
}
- if sh != nil {
+ if len(shs) != 0 {
end := &stats.End{
BeginTime: statsBegin.BeginTime,
EndTime: time.Now(),
@@ -1464,7 +1478,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
if err != nil && err != io.EOF {
end.Error = toRPCErr(err)
}
- sh.HandleRPC(stream.Context(), end)
+ for _, sh := range shs {
+ sh.HandleRPC(stream.Context(), end)
+ }
}
if channelz.IsOn() {
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 236fc17ec3..6d82e0d7cc 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -374,9 +374,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
method := cs.callHdr.Method
- sh := cs.cc.dopts.copts.StatsHandler
var beginTime time.Time
- if sh != nil {
+ shs := cs.cc.dopts.copts.StatsHandlers
+ for _, sh := range shs {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
beginTime = time.Now()
begin := &stats.Begin{
@@ -414,12 +414,12 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
}
return &csAttempt{
- ctx: ctx,
- beginTime: beginTime,
- cs: cs,
- dc: cs.cc.dopts.dc,
- statsHandler: sh,
- trInfo: trInfo,
+ ctx: ctx,
+ beginTime: beginTime,
+ cs: cs,
+ dc: cs.cc.dopts.dc,
+ statsHandlers: shs,
+ trInfo: trInfo,
}, nil
}
@@ -536,8 +536,8 @@ type csAttempt struct {
// and cleared when the finish method is called.
trInfo *traceInfo
- statsHandler stats.Handler
- beginTime time.Time
+ statsHandlers []stats.Handler
+ beginTime time.Time
// set for newStream errors that may be transparently retried
allowTransparentRetry bool
@@ -960,8 +960,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
}
return io.EOF
}
- if a.statsHandler != nil {
- a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
+ for _, sh := range a.statsHandlers {
+ sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
}
if channelz.IsOn() {
a.t.IncrMsgSent()
@@ -971,7 +971,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
cs := a.cs
- if a.statsHandler != nil && payInfo == nil {
+ if len(a.statsHandlers) != 0 && payInfo == nil {
payInfo = &payloadInfo{}
}
@@ -1008,8 +1008,8 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
}
a.mu.Unlock()
}
- if a.statsHandler != nil {
- a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
+ for _, sh := range a.statsHandlers {
+ sh.HandleRPC(a.ctx, &stats.InPayload{
Client: true,
RecvTime: time.Now(),
Payload: m,
@@ -1068,7 +1068,7 @@ func (a *csAttempt) finish(err error) {
ServerLoad: balancerload.Parse(tr),
})
}
- if a.statsHandler != nil {
+ for _, sh := range a.statsHandlers {
end := &stats.End{
Client: true,
BeginTime: a.beginTime,
@@ -1076,7 +1076,7 @@ func (a *csAttempt) finish(err error) {
Trailer: tr,
Error: err,
}
- a.statsHandler.HandleRPC(a.ctx, end)
+ sh.HandleRPC(a.ctx, end)
}
if a.trInfo != nil && a.trInfo.tr != nil {
if err == nil {
@@ -1445,7 +1445,7 @@ type serverStream struct {
maxSendMessageSize int
trInfo *traceInfo
- statsHandler stats.Handler
+ statsHandler []stats.Handler
binlog binarylog.MethodLogger
// serverHeaderBinlogged indicates whether server header has been logged. It
@@ -1555,8 +1555,10 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
Message: data,
})
}
- if ss.statsHandler != nil {
- ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+ if len(ss.statsHandler) != 0 {
+ for _, sh := range ss.statsHandler {
+ sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+ }
}
return nil
}
@@ -1590,7 +1592,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
}
}()
var payInfo *payloadInfo
- if ss.statsHandler != nil || ss.binlog != nil {
+ if len(ss.statsHandler) != 0 || ss.binlog != nil {
payInfo = &payloadInfo{}
}
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
@@ -1605,15 +1607,17 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
}
return toRPCErr(err)
}
- if ss.statsHandler != nil {
- ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: m,
- // TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
- WireLength: payInfo.wireLength + headerLen,
- Length: len(payInfo.uncompressedBytes),
- })
+ if len(ss.statsHandler) != 0 {
+ for _, sh := range ss.statsHandler {
+ sh.HandleRPC(ss.s.Context(), &stats.InPayload{
+ RecvTime: time.Now(),
+ Payload: m,
+ // TODO truncate large payload.
+ Data: payInfo.uncompressedBytes,
+ WireLength: payInfo.wireLength + headerLen,
+ Length: len(payInfo.uncompressedBytes),
+ })
+ }
}
if ss.binlog != nil {
ss.binlog.Log(&binarylog.ClientMessage{
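
One detail worth noting in both recvMsg paths above: payload bookkeeping (payloadInfo) is allocated only when at least one stats handler or a binary logger will actually read it, so the common zero-handler path stays allocation-free. A toy sketch of that guard (names are illustrative):

package main

import "fmt"

type payloadInfo struct{}

// needPayload mirrors the guard in csAttempt.recvMsg / serverStream.RecvMsg.
func needPayload(handlers []string, payInfo *payloadInfo) *payloadInfo {
	if len(handlers) != 0 && payInfo == nil {
		payInfo = &payloadInfo{}
	}
	return payInfo
}

func main() {
	fmt.Println(needPayload(nil, nil) != nil)                 // false: nothing to capture
	fmt.Println(needPayload([]string{"metrics"}, nil) != nil) // true: a handler forces capture
}
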
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 5bc03f9b36..0eb2998cbe 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.47.0"
+const Version = "1.48.0"
diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS
deleted file mode 100644
index 2b00ddba0d..0000000000
--- a/vendor/google.golang.org/protobuf/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS
deleted file mode 100644
index 1fbd3e976f..0000000000
--- a/vendor/google.golang.org/protobuf/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index 07da5db345..5f28148d80 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -19,7 +19,7 @@ import (
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/set"
"google.golang.org/protobuf/proto"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
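
The protojson changes below this point are a mechanical de-aliasing: the import `pref "google.golang.org/protobuf/reflect/protoreflect"` becomes a plain import, so every `pref.X` call site is rewritten to `protoreflect.X` with no behavioral change. A minimal before/after sketch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	v := protoreflect.ValueOfBool(true) // was: pref.ValueOfBool(true)
	fmt.Println(v.Bool())               // true
}
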
@@ -113,7 +113,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
}
// unmarshalMessage unmarshals a message into the given protoreflect.Message.
-func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error {
+func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
return unmarshal(d, m)
}
@@ -159,10 +159,10 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error {
}
// Get the FieldDescriptor.
- var fd pref.FieldDescriptor
+ var fd protoreflect.FieldDescriptor
if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") {
// Only extension names are in [name] format.
- extName := pref.FullName(name[1 : len(name)-1])
+ extName := protoreflect.FullName(name[1 : len(name)-1])
extType, err := d.opts.Resolver.FindExtensionByName(extName)
if err != nil && err != protoregistry.NotFound {
return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err)
@@ -240,23 +240,23 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error {
}
}
-func isKnownValue(fd pref.FieldDescriptor) bool {
+func isKnownValue(fd protoreflect.FieldDescriptor) bool {
md := fd.Message()
return md != nil && md.FullName() == genid.Value_message_fullname
}
-func isNullValue(fd pref.FieldDescriptor) bool {
+func isNullValue(fd protoreflect.FieldDescriptor) bool {
ed := fd.Enum()
return ed != nil && ed.FullName() == genid.NullValue_enum_fullname
}
// unmarshalSingular unmarshals to the non-repeated field specified
// by the given FieldDescriptor.
-func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) error {
- var val pref.Value
+func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error {
+ var val protoreflect.Value
var err error
switch fd.Kind() {
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
val = m.NewField(fd)
err = d.unmarshalMessage(val.Message(), false)
default:
@@ -272,63 +272,63 @@ func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) erro
// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by
// the given FieldDescriptor.
-func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) {
+func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
const b32 int = 32
const b64 int = 64
tok, err := d.Read()
if err != nil {
- return pref.Value{}, err
+ return protoreflect.Value{}, err
}
kind := fd.Kind()
switch kind {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
if tok.Kind() == json.Bool {
- return pref.ValueOfBool(tok.Bool()), nil
+ return protoreflect.ValueOfBool(tok.Bool()), nil
}
- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if v, ok := unmarshalInt(tok, b32); ok {
return v, nil
}
- case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if v, ok := unmarshalInt(tok, b64); ok {
return v, nil
}
- case pref.Uint32Kind, pref.Fixed32Kind:
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if v, ok := unmarshalUint(tok, b32); ok {
return v, nil
}
- case pref.Uint64Kind, pref.Fixed64Kind:
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if v, ok := unmarshalUint(tok, b64); ok {
return v, nil
}
- case pref.FloatKind:
+ case protoreflect.FloatKind:
if v, ok := unmarshalFloat(tok, b32); ok {
return v, nil
}
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
if v, ok := unmarshalFloat(tok, b64); ok {
return v, nil
}
- case pref.StringKind:
+ case protoreflect.StringKind:
if tok.Kind() == json.String {
- return pref.ValueOfString(tok.ParsedString()), nil
+ return protoreflect.ValueOfString(tok.ParsedString()), nil
}
- case pref.BytesKind:
+ case protoreflect.BytesKind:
if v, ok := unmarshalBytes(tok); ok {
return v, nil
}
- case pref.EnumKind:
+ case protoreflect.EnumKind:
if v, ok := unmarshalEnum(tok, fd); ok {
return v, nil
}
@@ -337,10 +337,10 @@ func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) {
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
}
- return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+ return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
}
-func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) {
+func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.Number:
return getInt(tok, bitSize)
@@ -349,30 +349,30 @@ func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) {
// Decode number from string.
s := strings.TrimSpace(tok.ParsedString())
if len(s) != len(tok.ParsedString()) {
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
dec := json.NewDecoder([]byte(s))
tok, err := dec.Read()
if err != nil {
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
return getInt(tok, bitSize)
}
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
-func getInt(tok json.Token, bitSize int) (pref.Value, bool) {
+func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
n, ok := tok.Int(bitSize)
if !ok {
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
if bitSize == 32 {
- return pref.ValueOfInt32(int32(n)), true
+ return protoreflect.ValueOfInt32(int32(n)), true
}
- return pref.ValueOfInt64(n), true
+ return protoreflect.ValueOfInt64(n), true
}
-func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) {
+func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.Number:
return getUint(tok, bitSize)
@@ -381,30 +381,30 @@ func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) {
// Decode number from string.
s := strings.TrimSpace(tok.ParsedString())
if len(s) != len(tok.ParsedString()) {
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
dec := json.NewDecoder([]byte(s))
tok, err := dec.Read()
if err != nil {
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
return getUint(tok, bitSize)
}
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
-func getUint(tok json.Token, bitSize int) (pref.Value, bool) {
+func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
n, ok := tok.Uint(bitSize)
if !ok {
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
if bitSize == 32 {
- return pref.ValueOfUint32(uint32(n)), true
+ return protoreflect.ValueOfUint32(uint32(n)), true
}
- return pref.ValueOfUint64(n), true
+ return protoreflect.ValueOfUint64(n), true
}
-func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) {
+func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.Number:
return getFloat(tok, bitSize)
@@ -414,49 +414,49 @@ func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) {
switch s {
case "NaN":
if bitSize == 32 {
- return pref.ValueOfFloat32(float32(math.NaN())), true
+ return protoreflect.ValueOfFloat32(float32(math.NaN())), true
}
- return pref.ValueOfFloat64(math.NaN()), true
+ return protoreflect.ValueOfFloat64(math.NaN()), true
case "Infinity":
if bitSize == 32 {
- return pref.ValueOfFloat32(float32(math.Inf(+1))), true
+ return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true
}
- return pref.ValueOfFloat64(math.Inf(+1)), true
+ return protoreflect.ValueOfFloat64(math.Inf(+1)), true
case "-Infinity":
if bitSize == 32 {
- return pref.ValueOfFloat32(float32(math.Inf(-1))), true
+ return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true
}
- return pref.ValueOfFloat64(math.Inf(-1)), true
+ return protoreflect.ValueOfFloat64(math.Inf(-1)), true
}
// Decode number from string.
if len(s) != len(strings.TrimSpace(s)) {
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
dec := json.NewDecoder([]byte(s))
tok, err := dec.Read()
if err != nil {
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
return getFloat(tok, bitSize)
}
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
-func getFloat(tok json.Token, bitSize int) (pref.Value, bool) {
+func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
n, ok := tok.Float(bitSize)
if !ok {
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
if bitSize == 32 {
- return pref.ValueOfFloat32(float32(n)), true
+ return protoreflect.ValueOfFloat32(float32(n)), true
}
- return pref.ValueOfFloat64(n), true
+ return protoreflect.ValueOfFloat64(n), true
}
-func unmarshalBytes(tok json.Token) (pref.Value, bool) {
+func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) {
if tok.Kind() != json.String {
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
s := tok.ParsedString()
@@ -469,36 +469,36 @@ func unmarshalBytes(tok json.Token) (pref.Value, bool) {
}
b, err := enc.DecodeString(s)
if err != nil {
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
- return pref.ValueOfBytes(b), true
+ return protoreflect.ValueOfBytes(b), true
}
-func unmarshalEnum(tok json.Token, fd pref.FieldDescriptor) (pref.Value, bool) {
+func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.String:
// Lookup EnumNumber based on name.
s := tok.ParsedString()
- if enumVal := fd.Enum().Values().ByName(pref.Name(s)); enumVal != nil {
- return pref.ValueOfEnum(enumVal.Number()), true
+ if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
+ return protoreflect.ValueOfEnum(enumVal.Number()), true
}
case json.Number:
if n, ok := tok.Int(32); ok {
- return pref.ValueOfEnum(pref.EnumNumber(n)), true
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true
}
case json.Null:
// This is only valid for google.protobuf.NullValue.
if isNullValue(fd) {
- return pref.ValueOfEnum(0), true
+ return protoreflect.ValueOfEnum(0), true
}
}
- return pref.Value{}, false
+ return protoreflect.Value{}, false
}
-func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error {
+func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
tok, err := d.Read()
if err != nil {
return err
@@ -508,7 +508,7 @@ func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error {
}
switch fd.Kind() {
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
for {
tok, err := d.Peek()
if err != nil {
@@ -549,7 +549,7 @@ func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error {
return nil
}
-func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error {
+func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
tok, err := d.Read()
if err != nil {
return err
@@ -561,18 +561,18 @@ func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error {
// Determine ahead whether map entry is a scalar type or a message type in
// order to call the appropriate unmarshalMapValue func inside the for loop
// below.
- var unmarshalMapValue func() (pref.Value, error)
+ var unmarshalMapValue func() (protoreflect.Value, error)
switch fd.MapValue().Kind() {
- case pref.MessageKind, pref.GroupKind:
- unmarshalMapValue = func() (pref.Value, error) {
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ unmarshalMapValue = func() (protoreflect.Value, error) {
val := mmap.NewValue()
if err := d.unmarshalMessage(val.Message(), false); err != nil {
- return pref.Value{}, err
+ return protoreflect.Value{}, err
}
return val, nil
}
default:
- unmarshalMapValue = func() (pref.Value, error) {
+ unmarshalMapValue = func() (protoreflect.Value, error) {
return d.unmarshalScalar(fd.MapValue())
}
}
@@ -618,7 +618,7 @@ Loop:
// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey.
// A map key type is any integral or string type.
-func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.MapKey, error) {
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) {
const b32 = 32
const b64 = 64
const base10 = 10
@@ -626,40 +626,40 @@ func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.
name := tok.Name()
kind := fd.Kind()
switch kind {
- case pref.StringKind:
- return pref.ValueOfString(name).MapKey(), nil
+ case protoreflect.StringKind:
+ return protoreflect.ValueOfString(name).MapKey(), nil
- case pref.BoolKind:
+ case protoreflect.BoolKind:
switch name {
case "true":
- return pref.ValueOfBool(true).MapKey(), nil
+ return protoreflect.ValueOfBool(true).MapKey(), nil
case "false":
- return pref.ValueOfBool(false).MapKey(), nil
+ return protoreflect.ValueOfBool(false).MapKey(), nil
}
- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if n, err := strconv.ParseInt(name, base10, b32); err == nil {
- return pref.ValueOfInt32(int32(n)).MapKey(), nil
+ return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil
}
- case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if n, err := strconv.ParseInt(name, base10, b64); err == nil {
- return pref.ValueOfInt64(int64(n)).MapKey(), nil
+ return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil
}
- case pref.Uint32Kind, pref.Fixed32Kind:
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if n, err := strconv.ParseUint(name, base10, b32); err == nil {
- return pref.ValueOfUint32(uint32(n)).MapKey(), nil
+ return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil
}
- case pref.Uint64Kind, pref.Fixed64Kind:
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if n, err := strconv.ParseUint(name, base10, b64); err == nil {
- return pref.ValueOfUint64(uint64(n)).MapKey(), nil
+ return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil
}
default:
panic(fmt.Sprintf("invalid kind for map key: %v", kind))
}
- return pref.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString())
+ return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString())
}
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index ba971f0781..d09d22e139 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -18,7 +18,6 @@ import (
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
- pref "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
@@ -164,8 +163,8 @@ type typeURLFieldRanger struct {
typeURL string
}
-func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
- if !f(typeFieldDesc, pref.ValueOfString(m.typeURL)) {
+func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) {
return
}
m.FieldRanger.Range(f)
@@ -173,9 +172,9 @@ func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool)
// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
// method to additionally iterate over unpopulated fields.
-type unpopulatedFieldRanger struct{ pref.Message }
+type unpopulatedFieldRanger struct{ protoreflect.Message }
-func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
fds := m.Descriptor().Fields()
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
@@ -184,10 +183,10 @@ func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) b
}
v := m.Get(fd)
- isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid()
- isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil
+ isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
+ isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
if isProto2Scalar || isSingularMessage {
- v = pref.Value{} // use invalid value to emit null
+ v = protoreflect.Value{} // use invalid value to emit null
}
if !f(fd, v) {
return
@@ -199,7 +198,7 @@ func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) b
// marshalMessage marshals the fields in the given protoreflect.Message.
// If the typeURL is non-empty, then a synthetic "@type" field is injected
// containing the URL as the value.
-func (e encoder) marshalMessage(m pref.Message, typeURL string) error {
+func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) {
return errors.New("no support for proto1 MessageSets")
}
@@ -220,7 +219,7 @@ func (e encoder) marshalMessage(m pref.Message, typeURL string) error {
}
var err error
- order.RangeFields(fields, order.IndexNameFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool {
+ order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
name := fd.JSONName()
if e.opts.UseProtoNames {
name = fd.TextName()
@@ -238,7 +237,7 @@ func (e encoder) marshalMessage(m pref.Message, typeURL string) error {
}
// marshalValue marshals the given protoreflect.Value.
-func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error {
+func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
switch {
case fd.IsList():
return e.marshalList(val.List(), fd)
@@ -251,44 +250,44 @@ func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error {
// marshalSingular marshals the given non-repeated field value. This includes
// all scalar types, enums, messages, and groups.
-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error {
+func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
if !val.IsValid() {
e.WriteNull()
return nil
}
switch kind := fd.Kind(); kind {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
e.WriteBool(val.Bool())
- case pref.StringKind:
+ case protoreflect.StringKind:
if e.WriteString(val.String()) != nil {
return errors.InvalidUTF8(string(fd.FullName()))
}
- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
e.WriteInt(val.Int())
- case pref.Uint32Kind, pref.Fixed32Kind:
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
e.WriteUint(val.Uint())
- case pref.Int64Kind, pref.Sint64Kind, pref.Uint64Kind,
- pref.Sfixed64Kind, pref.Fixed64Kind:
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind,
+ protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind:
// 64-bit integers are written out as JSON string.
e.WriteString(val.String())
- case pref.FloatKind:
+ case protoreflect.FloatKind:
// Encoder.WriteFloat handles the special numbers NaN and infinites.
e.WriteFloat(val.Float(), 32)
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
// Encoder.WriteFloat handles the special numbers NaN and infinites.
e.WriteFloat(val.Float(), 64)
- case pref.BytesKind:
+ case protoreflect.BytesKind:
e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes()))
- case pref.EnumKind:
+ case protoreflect.EnumKind:
if fd.Enum().FullName() == genid.NullValue_enum_fullname {
e.WriteNull()
} else {
@@ -300,7 +299,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error
}
}
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
if err := e.marshalMessage(val.Message(), ""); err != nil {
return err
}
@@ -312,7 +311,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error
}
// marshalList marshals the given protoreflect.List.
-func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error {
+func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
e.StartArray()
defer e.EndArray()
@@ -326,12 +325,12 @@ func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error {
}
// marshalMap marshals given protoreflect.Map.
-func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error {
+func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
e.StartObject()
defer e.EndObject()
var err error
- order.RangeEntries(mmap, order.GenericKeyOrder, func(k pref.MapKey, v pref.Value) bool {
+ order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool {
if err = e.WriteName(k.String()); err != nil {
return false
}
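
Two user-visible rules encoded in marshalSingular above: 64-bit integers are emitted as JSON strings, and the float specials NaN/Infinity are emitted as the strings "NaN"/"Infinity". A small sketch using the well-known wrapper types (which marshal as their bare value):

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	b, _ := protojson.Marshal(wrapperspb.Int64(1 << 60))
	fmt.Println(string(b)) // "1152921504606846976" (a JSON string, not a number)

	b, _ = protojson.Marshal(wrapperspb.Double(math.NaN()))
	fmt.Println(string(b)) // "NaN"
}
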
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
index 72924a9050..c85f846948 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -17,14 +17,14 @@ import (
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
-type marshalFunc func(encoder, pref.Message) error
+type marshalFunc func(encoder, protoreflect.Message) error
// wellKnownTypeMarshaler returns a marshal function if the message type
// has specialized serialization behavior. It returns nil otherwise.
-func wellKnownTypeMarshaler(name pref.FullName) marshalFunc {
+func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc {
if name.Parent() == genid.GoogleProtobuf_package {
switch name.Name() {
case genid.Any_message_name:
@@ -58,11 +58,11 @@ func wellKnownTypeMarshaler(name pref.FullName) marshalFunc {
return nil
}
-type unmarshalFunc func(decoder, pref.Message) error
+type unmarshalFunc func(decoder, protoreflect.Message) error
// wellKnownTypeUnmarshaler returns an unmarshal function if the message type
// has specialized serialization behavior. It returns nil otherwise.
-func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc {
+func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc {
if name.Parent() == genid.GoogleProtobuf_package {
switch name.Name() {
case genid.Any_message_name:
@@ -102,7 +102,7 @@ func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc {
// custom JSON representation, that representation will be embedded adding a
// field `value` which holds the custom JSON in addition to the `@type` field.
-func (e encoder) marshalAny(m pref.Message) error {
+func (e encoder) marshalAny(m protoreflect.Message) error {
fds := m.Descriptor().Fields()
fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
fdValue := fds.ByNumber(genid.Any_Value_field_number)
@@ -163,7 +163,7 @@ func (e encoder) marshalAny(m pref.Message) error {
return nil
}
-func (d decoder) unmarshalAny(m pref.Message) error {
+func (d decoder) unmarshalAny(m protoreflect.Message) error {
// Peek to check for json.ObjectOpen to avoid advancing a read.
start, err := d.Peek()
if err != nil {
@@ -233,8 +233,8 @@ func (d decoder) unmarshalAny(m pref.Message) error {
fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
fdValue := fds.ByNumber(genid.Any_Value_field_number)
- m.Set(fdType, pref.ValueOfString(typeURL))
- m.Set(fdValue, pref.ValueOfBytes(b))
+ m.Set(fdType, protoreflect.ValueOfString(typeURL))
+ m.Set(fdValue, protoreflect.ValueOfBytes(b))
return nil
}
@@ -354,7 +354,7 @@ func (d decoder) skipJSONValue() error {
// unmarshalAnyValue unmarshals the given custom-type message from the JSON
// object's "value" field.
-func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) error {
+func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error {
// Skip ObjectOpen, and start reading the fields.
d.Read()
@@ -402,13 +402,13 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) erro
// Wrapper types are encoded as JSON primitives like string, number or boolean.
-func (e encoder) marshalWrapperType(m pref.Message) error {
+func (e encoder) marshalWrapperType(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
val := m.Get(fd)
return e.marshalSingular(val, fd)
}
-func (d decoder) unmarshalWrapperType(m pref.Message) error {
+func (d decoder) unmarshalWrapperType(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
val, err := d.unmarshalScalar(fd)
if err != nil {
@@ -420,13 +420,13 @@ func (d decoder) unmarshalWrapperType(m pref.Message) error {
// The JSON representation for Empty is an empty JSON object.
-func (e encoder) marshalEmpty(pref.Message) error {
+func (e encoder) marshalEmpty(protoreflect.Message) error {
e.StartObject()
e.EndObject()
return nil
}
-func (d decoder) unmarshalEmpty(pref.Message) error {
+func (d decoder) unmarshalEmpty(protoreflect.Message) error {
tok, err := d.Read()
if err != nil {
return err
@@ -462,12 +462,12 @@ func (d decoder) unmarshalEmpty(pref.Message) error {
// The JSON representation for Struct is a JSON object that contains the encoded
// Struct.fields map and follows the serialization rules for a map.
-func (e encoder) marshalStruct(m pref.Message) error {
+func (e encoder) marshalStruct(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
return e.marshalMap(m.Get(fd).Map(), fd)
}
-func (d decoder) unmarshalStruct(m pref.Message) error {
+func (d decoder) unmarshalStruct(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
return d.unmarshalMap(m.Mutable(fd).Map(), fd)
}
@@ -476,12 +476,12 @@ func (d decoder) unmarshalStruct(m pref.Message) error {
// ListValue.values repeated field and follows the serialization rules for a
// repeated field.
-func (e encoder) marshalListValue(m pref.Message) error {
+func (e encoder) marshalListValue(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
return e.marshalList(m.Get(fd).List(), fd)
}
-func (d decoder) unmarshalListValue(m pref.Message) error {
+func (d decoder) unmarshalListValue(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
return d.unmarshalList(m.Mutable(fd).List(), fd)
}
@@ -490,7 +490,7 @@ func (d decoder) unmarshalListValue(m pref.Message) error {
// set. Each of the field in the oneof has its own custom serialization rule. A
// Value message needs to be a oneof field set, else it is an error.
-func (e encoder) marshalKnownValue(m pref.Message) error {
+func (e encoder) marshalKnownValue(m protoreflect.Message) error {
od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name)
fd := m.WhichOneof(od)
if fd == nil {
@@ -504,19 +504,19 @@ func (e encoder) marshalKnownValue(m pref.Message) error {
return e.marshalSingular(m.Get(fd), fd)
}
-func (d decoder) unmarshalKnownValue(m pref.Message) error {
+func (d decoder) unmarshalKnownValue(m protoreflect.Message) error {
tok, err := d.Peek()
if err != nil {
return err
}
- var fd pref.FieldDescriptor
- var val pref.Value
+ var fd protoreflect.FieldDescriptor
+ var val protoreflect.Value
switch tok.Kind() {
case json.Null:
d.Read()
fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number)
- val = pref.ValueOfEnum(0)
+ val = protoreflect.ValueOfEnum(0)
case json.Bool:
tok, err := d.Read()
@@ -524,7 +524,7 @@ func (d decoder) unmarshalKnownValue(m pref.Message) error {
return err
}
fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number)
- val = pref.ValueOfBool(tok.Bool())
+ val = protoreflect.ValueOfBool(tok.Bool())
case json.Number:
tok, err := d.Read()
@@ -550,7 +550,7 @@ func (d decoder) unmarshalKnownValue(m pref.Message) error {
return err
}
fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number)
- val = pref.ValueOfString(tok.ParsedString())
+ val = protoreflect.ValueOfString(tok.ParsedString())
case json.ObjectOpen:
fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number)
@@ -591,7 +591,7 @@ const (
maxSecondsInDuration = 315576000000
)
-func (e encoder) marshalDuration(m pref.Message) error {
+func (e encoder) marshalDuration(m protoreflect.Message) error {
fds := m.Descriptor().Fields()
fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
@@ -623,7 +623,7 @@ func (e encoder) marshalDuration(m pref.Message) error {
return nil
}
-func (d decoder) unmarshalDuration(m pref.Message) error {
+func (d decoder) unmarshalDuration(m protoreflect.Message) error {
tok, err := d.Read()
if err != nil {
return err
@@ -646,8 +646,8 @@ func (d decoder) unmarshalDuration(m pref.Message) error {
fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
- m.Set(fdSeconds, pref.ValueOfInt64(secs))
- m.Set(fdNanos, pref.ValueOfInt32(nanos))
+ m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
+ m.Set(fdNanos, protoreflect.ValueOfInt32(nanos))
return nil
}
@@ -779,7 +779,7 @@ const (
minTimestampSeconds = -62135596800
)
-func (e encoder) marshalTimestamp(m pref.Message) error {
+func (e encoder) marshalTimestamp(m protoreflect.Message) error {
fds := m.Descriptor().Fields()
fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
@@ -805,7 +805,7 @@ func (e encoder) marshalTimestamp(m pref.Message) error {
return nil
}
-func (d decoder) unmarshalTimestamp(m pref.Message) error {
+func (d decoder) unmarshalTimestamp(m protoreflect.Message) error {
tok, err := d.Read()
if err != nil {
return err
@@ -829,8 +829,8 @@ func (d decoder) unmarshalTimestamp(m pref.Message) error {
fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
- m.Set(fdSeconds, pref.ValueOfInt64(secs))
- m.Set(fdNanos, pref.ValueOfInt32(int32(t.Nanosecond())))
+ m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
+ m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond())))
return nil
}
@@ -839,14 +839,14 @@ func (d decoder) unmarshalTimestamp(m pref.Message) error {
// lower-camel naming conventions. Encoding should fail if the path name would
// end up differently after a round-trip.
-func (e encoder) marshalFieldMask(m pref.Message) error {
+func (e encoder) marshalFieldMask(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
list := m.Get(fd).List()
paths := make([]string, 0, list.Len())
for i := 0; i < list.Len(); i++ {
s := list.Get(i).String()
- if !pref.FullName(s).IsValid() {
+ if !protoreflect.FullName(s).IsValid() {
return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s)
}
// Return error if conversion to camelCase is not reversible.
@@ -861,7 +861,7 @@ func (e encoder) marshalFieldMask(m pref.Message) error {
return nil
}
-func (d decoder) unmarshalFieldMask(m pref.Message) error {
+func (d decoder) unmarshalFieldMask(m protoreflect.Message) error {
tok, err := d.Read()
if err != nil {
return err
@@ -880,10 +880,10 @@ func (d decoder) unmarshalFieldMask(m pref.Message) error {
for _, s0 := range paths {
s := strs.JSONSnakeCase(s0)
- if strings.Contains(s0, "_") || !pref.FullName(s).IsValid() {
+ if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() {
return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0)
}
- list.Append(pref.ValueOfString(s))
+ list.Append(protoreflect.ValueOfString(s))
}
return nil
}
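
The FieldMask rules above are easy to see end to end: on marshal, each path is converted to lowerCamelCase and the conversion must be reversible; on unmarshal, JSONSnakeCase maps it back, rejecting incoming JSON paths that themselves contain "_". A short round-trip sketch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	fm := &fieldmaskpb.FieldMask{Paths: []string{"display_name", "user.create_time"}}
	b, _ := protojson.Marshal(fm)
	fmt.Println(string(b)) // "displayName,user.createTime"

	var back fieldmaskpb.FieldMask
	_ = protojson.Unmarshal(b, &back)
	fmt.Println(back.Paths) // [display_name user.create_time]
}
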
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index 179d6e8fc1..4921b2d4a7 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -17,7 +17,7 @@ import (
"google.golang.org/protobuf/internal/set"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
@@ -103,7 +103,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
}
// unmarshalMessage unmarshals into the given protoreflect.Message.
-func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
+func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) error {
messageDesc := m.Descriptor()
if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
return errors.New("no support for proto1 MessageSets")
@@ -150,24 +150,24 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
}
// Resolve the field descriptor.
- var name pref.Name
- var fd pref.FieldDescriptor
- var xt pref.ExtensionType
+ var name protoreflect.Name
+ var fd protoreflect.FieldDescriptor
+ var xt protoreflect.ExtensionType
var xtErr error
var isFieldNumberName bool
switch tok.NameKind() {
case text.IdentName:
- name = pref.Name(tok.IdentName())
+ name = protoreflect.Name(tok.IdentName())
fd = fieldDescs.ByTextName(string(name))
case text.TypeName:
// Handle extensions only. This code path is not for Any.
- xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName()))
+ xt, xtErr = d.opts.Resolver.FindExtensionByName(protoreflect.FullName(tok.TypeName()))
case text.FieldNumber:
isFieldNumberName = true
- num := pref.FieldNumber(tok.FieldNumber())
+ num := protoreflect.FieldNumber(tok.FieldNumber())
if !num.IsValid() {
return d.newError(tok.Pos(), "invalid field number: %d", num)
}
@@ -215,7 +215,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
switch {
case fd.IsList():
kind := fd.Kind()
- if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() {
+ if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
@@ -232,7 +232,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
default:
kind := fd.Kind()
- if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() {
+ if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
@@ -262,11 +262,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
// unmarshalSingular unmarshals a non-repeated field value specified by the
// given FieldDescriptor.
-func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error {
- var val pref.Value
+func (d decoder) unmarshalSingular(fd protoreflect.FieldDescriptor, m protoreflect.Message) error {
+ var val protoreflect.Value
var err error
switch fd.Kind() {
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
val = m.NewField(fd)
err = d.unmarshalMessage(val.Message(), true)
default:
@@ -280,94 +280,94 @@ func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) erro
// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the
// given FieldDescriptor.
-func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) {
+func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
tok, err := d.Read()
if err != nil {
- return pref.Value{}, err
+ return protoreflect.Value{}, err
}
if tok.Kind() != text.Scalar {
- return pref.Value{}, d.unexpectedTokenError(tok)
+ return protoreflect.Value{}, d.unexpectedTokenError(tok)
}
kind := fd.Kind()
switch kind {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
if b, ok := tok.Bool(); ok {
- return pref.ValueOfBool(b), nil
+ return protoreflect.ValueOfBool(b), nil
}
- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if n, ok := tok.Int32(); ok {
- return pref.ValueOfInt32(n), nil
+ return protoreflect.ValueOfInt32(n), nil
}
- case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if n, ok := tok.Int64(); ok {
- return pref.ValueOfInt64(n), nil
+ return protoreflect.ValueOfInt64(n), nil
}
- case pref.Uint32Kind, pref.Fixed32Kind:
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if n, ok := tok.Uint32(); ok {
- return pref.ValueOfUint32(n), nil
+ return protoreflect.ValueOfUint32(n), nil
}
- case pref.Uint64Kind, pref.Fixed64Kind:
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if n, ok := tok.Uint64(); ok {
- return pref.ValueOfUint64(n), nil
+ return protoreflect.ValueOfUint64(n), nil
}
- case pref.FloatKind:
+ case protoreflect.FloatKind:
if n, ok := tok.Float32(); ok {
- return pref.ValueOfFloat32(n), nil
+ return protoreflect.ValueOfFloat32(n), nil
}
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
if n, ok := tok.Float64(); ok {
- return pref.ValueOfFloat64(n), nil
+ return protoreflect.ValueOfFloat64(n), nil
}
- case pref.StringKind:
+ case protoreflect.StringKind:
if s, ok := tok.String(); ok {
if strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
- return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8")
+ return protoreflect.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8")
}
- return pref.ValueOfString(s), nil
+ return protoreflect.ValueOfString(s), nil
}
- case pref.BytesKind:
+ case protoreflect.BytesKind:
if b, ok := tok.String(); ok {
- return pref.ValueOfBytes([]byte(b)), nil
+ return protoreflect.ValueOfBytes([]byte(b)), nil
}
- case pref.EnumKind:
+ case protoreflect.EnumKind:
if lit, ok := tok.Enum(); ok {
// Lookup EnumNumber based on name.
- if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil {
- return pref.ValueOfEnum(enumVal.Number()), nil
+ if enumVal := fd.Enum().Values().ByName(protoreflect.Name(lit)); enumVal != nil {
+ return protoreflect.ValueOfEnum(enumVal.Number()), nil
}
}
if num, ok := tok.Int32(); ok {
- return pref.ValueOfEnum(pref.EnumNumber(num)), nil
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(num)), nil
}
default:
panic(fmt.Sprintf("invalid scalar kind %v", kind))
}
- return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+ return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
}
// unmarshalList unmarshals into the given protoreflect.List. A list value can
// either be in [] syntax or just a single scalar/message value.
-func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error {
+func (d decoder) unmarshalList(fd protoreflect.FieldDescriptor, list protoreflect.List) error {
tok, err := d.Peek()
if err != nil {
return err
}
switch fd.Kind() {
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
switch tok.Kind() {
case text.ListOpen:
d.Read()
@@ -441,22 +441,22 @@ func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error {
// unmarshalMap unmarshals into the given protoreflect.Map. A map value is a
// textproto message containing {key: <kvalue>, value: <mvalue>}.
-func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error {
+func (d decoder) unmarshalMap(fd protoreflect.FieldDescriptor, mmap protoreflect.Map) error {
// Determine ahead whether map entry is a scalar type or a message type in
// order to call the appropriate unmarshalMapValue func inside
// unmarshalMapEntry.
- var unmarshalMapValue func() (pref.Value, error)
+ var unmarshalMapValue func() (protoreflect.Value, error)
switch fd.MapValue().Kind() {
- case pref.MessageKind, pref.GroupKind:
- unmarshalMapValue = func() (pref.Value, error) {
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ unmarshalMapValue = func() (protoreflect.Value, error) {
pval := mmap.NewValue()
if err := d.unmarshalMessage(pval.Message(), true); err != nil {
- return pref.Value{}, err
+ return protoreflect.Value{}, err
}
return pval, nil
}
default:
- unmarshalMapValue = func() (pref.Value, error) {
+ unmarshalMapValue = func() (protoreflect.Value, error) {
return d.unmarshalScalar(fd.MapValue())
}
}
@@ -494,9 +494,9 @@ func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error {
// unmarshalMapEntry unmarshals one map entry, a textproto message of the form
// {key: <kvalue>, value: <mvalue>}, into the given protoreflect.Map.
-func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error {
- var key pref.MapKey
- var pval pref.Value
+func (d decoder) unmarshalMapEntry(fd protoreflect.FieldDescriptor, mmap protoreflect.Map, unmarshalMapValue func() (protoreflect.Value, error)) error {
+ var key protoreflect.MapKey
+ var pval protoreflect.Value
Loop:
for {
// Read field name.
@@ -520,7 +520,7 @@ Loop:
return d.unexpectedTokenError(tok)
}
- switch name := pref.Name(tok.IdentName()); name {
+ switch name := protoreflect.Name(tok.IdentName()); name {
case genid.MapEntry_Key_field_name:
if !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
@@ -535,7 +535,7 @@ Loop:
key = val.MapKey()
case genid.MapEntry_Value_field_name:
- if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) {
+ if kind := fd.MapValue().Kind(); (kind != protoreflect.MessageKind) && (kind != protoreflect.GroupKind) {
if !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
@@ -561,7 +561,7 @@ Loop:
}
if !pval.IsValid() {
switch fd.MapValue().Kind() {
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
// If value field is not set for message/group types, construct an
// empty one as default.
pval = mmap.NewValue()
@@ -575,7 +575,7 @@ Loop:
// unmarshalAny unmarshals an Any textproto. It can either be in expanded form
// or non-expanded form.
-func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error {
+func (d decoder) unmarshalAny(m protoreflect.Message, checkDelims bool) error {
var typeURL string
var bValue []byte
var seenTypeUrl bool
@@ -619,7 +619,7 @@ Loop:
return d.syntaxError(tok.Pos(), "missing field separator :")
}
- switch name := pref.Name(tok.IdentName()); name {
+ switch name := protoreflect.Name(tok.IdentName()); name {
case genid.Any_TypeUrl_field_name:
if seenTypeUrl {
return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname)
@@ -686,10 +686,10 @@ Loop:
fds := m.Descriptor().Fields()
if len(typeURL) > 0 {
- m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL))
+ m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), protoreflect.ValueOfString(typeURL))
}
if len(bValue) > 0 {
- m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue))
+ m.Set(fds.ByNumber(genid.Any_Value_field_number), protoreflect.ValueOfBytes(bValue))
}
return nil
}
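For reference, the decoder's public entry point exercised on a well-known type: identifier field names and ':' separators for scalar fields, exactly the shape unmarshalMessage and unmarshalSingular accept. A minimal sketch, assuming durationpb is available:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var d durationpb.Duration
	if err := prototext.Unmarshal([]byte(`seconds: 5 nanos: 250000000`), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.GetSeconds(), d.GetNanos()) // 5 250000000
}
```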
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
index 8d5304dc5b..ebf6c65284 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -20,7 +20,6 @@ import (
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
- pref "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
@@ -150,7 +149,7 @@ type encoder struct {
}
// marshalMessage marshals the given protoreflect.Message.
-func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error {
+func (e encoder) marshalMessage(m protoreflect.Message, inclDelims bool) error {
messageDesc := m.Descriptor()
if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
return errors.New("no support for proto1 MessageSets")
@@ -190,7 +189,7 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error {
}
// marshalField marshals the given field with protoreflect.Value.
-func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error {
+func (e encoder) marshalField(name string, val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
switch {
case fd.IsList():
return e.marshalList(name, val.List(), fd)
@@ -204,40 +203,40 @@ func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescript
// marshalSingular marshals the given non-repeated field value. This includes
// all scalar types, enums, messages, and groups.
-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error {
+func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
kind := fd.Kind()
switch kind {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
e.WriteBool(val.Bool())
- case pref.StringKind:
+ case protoreflect.StringKind:
s := val.String()
if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
return errors.InvalidUTF8(string(fd.FullName()))
}
e.WriteString(s)
- case pref.Int32Kind, pref.Int64Kind,
- pref.Sint32Kind, pref.Sint64Kind,
- pref.Sfixed32Kind, pref.Sfixed64Kind:
+ case protoreflect.Int32Kind, protoreflect.Int64Kind,
+ protoreflect.Sint32Kind, protoreflect.Sint64Kind,
+ protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
e.WriteInt(val.Int())
- case pref.Uint32Kind, pref.Uint64Kind,
- pref.Fixed32Kind, pref.Fixed64Kind:
+ case protoreflect.Uint32Kind, protoreflect.Uint64Kind,
+ protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
e.WriteUint(val.Uint())
- case pref.FloatKind:
+ case protoreflect.FloatKind:
// Encoder.WriteFloat handles the special numbers NaN and infinities.
e.WriteFloat(val.Float(), 32)
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
// Encoder.WriteFloat handles the special numbers NaN and infinities.
e.WriteFloat(val.Float(), 64)
- case pref.BytesKind:
+ case protoreflect.BytesKind:
e.WriteString(string(val.Bytes()))
- case pref.EnumKind:
+ case protoreflect.EnumKind:
num := val.Enum()
if desc := fd.Enum().Values().ByNumber(num); desc != nil {
e.WriteLiteral(string(desc.Name()))
@@ -246,7 +245,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error
e.WriteInt(int64(num))
}
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
return e.marshalMessage(val.Message(), true)
default:
@@ -256,7 +255,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error
}
// marshalList marshals the given protoreflect.List as multiple name-value fields.
-func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error {
+func (e encoder) marshalList(name string, list protoreflect.List, fd protoreflect.FieldDescriptor) error {
size := list.Len()
for i := 0; i < size; i++ {
e.WriteName(name)
@@ -268,9 +267,9 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto
}
// marshalMap marshals the given protoreflect.Map as multiple name-value fields.
-func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error {
+func (e encoder) marshalMap(name string, mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
var err error
- order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool {
+ order.RangeEntries(mmap, order.GenericKeyOrder, func(key protoreflect.MapKey, val protoreflect.Value) bool {
e.WriteName(name)
e.StartMessage()
defer e.EndMessage()
@@ -334,7 +333,7 @@ func (e encoder) marshalUnknown(b []byte) {
// marshalAny marshals the given google.protobuf.Any message in expanded form.
// It returns true if it was able to marshal, else false.
-func (e encoder) marshalAny(any pref.Message) bool {
+func (e encoder) marshalAny(any protoreflect.Message) bool {
// Construct the embedded message.
fds := any.Descriptor().Fields()
fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
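marshalAny above emits the expanded form when the embedded type can be resolved. A sketch via the public API (exact whitespace of the output may differ):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	a, err := anypb.New(durationpb.New(3 * time.Second))
	if err != nil {
		panic(err)
	}
	out, err := prototext.MarshalOptions{Multiline: true}.Marshal(a)
	if err != nil {
		panic(err)
	}
	// Expanded Any form, roughly:
	//   [type.googleapis.com/google.protobuf.Duration]: {seconds: 3}
	fmt.Print(string(out))
}
```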
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index 9c61112f58..ce57f57ebd 100644
--- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -516,6 +516,7 @@ func EncodeTag(num Number, typ Type) uint64 {
}
// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64.
+//
// Input: {…, 5, 3, 1, 0, 2, 4, 6, …}
// Output: {…, -3, -2, -1, 0, +1, +2, +3, …}
func DecodeZigZag(x uint64) int64 {
@@ -523,6 +524,7 @@ func DecodeZigZag(x uint64) int64 {
}
// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64.
+//
// Input: {…, -3, -2, -1, 0, +1, +2, +3, …}
// Output: {…, 5, 3, 1, 0, 2, 4, 6, …}
func EncodeZigZag(x int64) uint64 {
@@ -530,6 +532,7 @@ func EncodeZigZag(x int64) uint64 {
}
// DecodeBool decodes a uint64 as a bool.
+//
// Input: { 0, 1, 2, …}
// Output: {false, true, true, …}
func DecodeBool(x uint64) bool {
@@ -537,6 +540,7 @@ func DecodeBool(x uint64) bool {
}
// EncodeBool encodes a bool as a uint64.
+//
// Input: {false, true}
// Output: { 0, 1}
func EncodeBool(x bool) uint64 {
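The mapping tables in these comments can be checked directly against the exported protowire functions:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Zig-zag keeps small magnitudes small on the wire:
	// ..., -2 -> 3, -1 -> 1, 0 -> 0, +1 -> 2, +2 -> 4, ...
	for _, v := range []int64{-3, -2, -1, 0, 1, 2, 3} {
		u := protowire.EncodeZigZag(v)
		fmt.Printf("%+d -> %d -> %+d\n", v, u, protowire.DecodeZigZag(u))
	}
	fmt.Println(protowire.EncodeBool(true), protowire.DecodeBool(2)) // 1 true
}
```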
diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
index 360c63329d..db5248e1b5 100644
--- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
+++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
@@ -14,7 +14,7 @@ import (
"google.golang.org/protobuf/internal/detrand"
"google.golang.org/protobuf/internal/pragma"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
type list interface {
@@ -30,17 +30,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
if isRoot {
var name string
switch vs.(type) {
- case pref.Names:
+ case protoreflect.Names:
name = "Names"
- case pref.FieldNumbers:
+ case protoreflect.FieldNumbers:
name = "FieldNumbers"
- case pref.FieldRanges:
+ case protoreflect.FieldRanges:
name = "FieldRanges"
- case pref.EnumRanges:
+ case protoreflect.EnumRanges:
name = "EnumRanges"
- case pref.FileImports:
+ case protoreflect.FileImports:
name = "FileImports"
- case pref.Descriptor:
+ case protoreflect.Descriptor:
name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s"
default:
name = reflect.ValueOf(vs).Elem().Type().Name()
@@ -50,17 +50,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
var ss []string
switch vs := vs.(type) {
- case pref.Names:
+ case protoreflect.Names:
for i := 0; i < vs.Len(); i++ {
ss = append(ss, fmt.Sprint(vs.Get(i)))
}
return start + joinStrings(ss, false) + end
- case pref.FieldNumbers:
+ case protoreflect.FieldNumbers:
for i := 0; i < vs.Len(); i++ {
ss = append(ss, fmt.Sprint(vs.Get(i)))
}
return start + joinStrings(ss, false) + end
- case pref.FieldRanges:
+ case protoreflect.FieldRanges:
for i := 0; i < vs.Len(); i++ {
r := vs.Get(i)
if r[0]+1 == r[1] {
@@ -70,7 +70,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
}
}
return start + joinStrings(ss, false) + end
- case pref.EnumRanges:
+ case protoreflect.EnumRanges:
for i := 0; i < vs.Len(); i++ {
r := vs.Get(i)
if r[0] == r[1] {
@@ -80,7 +80,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
}
}
return start + joinStrings(ss, false) + end
- case pref.FileImports:
+ case protoreflect.FileImports:
for i := 0; i < vs.Len(); i++ {
var rs records
rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak")
@@ -88,11 +88,11 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
}
return start + joinStrings(ss, allowMulti) + end
default:
- _, isEnumValue := vs.(pref.EnumValueDescriptors)
+ _, isEnumValue := vs.(protoreflect.EnumValueDescriptors)
for i := 0; i < vs.Len(); i++ {
m := reflect.ValueOf(vs).MethodByName("Get")
v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface()
- ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue))
+ ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue))
}
return start + joinStrings(ss, allowMulti && isEnumValue) + end
}
@@ -106,20 +106,20 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
//
// Using a list allows us to print the accessors in a sensible order.
var descriptorAccessors = map[reflect.Type][]string{
- reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
- reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
- reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
- reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt
- reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"},
- reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"},
- reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"},
- reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"},
+ reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
+ reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
+ reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
+ reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt
+ reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"},
+ reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"},
+ reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"},
+ reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"},
}
-func FormatDesc(s fmt.State, r rune, t pref.Descriptor) {
+func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) {
io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
}
-func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
+func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
rv := reflect.ValueOf(t)
rt := rv.MethodByName("ProtoType").Type().In(0)
@@ -128,7 +128,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
start = rt.Name() + "{"
}
- _, isFile := t.(pref.FileDescriptor)
+ _, isFile := t.(protoreflect.FileDescriptor)
rs := records{allowMulti: allowMulti}
if t.IsPlaceholder() {
if isFile {
@@ -146,7 +146,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
rs.Append(rv, "Name")
}
switch t := t.(type) {
- case pref.FieldDescriptor:
+ case protoreflect.FieldDescriptor:
for _, s := range descriptorAccessors[rt] {
switch s {
case "MapKey":
@@ -156,9 +156,9 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
case "MapValue":
if v := t.MapValue(); v != nil {
switch v.Kind() {
- case pref.EnumKind:
+ case protoreflect.EnumKind:
rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())})
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())})
default:
rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()})
@@ -180,7 +180,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
rs.Append(rv, s)
}
}
- case pref.OneofDescriptor:
+ case protoreflect.OneofDescriptor:
var ss []string
fs := t.Fields()
for i := 0; i < fs.Len(); i++ {
@@ -216,7 +216,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) {
if !rv.IsValid() {
panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a))
}
- if _, ok := rv.Interface().(pref.Value); ok {
+ if _, ok := rv.Interface().(protoreflect.Value); ok {
rv = rv.MethodByName("Interface").Call(nil)[0]
if !rv.IsNil() {
rv = rv.Elem()
@@ -250,9 +250,9 @@ func (rs *records) Append(v reflect.Value, accessors ...string) {
switch v := v.(type) {
case list:
s = formatListOpt(v, false, rs.allowMulti)
- case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor:
- s = string(v.(pref.Descriptor).Name())
- case pref.Descriptor:
+ case protoreflect.FieldDescriptor, protoreflect.OneofDescriptor, protoreflect.EnumValueDescriptor, protoreflect.MethodDescriptor:
+ s = string(v.(protoreflect.Descriptor).Name())
+ case protoreflect.Descriptor:
s = string(v.FullName())
case string:
s = strconv.Quote(v)
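FormatDesc backs the fmt.Formatter implementation on descriptors, so the effect is visible with plain fmt verbs. A sketch using a linked-in well-known type (the output shape shown is illustrative):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
	fmt.Printf("%v\n", md)  // single line, e.g. MessageDescriptor{Name: Duration, ...}
	fmt.Printf("%+v\n", md) // multi-line form, via the allowMulti path above
}
```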
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go
index fdd9b13f2f..328dc733b0 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go
@@ -15,8 +15,8 @@ import (
"strconv"
ptext "google.golang.org/protobuf/internal/encoding/text"
- errors "google.golang.org/protobuf/internal/errors"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
// Format is the serialization format used to represent the default value.
@@ -35,56 +35,56 @@ const (
// Unmarshal deserializes the default string s according to the given kind k.
// When k is an enum, a list of enum value descriptors must be provided.
-func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) {
+func Unmarshal(s string, k protoreflect.Kind, evs protoreflect.EnumValueDescriptors, f Format) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) {
switch k {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
if f == GoTag {
switch s {
case "1":
- return pref.ValueOfBool(true), nil, nil
+ return protoreflect.ValueOfBool(true), nil, nil
case "0":
- return pref.ValueOfBool(false), nil, nil
+ return protoreflect.ValueOfBool(false), nil, nil
}
} else {
switch s {
case "true":
- return pref.ValueOfBool(true), nil, nil
+ return protoreflect.ValueOfBool(true), nil, nil
case "false":
- return pref.ValueOfBool(false), nil, nil
+ return protoreflect.ValueOfBool(false), nil, nil
}
}
- case pref.EnumKind:
+ case protoreflect.EnumKind:
if f == GoTag {
// Go tags use the numeric form of the enum value.
if n, err := strconv.ParseInt(s, 10, 32); err == nil {
- if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil {
- return pref.ValueOfEnum(ev.Number()), ev, nil
+ if ev := evs.ByNumber(protoreflect.EnumNumber(n)); ev != nil {
+ return protoreflect.ValueOfEnum(ev.Number()), ev, nil
}
}
} else {
// Descriptor default_value uses the enum identifier.
- ev := evs.ByName(pref.Name(s))
+ ev := evs.ByName(protoreflect.Name(s))
if ev != nil {
- return pref.ValueOfEnum(ev.Number()), ev, nil
+ return protoreflect.ValueOfEnum(ev.Number()), ev, nil
}
}
- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if v, err := strconv.ParseInt(s, 10, 32); err == nil {
- return pref.ValueOfInt32(int32(v)), nil, nil
+ return protoreflect.ValueOfInt32(int32(v)), nil, nil
}
- case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if v, err := strconv.ParseInt(s, 10, 64); err == nil {
- return pref.ValueOfInt64(int64(v)), nil, nil
+ return protoreflect.ValueOfInt64(int64(v)), nil, nil
}
- case pref.Uint32Kind, pref.Fixed32Kind:
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if v, err := strconv.ParseUint(s, 10, 32); err == nil {
- return pref.ValueOfUint32(uint32(v)), nil, nil
+ return protoreflect.ValueOfUint32(uint32(v)), nil, nil
}
- case pref.Uint64Kind, pref.Fixed64Kind:
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if v, err := strconv.ParseUint(s, 10, 64); err == nil {
- return pref.ValueOfUint64(uint64(v)), nil, nil
+ return protoreflect.ValueOfUint64(uint64(v)), nil, nil
}
- case pref.FloatKind, pref.DoubleKind:
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
var v float64
var err error
switch s {
@@ -98,29 +98,29 @@ func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (
v, err = strconv.ParseFloat(s, 64)
}
if err == nil {
- if k == pref.FloatKind {
- return pref.ValueOfFloat32(float32(v)), nil, nil
+ if k == protoreflect.FloatKind {
+ return protoreflect.ValueOfFloat32(float32(v)), nil, nil
} else {
- return pref.ValueOfFloat64(float64(v)), nil, nil
+ return protoreflect.ValueOfFloat64(float64(v)), nil, nil
}
}
- case pref.StringKind:
+ case protoreflect.StringKind:
// String values are already unescaped and can be used as is.
- return pref.ValueOfString(s), nil, nil
- case pref.BytesKind:
+ return protoreflect.ValueOfString(s), nil, nil
+ case protoreflect.BytesKind:
if b, ok := unmarshalBytes(s); ok {
- return pref.ValueOfBytes(b), nil, nil
+ return protoreflect.ValueOfBytes(b), nil, nil
}
}
- return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s)
+ return protoreflect.Value{}, nil, errors.New("could not parse value for %v: %q", k, s)
}
// Marshal serializes v as the default string according to the given kind k.
// When specifying the Descriptor format for an enum kind, the associated
// enum value descriptor must be provided.
-func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) {
+func Marshal(v protoreflect.Value, ev protoreflect.EnumValueDescriptor, k protoreflect.Kind, f Format) (string, error) {
switch k {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
if f == GoTag {
if v.Bool() {
return "1", nil
@@ -134,17 +134,17 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (
return "false", nil
}
}
- case pref.EnumKind:
+ case protoreflect.EnumKind:
if f == GoTag {
return strconv.FormatInt(int64(v.Enum()), 10), nil
} else {
return string(ev.Name()), nil
}
- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return strconv.FormatInt(v.Int(), 10), nil
- case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind:
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return strconv.FormatUint(v.Uint(), 10), nil
- case pref.FloatKind, pref.DoubleKind:
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
f := v.Float()
switch {
case math.IsInf(f, -1):
@@ -154,16 +154,16 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (
case math.IsNaN(f):
return "nan", nil
default:
- if k == pref.FloatKind {
+ if k == protoreflect.FloatKind {
return strconv.FormatFloat(f, 'g', -1, 32), nil
} else {
return strconv.FormatFloat(f, 'g', -1, 64), nil
}
}
- case pref.StringKind:
+ case protoreflect.StringKind:
// String values are serialized as is without any escaping.
return v.String(), nil
- case pref.BytesKind:
+ case protoreflect.BytesKind:
if s, ok := marshalBytes(v.Bytes()); ok {
return s, nil
}
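defval is an internal package, but the shape of Unmarshal's Descriptor-format branches can be sketched with the public protoreflect constructors. parseDefault here is a hypothetical stand-in covering only the bool and int32 kinds, not the real API:

```go
package main

import (
	"fmt"
	"strconv"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// parseDefault mirrors the bool and int32 branches of defval.Unmarshal
// in its Descriptor format (hypothetical helper, for illustration only).
func parseDefault(s string, k protoreflect.Kind) (protoreflect.Value, error) {
	switch k {
	case protoreflect.BoolKind:
		switch s {
		case "true":
			return protoreflect.ValueOfBool(true), nil
		case "false":
			return protoreflect.ValueOfBool(false), nil
		}
	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
		if v, err := strconv.ParseInt(s, 10, 32); err == nil {
			return protoreflect.ValueOfInt32(int32(v)), nil
		}
	}
	return protoreflect.Value{}, fmt.Errorf("could not parse value for %v: %q", k, s)
}

func main() {
	v, err := parseDefault("42", protoreflect.Int32Kind)
	fmt.Println(v.Int(), err) // 42 <nil>
}
```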
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
index c1866f3c1a..a6693f0a2f 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
@@ -10,7 +10,7 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
// The MessageSet wire format is equivalent to a message defined as follows,
@@ -33,6 +33,7 @@ const (
// ExtensionName is the field name for extensions of MessageSet.
//
// A valid MessageSet extension must be of the form:
+//
// message MyMessage {
// extend proto2.bridge.MessageSet {
// optional MyMessage message_set_extension = 1234;
@@ -42,13 +43,13 @@ const (
const ExtensionName = "message_set_extension"
// IsMessageSet returns whether the message uses the MessageSet wire format.
-func IsMessageSet(md pref.MessageDescriptor) bool {
+func IsMessageSet(md protoreflect.MessageDescriptor) bool {
xmd, ok := md.(interface{ IsMessageSet() bool })
return ok && xmd.IsMessageSet()
}
// IsMessageSetExtension reports whether this field properly extends a MessageSet.
-func IsMessageSetExtension(fd pref.FieldDescriptor) bool {
+func IsMessageSetExtension(fd protoreflect.FieldDescriptor) bool {
switch {
case fd.Name() != ExtensionName:
return false
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 38f1931c6f..373d208374 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -11,10 +11,10 @@ import (
"strconv"
"strings"
- defval "google.golang.org/protobuf/internal/encoding/defval"
- fdesc "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/encoding/defval"
+ "google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/strs"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
var byteType = reflect.TypeOf(byte(0))
@@ -29,9 +29,9 @@ var byteType = reflect.TypeOf(byte(0))
// This does not populate the Enum or Message (except for weak messages).
//
// This function is a best effort attempt; parsing errors are ignored.
-func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor {
- f := new(fdesc.Field)
- f.L0.ParentFile = fdesc.SurrogateProto2
+func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
+ f := new(filedesc.Field)
+ f.L0.ParentFile = filedesc.SurrogateProto2
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
@@ -39,68 +39,68 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p
}
switch s := tag[:i]; {
case strings.HasPrefix(s, "name="):
- f.L0.FullName = pref.FullName(s[len("name="):])
+ f.L0.FullName = protoreflect.FullName(s[len("name="):])
case strings.Trim(s, "0123456789") == "":
n, _ := strconv.ParseUint(s, 10, 32)
- f.L1.Number = pref.FieldNumber(n)
+ f.L1.Number = protoreflect.FieldNumber(n)
case s == "opt":
- f.L1.Cardinality = pref.Optional
+ f.L1.Cardinality = protoreflect.Optional
case s == "req":
- f.L1.Cardinality = pref.Required
+ f.L1.Cardinality = protoreflect.Required
case s == "rep":
- f.L1.Cardinality = pref.Repeated
+ f.L1.Cardinality = protoreflect.Repeated
case s == "varint":
switch goType.Kind() {
case reflect.Bool:
- f.L1.Kind = pref.BoolKind
+ f.L1.Kind = protoreflect.BoolKind
case reflect.Int32:
- f.L1.Kind = pref.Int32Kind
+ f.L1.Kind = protoreflect.Int32Kind
case reflect.Int64:
- f.L1.Kind = pref.Int64Kind
+ f.L1.Kind = protoreflect.Int64Kind
case reflect.Uint32:
- f.L1.Kind = pref.Uint32Kind
+ f.L1.Kind = protoreflect.Uint32Kind
case reflect.Uint64:
- f.L1.Kind = pref.Uint64Kind
+ f.L1.Kind = protoreflect.Uint64Kind
}
case s == "zigzag32":
if goType.Kind() == reflect.Int32 {
- f.L1.Kind = pref.Sint32Kind
+ f.L1.Kind = protoreflect.Sint32Kind
}
case s == "zigzag64":
if goType.Kind() == reflect.Int64 {
- f.L1.Kind = pref.Sint64Kind
+ f.L1.Kind = protoreflect.Sint64Kind
}
case s == "fixed32":
switch goType.Kind() {
case reflect.Int32:
- f.L1.Kind = pref.Sfixed32Kind
+ f.L1.Kind = protoreflect.Sfixed32Kind
case reflect.Uint32:
- f.L1.Kind = pref.Fixed32Kind
+ f.L1.Kind = protoreflect.Fixed32Kind
case reflect.Float32:
- f.L1.Kind = pref.FloatKind
+ f.L1.Kind = protoreflect.FloatKind
}
case s == "fixed64":
switch goType.Kind() {
case reflect.Int64:
- f.L1.Kind = pref.Sfixed64Kind
+ f.L1.Kind = protoreflect.Sfixed64Kind
case reflect.Uint64:
- f.L1.Kind = pref.Fixed64Kind
+ f.L1.Kind = protoreflect.Fixed64Kind
case reflect.Float64:
- f.L1.Kind = pref.DoubleKind
+ f.L1.Kind = protoreflect.DoubleKind
}
case s == "bytes":
switch {
case goType.Kind() == reflect.String:
- f.L1.Kind = pref.StringKind
+ f.L1.Kind = protoreflect.StringKind
case goType.Kind() == reflect.Slice && goType.Elem() == byteType:
- f.L1.Kind = pref.BytesKind
+ f.L1.Kind = protoreflect.BytesKind
default:
- f.L1.Kind = pref.MessageKind
+ f.L1.Kind = protoreflect.MessageKind
}
case s == "group":
- f.L1.Kind = pref.GroupKind
+ f.L1.Kind = protoreflect.GroupKind
case strings.HasPrefix(s, "enum="):
- f.L1.Kind = pref.EnumKind
+ f.L1.Kind = protoreflect.EnumKind
case strings.HasPrefix(s, "json="):
jsonName := s[len("json="):]
if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) {
@@ -111,23 +111,23 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p
f.L1.IsPacked = true
case strings.HasPrefix(s, "weak="):
f.L1.IsWeak = true
- f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):]))
+ f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
s, i = tag[len("def="):], len(tag)
v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag)
- f.L1.Default = fdesc.DefaultValue(v, ev)
+ f.L1.Default = filedesc.DefaultValue(v, ev)
case s == "proto3":
- f.L0.ParentFile = fdesc.SurrogateProto3
+ f.L0.ParentFile = filedesc.SurrogateProto3
}
tag = strings.TrimPrefix(tag[i:], ",")
}
// The generator uses the group message name instead of the field name.
// We obtain the real field name by lowercasing the group name.
- if f.L1.Kind == pref.GroupKind {
- f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName)))
+ if f.L1.Kind == protoreflect.GroupKind {
+ f.L0.FullName = protoreflect.FullName(strings.ToLower(string(f.L0.FullName)))
}
return f
}
@@ -140,38 +140,38 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p
// Depending on the context in which Marshal is called, there are different ways
// through which that information is determined. As such it is the caller's
// responsibility to provide a function to obtain that information.
-func Marshal(fd pref.FieldDescriptor, enumName string) string {
+func Marshal(fd protoreflect.FieldDescriptor, enumName string) string {
var tag []string
switch fd.Kind() {
- case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind:
+ case protoreflect.BoolKind, protoreflect.EnumKind, protoreflect.Int32Kind, protoreflect.Uint32Kind, protoreflect.Int64Kind, protoreflect.Uint64Kind:
tag = append(tag, "varint")
- case pref.Sint32Kind:
+ case protoreflect.Sint32Kind:
tag = append(tag, "zigzag32")
- case pref.Sint64Kind:
+ case protoreflect.Sint64Kind:
tag = append(tag, "zigzag64")
- case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind:
+ case protoreflect.Sfixed32Kind, protoreflect.Fixed32Kind, protoreflect.FloatKind:
tag = append(tag, "fixed32")
- case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind:
+ case protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind, protoreflect.DoubleKind:
tag = append(tag, "fixed64")
- case pref.StringKind, pref.BytesKind, pref.MessageKind:
+ case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind:
tag = append(tag, "bytes")
- case pref.GroupKind:
+ case protoreflect.GroupKind:
tag = append(tag, "group")
}
tag = append(tag, strconv.Itoa(int(fd.Number())))
switch fd.Cardinality() {
- case pref.Optional:
+ case protoreflect.Optional:
tag = append(tag, "opt")
- case pref.Required:
+ case protoreflect.Required:
tag = append(tag, "req")
- case pref.Repeated:
+ case protoreflect.Repeated:
tag = append(tag, "rep")
}
if fd.IsPacked() {
tag = append(tag, "packed")
}
name := string(fd.Name())
- if fd.Kind() == pref.GroupKind {
+ if fd.Kind() == protoreflect.GroupKind {
// The name of the FieldDescriptor for a group field is
// lowercased. To find the original capitalization, we
// look in the field's MessageType.
@@ -189,10 +189,10 @@ func Marshal(fd pref.FieldDescriptor, enumName string) string {
// The previous implementation does not tag extension fields as proto3,
// even when the field is defined in a proto3 file. Match that behavior
// for consistency.
- if fd.Syntax() == pref.Proto3 && !fd.IsExtension() {
+ if fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension() {
tag = append(tag, "proto3")
}
- if fd.Kind() == pref.EnumKind && enumName != "" {
+ if fd.Kind() == protoreflect.EnumKind && enumName != "" {
tag = append(tag, "enum="+enumName)
}
if fd.ContainingOneof() != nil {
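For orientation, this Marshal/Unmarshal pair round-trips the legacy struct tags found on generated Go fields. The field names, numbers, and enum name below are made up; the element order follows Marshal above:

```go
package main

import (
	"fmt"
	"reflect"
)

// Illustrative only — not generated code.
type example struct {
	ID     int32    `protobuf:"varint,1,opt,name=id,proto3"`
	Emails []string `protobuf:"bytes,2,rep,name=emails,json=emailList"`
	Level  int32    `protobuf:"varint,3,opt,name=level,enum=example.Level,def=1"`
}

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("Level")
	fmt.Println(f.Tag.Get("protobuf")) // varint,3,opt,name=level,enum=example.Level,def=1
}
```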
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
index 37803773fa..427c62d037 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
@@ -8,7 +8,6 @@ import (
"bytes"
"fmt"
"io"
- "regexp"
"strconv"
"unicode/utf8"
@@ -421,7 +420,7 @@ func (d *Decoder) parseFieldName() (tok Token, err error) {
return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size])
}
- return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in))
+ return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in))
}
// parseTypeName parses Any type URL or extension field name. The name is
@@ -571,7 +570,7 @@ func (d *Decoder) parseScalar() (Token, error) {
return tok, nil
}
- return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in))
+ return Token{}, d.newSyntaxError("invalid scalar value: %s", errId(d.in))
}
// parseLiteralValue parses a literal value. A literal value is used for
@@ -653,8 +652,29 @@ func consume(b []byte, n int) []byte {
return b
}
-// Any sequence that looks like a non-delimiter (for error reporting).
-var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`)
+// errId extracts a byte sequence that looks like an invalid ID
+// (for the purposes of error reporting).
+func errId(seq []byte) []byte {
+ const maxLen = 32
+ for i := 0; i < len(seq); {
+ if i > maxLen {
+ return append(seq[:i:i], "…"...)
+ }
+ r, size := utf8.DecodeRune(seq[i:])
+ if r > utf8.RuneSelf || (r != '/' && isDelim(byte(r))) {
+ if i == 0 {
+ // Either the first byte is invalid UTF-8 or a
+ // delimiter, or the first rune is non-ASCII.
+ // Return it as-is.
+ i = size
+ }
+ return seq[:i:i]
+ }
+ i += size
+ }
+ // No delimiter found.
+ return seq
+}
// isDelim returns true if given byte is a delimiter character.
func isDelim(c byte) bool {
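A self-contained sketch of the new errId behavior. isDelim is reconstructed here from the character class of the regexp being replaced ([-+._a-zA-Z0-9], with '/' special-cased inside errId), so treat it as an approximation of the real decoder helper:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// isDelim (assumed): anything outside [-+._a-zA-Z0-9] delimits a token,
// matching the character class of the removed errRegexp.
func isDelim(c byte) bool {
	return !(c == '-' || c == '+' || c == '.' || c == '_' ||
		('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9'))
}

// errId as introduced by this change, reproduced for demonstration.
func errId(seq []byte) []byte {
	const maxLen = 32
	for i := 0; i < len(seq); {
		if i > maxLen {
			return append(seq[:i:i], "…"...)
		}
		r, size := utf8.DecodeRune(seq[i:])
		if r > utf8.RuneSelf || (r != '/' && isDelim(byte(r))) {
			if i == 0 {
				i = size // first rune is itself the offender; return it as-is
			}
			return seq[:i:i]
		}
		i += size
	}
	return seq // no delimiter found
}

func main() {
	fmt.Printf("%q\n", errId([]byte("foo$bar")))  // "foo"
	fmt.Printf("%q\n", errId([]byte("$foo")))     // "$"
	fmt.Printf("%q\n", errId([]byte("type/url"))) // "type/url"
}
```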
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
index f2d90b7899..81a5d8c861 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
@@ -50,8 +50,10 @@ type number struct {
// parseNumber constructs a number object from given input. It allows for the
// following patterns:
-// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*)
-// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?)
+//
+// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*)
+// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?)
+//
// It also returns the number of parsed bytes for the given number, or 0 if it
// is not a number.
func parseNumber(input []byte) number {
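The documented patterns can be exercised directly with regexp to see what parseNumber accepts (the real implementation is a hand-written scanner, not regexp-based):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The two patterns from the parseNumber doc comment above.
	integer := regexp.MustCompile(`^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*)`)
	float := regexp.MustCompile(`^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?)`)
	for _, s := range []string{"42", "-0x1F", "017", "3.14", "-1e-3", "2.5f"} {
		fmt.Printf("%-6s integer=%q float=%q\n", s, integer.FindString(s), float.FindString(s))
	}
}
```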
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go
index 0ce8d6fb83..7ae6c2a3c2 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go
@@ -24,6 +24,6 @@
// the Go implementation should as well.
//
// The text format is almost a superset of JSON except:
-// * message keys are not quoted strings, but identifiers
-// * the top-level value must be a message without the delimiters
+// - message keys are not quoted strings, but identifiers
+// - the top-level value must be a message without the delimiters
package text
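The two exceptions called out above, side by side on an illustrative message with fields foo and bar:

```go
package main

import "fmt"

func main() {
	asJSON := `{"foo": 1, "bar": "hi"}` // JSON: quoted string keys, braced top level
	asText := `foo: 1 bar: "hi"`        // text format: identifier keys, no top-level delimiters
	fmt.Println(asJSON)
	fmt.Println(asText)
}
```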
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go
index b293b69473..7cac1c1901 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go
@@ -12,8 +12,7 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- preg "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/reflect/protoregistry"
)
// Builder constructs a protoreflect.FileDescriptor from the raw descriptor.
@@ -38,7 +37,7 @@ type Builder struct {
// TypeResolver resolves extension field types for descriptor options.
// If nil, it uses protoregistry.GlobalTypes.
TypeResolver interface {
- preg.ExtensionTypeResolver
+ protoregistry.ExtensionTypeResolver
}
// FileRegistry is used to look up file, enum, and message dependencies.
@@ -46,8 +45,8 @@ type Builder struct {
// If nil, it uses protoregistry.GlobalFiles.
FileRegistry interface {
FindFileByPath(string) (protoreflect.FileDescriptor, error)
- FindDescriptorByName(pref.FullName) (pref.Descriptor, error)
- RegisterFile(pref.FileDescriptor) error
+ FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error)
+ RegisterFile(protoreflect.FileDescriptor) error
}
}
@@ -55,8 +54,8 @@ type Builder struct {
// If so, it permits looking up an enum or message dependency based on the
// sub-list and element index into filetype.Builder.DependencyIndexes.
type resolverByIndex interface {
- FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor
- FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor
+ FindEnumByIndex(int32, int32, []Enum, []Message) protoreflect.EnumDescriptor
+ FindMessageByIndex(int32, int32, []Enum, []Message) protoreflect.MessageDescriptor
}
// Indexes of each sub-list in filetype.Builder.DependencyIndexes.
@@ -70,7 +69,7 @@ const (
// Out is the output of the Builder.
type Out struct {
- File pref.FileDescriptor
+ File protoreflect.FileDescriptor
// Enums is all enum descriptors in "flattened ordering".
Enums []Enum
@@ -97,10 +96,10 @@ func (db Builder) Build() (out Out) {
// Initialize resolvers and registries if unpopulated.
if db.TypeResolver == nil {
- db.TypeResolver = preg.GlobalTypes
+ db.TypeResolver = protoregistry.GlobalTypes
}
if db.FileRegistry == nil {
- db.FileRegistry = preg.GlobalFiles
+ db.FileRegistry = protoregistry.GlobalFiles
}
fd := newRawFile(db)
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 98ab142aee..7c3689baee 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -17,7 +17,7 @@ import (
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/strs"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
@@ -43,9 +43,9 @@ type (
L2 *FileL2
}
FileL1 struct {
- Syntax pref.Syntax
+ Syntax protoreflect.Syntax
Path string
- Package pref.FullName
+ Package protoreflect.FullName
Enums Enums
Messages Messages
@@ -53,36 +53,36 @@ type (
Services Services
}
FileL2 struct {
- Options func() pref.ProtoMessage
+ Options func() protoreflect.ProtoMessage
Imports FileImports
Locations SourceLocations
}
)
-func (fd *File) ParentFile() pref.FileDescriptor { return fd }
-func (fd *File) Parent() pref.Descriptor { return nil }
-func (fd *File) Index() int { return 0 }
-func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax }
-func (fd *File) Name() pref.Name { return fd.L1.Package.Name() }
-func (fd *File) FullName() pref.FullName { return fd.L1.Package }
-func (fd *File) IsPlaceholder() bool { return false }
-func (fd *File) Options() pref.ProtoMessage {
+func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
+func (fd *File) Parent() protoreflect.Descriptor { return nil }
+func (fd *File) Index() int { return 0 }
+func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax }
+func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
+func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
+func (fd *File) IsPlaceholder() bool { return false }
+func (fd *File) Options() protoreflect.ProtoMessage {
if f := fd.lazyInit().Options; f != nil {
return f()
}
return descopts.File
}
-func (fd *File) Path() string { return fd.L1.Path }
-func (fd *File) Package() pref.FullName { return fd.L1.Package }
-func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports }
-func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums }
-func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages }
-func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions }
-func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services }
-func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations }
-func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
-func (fd *File) ProtoType(pref.FileDescriptor) {}
-func (fd *File) ProtoInternal(pragma.DoNotImplement) {}
+func (fd *File) Path() string { return fd.L1.Path }
+func (fd *File) Package() protoreflect.FullName { return fd.L1.Package }
+func (fd *File) Imports() protoreflect.FileImports { return &fd.lazyInit().Imports }
+func (fd *File) Enums() protoreflect.EnumDescriptors { return &fd.L1.Enums }
+func (fd *File) Messages() protoreflect.MessageDescriptors { return &fd.L1.Messages }
+func (fd *File) Extensions() protoreflect.ExtensionDescriptors { return &fd.L1.Extensions }
+func (fd *File) Services() protoreflect.ServiceDescriptors { return &fd.L1.Services }
+func (fd *File) SourceLocations() protoreflect.SourceLocations { return &fd.lazyInit().Locations }
+func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
+func (fd *File) ProtoType(protoreflect.FileDescriptor) {}
+func (fd *File) ProtoInternal(pragma.DoNotImplement) {}
func (fd *File) lazyInit() *FileL2 {
if atomic.LoadUint32(&fd.once) == 0 {
@@ -119,7 +119,7 @@ type (
eagerValues bool // controls whether EnumL2.Values is already populated
}
EnumL2 struct {
- Options func() pref.ProtoMessage
+ Options func() protoreflect.ProtoMessage
Values EnumValues
ReservedNames Names
ReservedRanges EnumRanges
@@ -130,41 +130,41 @@ type (
L1 EnumValueL1
}
EnumValueL1 struct {
- Options func() pref.ProtoMessage
- Number pref.EnumNumber
+ Options func() protoreflect.ProtoMessage
+ Number protoreflect.EnumNumber
}
)
-func (ed *Enum) Options() pref.ProtoMessage {
+func (ed *Enum) Options() protoreflect.ProtoMessage {
if f := ed.lazyInit().Options; f != nil {
return f()
}
return descopts.Enum
}
-func (ed *Enum) Values() pref.EnumValueDescriptors {
+func (ed *Enum) Values() protoreflect.EnumValueDescriptors {
if ed.L1.eagerValues {
return &ed.L2.Values
}
return &ed.lazyInit().Values
}
-func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames }
-func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges }
-func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
-func (ed *Enum) ProtoType(pref.EnumDescriptor) {}
+func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit().ReservedNames }
+func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges }
+func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
+func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {}
func (ed *Enum) lazyInit() *EnumL2 {
ed.L0.ParentFile.lazyInit() // implicitly initializes L2
return ed.L2
}
-func (ed *EnumValue) Options() pref.ProtoMessage {
+func (ed *EnumValue) Options() protoreflect.ProtoMessage {
if f := ed.L1.Options; f != nil {
return f()
}
return descopts.EnumValue
}
-func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number }
-func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
-func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {}
+func (ed *EnumValue) Number() protoreflect.EnumNumber { return ed.L1.Number }
+func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
+func (ed *EnumValue) ProtoType(protoreflect.EnumValueDescriptor) {}
type (
Message struct {
@@ -180,14 +180,14 @@ type (
IsMessageSet bool // promoted from google.protobuf.MessageOptions
}
MessageL2 struct {
- Options func() pref.ProtoMessage
+ Options func() protoreflect.ProtoMessage
Fields Fields
Oneofs Oneofs
ReservedNames Names
ReservedRanges FieldRanges
RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality
ExtensionRanges FieldRanges
- ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges
+ ExtensionRangeOptions []func() protoreflect.ProtoMessage // must be same length as ExtensionRanges
}
Field struct {
@@ -195,10 +195,10 @@ type (
L1 FieldL1
}
FieldL1 struct {
- Options func() pref.ProtoMessage
- Number pref.FieldNumber
- Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers
- Kind pref.Kind
+ Options func() protoreflect.ProtoMessage
+ Number protoreflect.FieldNumber
+ Cardinality protoreflect.Cardinality // must be consistent with Message.RequiredNumbers
+ Kind protoreflect.Kind
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsWeak bool // promoted from google.protobuf.FieldOptions
@@ -207,9 +207,9 @@ type (
HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions
EnforceUTF8 bool // promoted from google.protobuf.FieldOptions
Default defaultValue
- ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields
- Enum pref.EnumDescriptor
- Message pref.MessageDescriptor
+ ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
+ Enum protoreflect.EnumDescriptor
+ Message protoreflect.MessageDescriptor
}
Oneof struct {
@@ -217,35 +217,35 @@ type (
L1 OneofL1
}
OneofL1 struct {
- Options func() pref.ProtoMessage
+ Options func() protoreflect.ProtoMessage
Fields OneofFields // must be consistent with Message.Fields.ContainingOneof
}
)
-func (md *Message) Options() pref.ProtoMessage {
+func (md *Message) Options() protoreflect.ProtoMessage {
if f := md.lazyInit().Options; f != nil {
return f()
}
return descopts.Message
}
-func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry }
-func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields }
-func (md *Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs }
-func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames }
-func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges }
-func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers }
-func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges }
-func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage {
+func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry }
+func (md *Message) Fields() protoreflect.FieldDescriptors { return &md.lazyInit().Fields }
+func (md *Message) Oneofs() protoreflect.OneofDescriptors { return &md.lazyInit().Oneofs }
+func (md *Message) ReservedNames() protoreflect.Names { return &md.lazyInit().ReservedNames }
+func (md *Message) ReservedRanges() protoreflect.FieldRanges { return &md.lazyInit().ReservedRanges }
+func (md *Message) RequiredNumbers() protoreflect.FieldNumbers { return &md.lazyInit().RequiredNumbers }
+func (md *Message) ExtensionRanges() protoreflect.FieldRanges { return &md.lazyInit().ExtensionRanges }
+func (md *Message) ExtensionRangeOptions(i int) protoreflect.ProtoMessage {
if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil {
return f()
}
return descopts.ExtensionRange
}
-func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums }
-func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages }
-func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions }
-func (md *Message) ProtoType(pref.MessageDescriptor) {}
-func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
+func (md *Message) Enums() protoreflect.EnumDescriptors { return &md.L1.Enums }
+func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L1.Messages }
+func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions }
+func (md *Message) ProtoType(protoreflect.MessageDescriptor) {}
+func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
func (md *Message) lazyInit() *MessageL2 {
md.L0.ParentFile.lazyInit() // implicitly initializes L2
return md.L2
@@ -260,28 +260,28 @@ func (md *Message) IsMessageSet() bool {
return md.L1.IsMessageSet
}
-func (fd *Field) Options() pref.ProtoMessage {
+func (fd *Field) Options() protoreflect.ProtoMessage {
if f := fd.L1.Options; f != nil {
return f()
}
return descopts.Field
}
-func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number }
-func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality }
-func (fd *Field) Kind() pref.Kind { return fd.L1.Kind }
-func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON }
-func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) }
-func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) }
+func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number }
+func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality }
+func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind }
+func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON }
+func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) }
+func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) }
func (fd *Field) HasPresence() bool {
- return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil)
+ return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil)
}
func (fd *Field) HasOptionalKeyword() bool {
- return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
+ return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
}
func (fd *Field) IsPacked() bool {
- if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated {
+ if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated {
switch fd.L1.Kind {
- case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind:
+ case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
default:
return true
}
@@ -290,40 +290,40 @@ func (fd *Field) IsPacked() bool {
}
func (fd *Field) IsExtension() bool { return false }
func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
-func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() }
+func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
-func (fd *Field) MapKey() pref.FieldDescriptor {
+func (fd *Field) MapKey() protoreflect.FieldDescriptor {
if !fd.IsMap() {
return nil
}
return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number)
}
-func (fd *Field) MapValue() pref.FieldDescriptor {
+func (fd *Field) MapValue() protoreflect.FieldDescriptor {
if !fd.IsMap() {
return nil
}
return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number)
}
-func (fd *Field) HasDefault() bool { return fd.L1.Default.has }
-func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) }
-func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum }
-func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof }
-func (fd *Field) ContainingMessage() pref.MessageDescriptor {
- return fd.L0.Parent.(pref.MessageDescriptor)
+func (fd *Field) HasDefault() bool { return fd.L1.Default.has }
+func (fd *Field) Default() protoreflect.Value { return fd.L1.Default.get(fd) }
+func (fd *Field) DefaultEnumValue() protoreflect.EnumValueDescriptor { return fd.L1.Default.enum }
+func (fd *Field) ContainingOneof() protoreflect.OneofDescriptor { return fd.L1.ContainingOneof }
+func (fd *Field) ContainingMessage() protoreflect.MessageDescriptor {
+ return fd.L0.Parent.(protoreflect.MessageDescriptor)
}
-func (fd *Field) Enum() pref.EnumDescriptor {
+func (fd *Field) Enum() protoreflect.EnumDescriptor {
return fd.L1.Enum
}
-func (fd *Field) Message() pref.MessageDescriptor {
+func (fd *Field) Message() protoreflect.MessageDescriptor {
if fd.L1.IsWeak {
if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil {
- return d.(pref.MessageDescriptor)
+ return d.(protoreflect.MessageDescriptor)
}
}
return fd.L1.Message
}
-func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
-func (fd *Field) ProtoType(pref.FieldDescriptor) {}
+func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
+func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8
// validation for the string field. This exists for Google-internal use only
@@ -336,21 +336,21 @@ func (fd *Field) EnforceUTF8() bool {
if fd.L1.HasEnforceUTF8 {
return fd.L1.EnforceUTF8
}
- return fd.L0.ParentFile.L1.Syntax == pref.Proto3
+ return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3
}
func (od *Oneof) IsSynthetic() bool {
- return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword()
+ return od.L0.ParentFile.L1.Syntax == protoreflect.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword()
}
-func (od *Oneof) Options() pref.ProtoMessage {
+func (od *Oneof) Options() protoreflect.ProtoMessage {
if f := od.L1.Options; f != nil {
return f()
}
return descopts.Oneof
}
-func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields }
-func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) }
-func (od *Oneof) ProtoType(pref.OneofDescriptor) {}
+func (od *Oneof) Fields() protoreflect.FieldDescriptors { return &od.L1.Fields }
+func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) }
+func (od *Oneof) ProtoType(protoreflect.OneofDescriptor) {}
type (
Extension struct {
@@ -359,55 +359,57 @@ type (
L2 *ExtensionL2 // protected by fileDesc.once
}
ExtensionL1 struct {
- Number pref.FieldNumber
- Extendee pref.MessageDescriptor
- Cardinality pref.Cardinality
- Kind pref.Kind
+ Number protoreflect.FieldNumber
+ Extendee protoreflect.MessageDescriptor
+ Cardinality protoreflect.Cardinality
+ Kind protoreflect.Kind
}
ExtensionL2 struct {
- Options func() pref.ProtoMessage
+ Options func() protoreflect.ProtoMessage
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsPacked bool // promoted from google.protobuf.FieldOptions
Default defaultValue
- Enum pref.EnumDescriptor
- Message pref.MessageDescriptor
+ Enum protoreflect.EnumDescriptor
+ Message protoreflect.MessageDescriptor
}
)
-func (xd *Extension) Options() pref.ProtoMessage {
+func (xd *Extension) Options() protoreflect.ProtoMessage {
if f := xd.lazyInit().Options; f != nil {
return f()
}
return descopts.Field
}
-func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number }
-func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality }
-func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind }
-func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON }
-func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) }
-func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) }
-func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated }
+func (xd *Extension) Number() protoreflect.FieldNumber { return xd.L1.Number }
+func (xd *Extension) Cardinality() protoreflect.Cardinality { return xd.L1.Cardinality }
+func (xd *Extension) Kind() protoreflect.Kind { return xd.L1.Kind }
+func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON }
+func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) }
+func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) }
+func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != protoreflect.Repeated }
func (xd *Extension) HasOptionalKeyword() bool {
- return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional
-}
-func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
-func (xd *Extension) IsExtension() bool { return true }
-func (xd *Extension) IsWeak() bool { return false }
-func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated }
-func (xd *Extension) IsMap() bool { return false }
-func (xd *Extension) MapKey() pref.FieldDescriptor { return nil }
-func (xd *Extension) MapValue() pref.FieldDescriptor { return nil }
-func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has }
-func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) }
-func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum }
-func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil }
-func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee }
-func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum }
-func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message }
-func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) }
-func (xd *Extension) ProtoType(pref.FieldDescriptor) {}
-func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {}
+ return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional
+}
+func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
+func (xd *Extension) IsExtension() bool { return true }
+func (xd *Extension) IsWeak() bool { return false }
+func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated }
+func (xd *Extension) IsMap() bool { return false }
+func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil }
+func (xd *Extension) MapValue() protoreflect.FieldDescriptor { return nil }
+func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has }
+func (xd *Extension) Default() protoreflect.Value { return xd.lazyInit().Default.get(xd) }
+func (xd *Extension) DefaultEnumValue() protoreflect.EnumValueDescriptor {
+ return xd.lazyInit().Default.enum
+}
+func (xd *Extension) ContainingOneof() protoreflect.OneofDescriptor { return nil }
+func (xd *Extension) ContainingMessage() protoreflect.MessageDescriptor { return xd.L1.Extendee }
+func (xd *Extension) Enum() protoreflect.EnumDescriptor { return xd.lazyInit().Enum }
+func (xd *Extension) Message() protoreflect.MessageDescriptor { return xd.lazyInit().Message }
+func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) }
+func (xd *Extension) ProtoType(protoreflect.FieldDescriptor) {}
+func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {}
func (xd *Extension) lazyInit() *ExtensionL2 {
xd.L0.ParentFile.lazyInit() // implicitly initializes L2
return xd.L2
@@ -421,7 +423,7 @@ type (
}
ServiceL1 struct{}
ServiceL2 struct {
- Options func() pref.ProtoMessage
+ Options func() protoreflect.ProtoMessage
Methods Methods
}
@@ -430,48 +432,48 @@ type (
L1 MethodL1
}
MethodL1 struct {
- Options func() pref.ProtoMessage
- Input pref.MessageDescriptor
- Output pref.MessageDescriptor
+ Options func() protoreflect.ProtoMessage
+ Input protoreflect.MessageDescriptor
+ Output protoreflect.MessageDescriptor
IsStreamingClient bool
IsStreamingServer bool
}
)
-func (sd *Service) Options() pref.ProtoMessage {
+func (sd *Service) Options() protoreflect.ProtoMessage {
if f := sd.lazyInit().Options; f != nil {
return f()
}
return descopts.Service
}
-func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods }
-func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) }
-func (sd *Service) ProtoType(pref.ServiceDescriptor) {}
-func (sd *Service) ProtoInternal(pragma.DoNotImplement) {}
+func (sd *Service) Methods() protoreflect.MethodDescriptors { return &sd.lazyInit().Methods }
+func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) }
+func (sd *Service) ProtoType(protoreflect.ServiceDescriptor) {}
+func (sd *Service) ProtoInternal(pragma.DoNotImplement) {}
func (sd *Service) lazyInit() *ServiceL2 {
sd.L0.ParentFile.lazyInit() // implicitly initializes L2
return sd.L2
}
-func (md *Method) Options() pref.ProtoMessage {
+func (md *Method) Options() protoreflect.ProtoMessage {
if f := md.L1.Options; f != nil {
return f()
}
return descopts.Method
}
-func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input }
-func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output }
-func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient }
-func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer }
-func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
-func (md *Method) ProtoType(pref.MethodDescriptor) {}
-func (md *Method) ProtoInternal(pragma.DoNotImplement) {}
+func (md *Method) Input() protoreflect.MessageDescriptor { return md.L1.Input }
+func (md *Method) Output() protoreflect.MessageDescriptor { return md.L1.Output }
+func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient }
+func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer }
+func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
+func (md *Method) ProtoType(protoreflect.MethodDescriptor) {}
+func (md *Method) ProtoInternal(pragma.DoNotImplement) {}
// Surrogate files can be used to create standalone descriptors
// where the syntax is the only information derived from the parent file.
var (
- SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}}
- SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}}
+ SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
+ SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
)
type (
@@ -479,24 +481,24 @@ type (
L0 BaseL0
}
BaseL0 struct {
- FullName pref.FullName // must be populated
- ParentFile *File // must be populated
- Parent pref.Descriptor
+ FullName protoreflect.FullName // must be populated
+ ParentFile *File // must be populated
+ Parent protoreflect.Descriptor
Index int
}
)
-func (d *Base) Name() pref.Name { return d.L0.FullName.Name() }
-func (d *Base) FullName() pref.FullName { return d.L0.FullName }
-func (d *Base) ParentFile() pref.FileDescriptor {
+func (d *Base) Name() protoreflect.Name { return d.L0.FullName.Name() }
+func (d *Base) FullName() protoreflect.FullName { return d.L0.FullName }
+func (d *Base) ParentFile() protoreflect.FileDescriptor {
if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 {
return nil // surrogate files are not real parents
}
return d.L0.ParentFile
}
-func (d *Base) Parent() pref.Descriptor { return d.L0.Parent }
+func (d *Base) Parent() protoreflect.Descriptor { return d.L0.Parent }
func (d *Base) Index() int { return d.L0.Index }
-func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() }
+func (d *Base) Syntax() protoreflect.Syntax { return d.L0.ParentFile.Syntax() }
func (d *Base) IsPlaceholder() bool { return false }
func (d *Base) ProtoInternal(pragma.DoNotImplement) {}
@@ -513,7 +515,7 @@ func (s *stringName) InitJSON(name string) {
s.nameJSON = name
}
-func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName {
+func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {
s.once.Do(func() {
if fd.IsExtension() {
// For extensions, JSON and text are formatted the same way.
@@ -533,7 +535,7 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName {
// Format the text name.
s.nameText = string(fd.Name())
- if fd.Kind() == pref.GroupKind {
+ if fd.Kind() == protoreflect.GroupKind {
s.nameText = string(fd.Message().Name())
}
}
@@ -541,10 +543,10 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName {
return s
}
-func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON }
-func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText }
+func (s *stringName) getJSON(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameJSON }
+func (s *stringName) getText(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameText }
-func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue {
+func DefaultValue(v protoreflect.Value, ev protoreflect.EnumValueDescriptor) defaultValue {
dv := defaultValue{has: v.IsValid(), val: v, enum: ev}
if b, ok := v.Interface().([]byte); ok {
// Store a copy of the default bytes, so that we can detect
@@ -554,9 +556,9 @@ func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue {
return dv
}
-func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue {
- var evs pref.EnumValueDescriptors
- if k == pref.EnumKind {
+func unmarshalDefault(b []byte, k protoreflect.Kind, pf *File, ed protoreflect.EnumDescriptor) defaultValue {
+ var evs protoreflect.EnumValueDescriptors
+ if k == protoreflect.EnumKind {
// If the enum is declared within the same file, be careful not to
// blindly call the Values method, lest we bind ourselves in a deadlock.
if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf {
@@ -567,9 +569,9 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d
// If we are unable to resolve the enum dependency, use a placeholder
// enum value since we will not be able to parse the default value.
- if ed.IsPlaceholder() && pref.Name(b).IsValid() {
- v := pref.ValueOfEnum(0)
- ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b)))
+ if ed.IsPlaceholder() && protoreflect.Name(b).IsValid() {
+ v := protoreflect.ValueOfEnum(0)
+ ev := PlaceholderEnumValue(ed.FullName().Parent().Append(protoreflect.Name(b)))
return DefaultValue(v, ev)
}
}
@@ -583,41 +585,41 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d
type defaultValue struct {
has bool
- val pref.Value
- enum pref.EnumValueDescriptor
+ val protoreflect.Value
+ enum protoreflect.EnumValueDescriptor
bytes []byte
}
-func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value {
+func (dv *defaultValue) get(fd protoreflect.FieldDescriptor) protoreflect.Value {
// Return the zero value as the default if unpopulated.
if !dv.has {
- if fd.Cardinality() == pref.Repeated {
- return pref.Value{}
+ if fd.Cardinality() == protoreflect.Repeated {
+ return protoreflect.Value{}
}
switch fd.Kind() {
- case pref.BoolKind:
- return pref.ValueOfBool(false)
- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
- return pref.ValueOfInt32(0)
- case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
- return pref.ValueOfInt64(0)
- case pref.Uint32Kind, pref.Fixed32Kind:
- return pref.ValueOfUint32(0)
- case pref.Uint64Kind, pref.Fixed64Kind:
- return pref.ValueOfUint64(0)
- case pref.FloatKind:
- return pref.ValueOfFloat32(0)
- case pref.DoubleKind:
- return pref.ValueOfFloat64(0)
- case pref.StringKind:
- return pref.ValueOfString("")
- case pref.BytesKind:
- return pref.ValueOfBytes(nil)
- case pref.EnumKind:
+ case protoreflect.BoolKind:
+ return protoreflect.ValueOfBool(false)
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ return protoreflect.ValueOfInt32(0)
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return protoreflect.ValueOfInt64(0)
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ return protoreflect.ValueOfUint32(0)
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return protoreflect.ValueOfUint64(0)
+ case protoreflect.FloatKind:
+ return protoreflect.ValueOfFloat32(0)
+ case protoreflect.DoubleKind:
+ return protoreflect.ValueOfFloat64(0)
+ case protoreflect.StringKind:
+ return protoreflect.ValueOfString("")
+ case protoreflect.BytesKind:
+ return protoreflect.ValueOfBytes(nil)
+ case protoreflect.EnumKind:
if evs := fd.Enum().Values(); evs.Len() > 0 {
- return pref.ValueOfEnum(evs.Get(0).Number())
+ return protoreflect.ValueOfEnum(evs.Get(0).Number())
}
- return pref.ValueOfEnum(0)
+ return protoreflect.ValueOfEnum(0)
}
}
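
The per-kind zero values enumerated in defaultValue.get above are exactly what the public protoreflect API reports when a field carries no explicit default. A minimal sketch of observing this, assuming only the durationpb well-known type that ships with the protobuf module (the program is illustrative, not part of this change):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Descriptor of a compiled message; durationpb is used only as a
	// convenient example type available in any protobuf checkout.
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
	fields := md.Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		// Default() falls back to the per-kind zero value when no
		// explicit default is present, mirroring defaultValue.get.
		fmt.Printf("%s (%v) -> %v\n", fd.Name(), fd.Kind(), fd.Default().Interface())
	}
}

For Duration this prints the zero defaults of its seconds (int64) and nanos (int32) fields.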
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index 66e1fee522..4a1584c9d2 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -10,7 +10,7 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/strs"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
// fileRaw is a data struct used when initializing a file descriptor from
@@ -95,7 +95,7 @@ func (fd *File) unmarshalSeed(b []byte) {
sb := getBuilder()
defer putBuilder(sb)
- var prevField pref.FieldNumber
+ var prevField protoreflect.FieldNumber
var numEnums, numMessages, numExtensions, numServices int
var posEnums, posMessages, posExtensions, posServices int
b0 := b
@@ -110,16 +110,16 @@ func (fd *File) unmarshalSeed(b []byte) {
case genid.FileDescriptorProto_Syntax_field_number:
switch string(v) {
case "proto2":
- fd.L1.Syntax = pref.Proto2
+ fd.L1.Syntax = protoreflect.Proto2
case "proto3":
- fd.L1.Syntax = pref.Proto3
+ fd.L1.Syntax = protoreflect.Proto3
default:
panic("invalid syntax")
}
case genid.FileDescriptorProto_Name_field_number:
fd.L1.Path = sb.MakeString(v)
case genid.FileDescriptorProto_Package_field_number:
- fd.L1.Package = pref.FullName(sb.MakeString(v))
+ fd.L1.Package = protoreflect.FullName(sb.MakeString(v))
case genid.FileDescriptorProto_EnumType_field_number:
if prevField != genid.FileDescriptorProto_EnumType_field_number {
if numEnums > 0 {
@@ -163,7 +163,7 @@ func (fd *File) unmarshalSeed(b []byte) {
// If syntax is missing, it is assumed to be proto2.
if fd.L1.Syntax == 0 {
- fd.L1.Syntax = pref.Proto2
+ fd.L1.Syntax = protoreflect.Proto2
}
// Must allocate all declarations before parsing each descriptor type
@@ -219,7 +219,7 @@ func (fd *File) unmarshalSeed(b []byte) {
}
}
-func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
ed.L0.ParentFile = pf
ed.L0.Parent = pd
ed.L0.Index = i
@@ -271,12 +271,12 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc
}
}
-func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
md.L0.ParentFile = pf
md.L0.Parent = pd
md.L0.Index = i
- var prevField pref.FieldNumber
+ var prevField protoreflect.FieldNumber
var numEnums, numMessages, numExtensions int
var posEnums, posMessages, posExtensions int
b0 := b
@@ -387,7 +387,7 @@ func (md *Message) unmarshalSeedOptions(b []byte) {
}
}
-func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
xd.L0.ParentFile = pf
xd.L0.Parent = pd
xd.L0.Index = i
@@ -401,11 +401,11 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref
b = b[m:]
switch num {
case genid.FieldDescriptorProto_Number_field_number:
- xd.L1.Number = pref.FieldNumber(v)
+ xd.L1.Number = protoreflect.FieldNumber(v)
case genid.FieldDescriptorProto_Label_field_number:
- xd.L1.Cardinality = pref.Cardinality(v)
+ xd.L1.Cardinality = protoreflect.Cardinality(v)
case genid.FieldDescriptorProto_Type_field_number:
- xd.L1.Kind = pref.Kind(v)
+ xd.L1.Kind = protoreflect.Kind(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
@@ -423,7 +423,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref
}
}
-func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
sd.L0.ParentFile = pf
sd.L0.Parent = pd
sd.L0.Index = i
@@ -459,13 +459,13 @@ func putBuilder(b *strs.Builder) {
// makeFullName converts b to a protoreflect.FullName,
// where b must start with a leading dot.
-func makeFullName(sb *strs.Builder, b []byte) pref.FullName {
+func makeFullName(sb *strs.Builder, b []byte) protoreflect.FullName {
if len(b) == 0 || b[0] != '.' {
panic("name reference must be fully qualified")
}
- return pref.FullName(sb.MakeString(b[1:]))
+ return protoreflect.FullName(sb.MakeString(b[1:]))
}
-func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName {
- return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix)))
+func appendFullName(sb *strs.Builder, prefix protoreflect.FullName, suffix []byte) protoreflect.FullName {
+ return sb.AppendFullName(prefix, protoreflect.Name(strs.UnsafeString(suffix)))
}
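
Every hunk in this commit follows the same mechanical shape: the pref import alias is dropped and call sites spell out the canonical package name. A minimal before/after sketch of the rename (the example package name is arbitrary):

package example

// Before the rename, call sites went through the short alias:
//
//	import pref "google.golang.org/protobuf/reflect/protoreflect"
//	var k pref.Kind = pref.EnumKind
//
// After the rename, the canonical package name is written out:

import "google.golang.org/protobuf/reflect/protoreflect"

var k protoreflect.Kind = protoreflect.EnumKind

The behavior is identical either way; only readability and consistency with the rest of the module change.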
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index 198451e3ec..736a19a75b 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -13,7 +13,7 @@ import (
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
func (fd *File) lazyRawInit() {
@@ -39,10 +39,10 @@ func (file *File) resolveMessages() {
// Resolve message field dependency.
switch fd.L1.Kind {
- case pref.EnumKind:
+ case protoreflect.EnumKind:
fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx)
depIdx++
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
depIdx++
}
@@ -62,10 +62,10 @@ func (file *File) resolveExtensions() {
// Resolve extension field dependency.
switch xd.L1.Kind {
- case pref.EnumKind:
+ case protoreflect.EnumKind:
xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx)
depIdx++
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx)
depIdx++
}
@@ -92,7 +92,7 @@ func (file *File) resolveServices() {
}
}
-func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor {
+func (file *File) resolveEnumDependency(ed protoreflect.EnumDescriptor, i, j int32) protoreflect.EnumDescriptor {
r := file.builder.FileRegistry
if r, ok := r.(resolverByIndex); ok {
if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil {
@@ -105,12 +105,12 @@ func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref
}
}
if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil {
- return d.(pref.EnumDescriptor)
+ return d.(protoreflect.EnumDescriptor)
}
return ed
}
-func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor {
+func (file *File) resolveMessageDependency(md protoreflect.MessageDescriptor, i, j int32) protoreflect.MessageDescriptor {
r := file.builder.FileRegistry
if r, ok := r.(resolverByIndex); ok {
if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil {
@@ -123,7 +123,7 @@ func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32
}
}
if d, _ := r.FindDescriptorByName(md.FullName()); d != nil {
- return d.(pref.MessageDescriptor)
+ return d.(protoreflect.MessageDescriptor)
}
return md
}
@@ -158,7 +158,7 @@ func (fd *File) unmarshalFull(b []byte) {
if imp == nil {
imp = PlaceholderFile(path)
}
- fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp})
+ fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp})
case genid.FileDescriptorProto_EnumType_field_number:
fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
enumIdx++
@@ -199,7 +199,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
case genid.EnumDescriptorProto_Value_field_number:
rawValues = append(rawValues, v)
case genid.EnumDescriptorProto_ReservedName_field_number:
- ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v)))
+ ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v)))
case genid.EnumDescriptorProto_ReservedRange_field_number:
ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v))
case genid.EnumDescriptorProto_Options_field_number:
@@ -219,7 +219,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions)
}
-func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) {
+func unmarshalEnumReservedRange(b []byte) (r [2]protoreflect.EnumNumber) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
@@ -229,9 +229,9 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) {
b = b[m:]
switch num {
case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number:
- r[0] = pref.EnumNumber(v)
+ r[0] = protoreflect.EnumNumber(v)
case genid.EnumDescriptorProto_EnumReservedRange_End_field_number:
- r[1] = pref.EnumNumber(v)
+ r[1] = protoreflect.EnumNumber(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
@@ -241,7 +241,7 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) {
return r
}
-func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
vd.L0.ParentFile = pf
vd.L0.Parent = pd
vd.L0.Index = i
@@ -256,7 +256,7 @@ func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref
b = b[m:]
switch num {
case genid.EnumValueDescriptorProto_Number_field_number:
- vd.L1.Number = pref.EnumNumber(v)
+ vd.L1.Number = protoreflect.EnumNumber(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
@@ -294,7 +294,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
case genid.DescriptorProto_OneofDecl_field_number:
rawOneofs = append(rawOneofs, v)
case genid.DescriptorProto_ReservedName_field_number:
- md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v)))
+ md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v)))
case genid.DescriptorProto_ReservedRange_field_number:
md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v))
case genid.DescriptorProto_ExtensionRange_field_number:
@@ -326,7 +326,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
for i, b := range rawFields {
fd := &md.L2.Fields.List[i]
fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i)
- if fd.L1.Cardinality == pref.Required {
+ if fd.L1.Cardinality == protoreflect.Required {
md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number)
}
}
@@ -359,7 +359,7 @@ func (md *Message) unmarshalOptions(b []byte) {
}
}
-func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) {
+func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
@@ -369,9 +369,9 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) {
b = b[m:]
switch num {
case genid.DescriptorProto_ReservedRange_Start_field_number:
- r[0] = pref.FieldNumber(v)
+ r[0] = protoreflect.FieldNumber(v)
case genid.DescriptorProto_ReservedRange_End_field_number:
- r[1] = pref.FieldNumber(v)
+ r[1] = protoreflect.FieldNumber(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
@@ -381,7 +381,7 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) {
return r
}
-func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) {
+func unmarshalMessageExtensionRange(b []byte) (r [2]protoreflect.FieldNumber, rawOptions []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
@@ -391,9 +391,9 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions
b = b[m:]
switch num {
case genid.DescriptorProto_ExtensionRange_Start_field_number:
- r[0] = pref.FieldNumber(v)
+ r[0] = protoreflect.FieldNumber(v)
case genid.DescriptorProto_ExtensionRange_End_field_number:
- r[1] = pref.FieldNumber(v)
+ r[1] = protoreflect.FieldNumber(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
@@ -410,7 +410,7 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions
return r, rawOptions
}
-func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
fd.L0.ParentFile = pf
fd.L0.Parent = pd
fd.L0.Index = i
@@ -426,11 +426,11 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des
b = b[m:]
switch num {
case genid.FieldDescriptorProto_Number_field_number:
- fd.L1.Number = pref.FieldNumber(v)
+ fd.L1.Number = protoreflect.FieldNumber(v)
case genid.FieldDescriptorProto_Label_field_number:
- fd.L1.Cardinality = pref.Cardinality(v)
+ fd.L1.Cardinality = protoreflect.Cardinality(v)
case genid.FieldDescriptorProto_Type_field_number:
- fd.L1.Kind = pref.Kind(v)
+ fd.L1.Kind = protoreflect.Kind(v)
case genid.FieldDescriptorProto_OneofIndex_field_number:
// In Message.unmarshalFull, we allocate slices for both
// the field and oneof descriptors before unmarshaling either
@@ -453,7 +453,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des
case genid.FieldDescriptorProto_JsonName_field_number:
fd.L1.StringName.InitJSON(sb.MakeString(v))
case genid.FieldDescriptorProto_DefaultValue_field_number:
- fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages
+ fd.L1.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages
case genid.FieldDescriptorProto_TypeName_field_number:
rawTypeName = v
case genid.FieldDescriptorProto_Options_field_number:
@@ -468,9 +468,9 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des
if rawTypeName != nil {
name := makeFullName(sb, rawTypeName)
switch fd.L1.Kind {
- case pref.EnumKind:
+ case protoreflect.EnumKind:
fd.L1.Enum = PlaceholderEnum(name)
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
fd.L1.Message = PlaceholderMessage(name)
}
}
@@ -504,7 +504,7 @@ func (fd *Field) unmarshalOptions(b []byte) {
}
}
-func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
od.L0.ParentFile = pf
od.L0.Parent = pd
od.L0.Index = i
@@ -553,7 +553,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
case genid.FieldDescriptorProto_JsonName_field_number:
xd.L2.StringName.InitJSON(sb.MakeString(v))
case genid.FieldDescriptorProto_DefaultValue_field_number:
- xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions
+ xd.L2.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions
case genid.FieldDescriptorProto_TypeName_field_number:
rawTypeName = v
case genid.FieldDescriptorProto_Options_field_number:
@@ -568,9 +568,9 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
if rawTypeName != nil {
name := makeFullName(sb, rawTypeName)
switch xd.L1.Kind {
- case pref.EnumKind:
+ case protoreflect.EnumKind:
xd.L2.Enum = PlaceholderEnum(name)
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
xd.L2.Message = PlaceholderMessage(name)
}
}
@@ -627,7 +627,7 @@ func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) {
sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions)
}
-func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
md.L0.ParentFile = pf
md.L0.Parent = pd
md.L0.Index = i
@@ -680,18 +680,18 @@ func appendOptions(dst, src []byte) []byte {
//
// The type of message to unmarshal to is passed as a pointer since the
// vars in descopts may not yet be populated at the time this function is called.
-func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage {
+func (db *Builder) optionsUnmarshaler(p *protoreflect.ProtoMessage, b []byte) func() protoreflect.ProtoMessage {
if b == nil {
return nil
}
- var opts pref.ProtoMessage
+ var opts protoreflect.ProtoMessage
var once sync.Once
- return func() pref.ProtoMessage {
+ return func() protoreflect.ProtoMessage {
once.Do(func() {
if *p == nil {
panic("Descriptor.Options called without importing the descriptor package")
}
- opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage)
+ opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(protoreflect.ProtoMessage)
if err := (proto.UnmarshalOptions{
AllowPartial: true,
Resolver: db.TypeResolver,
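
optionsUnmarshaler above hands back a closure guarded by sync.Once, so the raw option bytes are decoded at most once and only when Options() is actually called. A minimal sketch of the same lazy-decode shape, using a generic helper (the name lazy is hypothetical, not part of this package; generics require Go 1.18+):

package example

import "sync"

// lazy wraps decode so it runs exactly once, on first call; every
// later call returns the cached value. This is the shape of
// Builder.optionsUnmarshaler: capture the inputs, defer the work.
func lazy[T any](decode func() T) func() T {
	var (
		once sync.Once
		v    T
	)
	return func() T {
		once.Do(func() { v = decode() })
		return v
	}
}

A caller writes opts := lazy(func() Options { ... }) and invokes opts() wherever the value is needed, paying the decode cost only on first use.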
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
index aa294fff99..e3b6587da6 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
@@ -17,31 +17,30 @@ import (
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
- pref "google.golang.org/protobuf/reflect/protoreflect"
)
-type FileImports []pref.FileImport
+type FileImports []protoreflect.FileImport
func (p *FileImports) Len() int { return len(*p) }
-func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] }
+func (p *FileImports) Get(i int) protoreflect.FileImport { return (*p)[i] }
func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {}
type Names struct {
- List []pref.Name
+ List []protoreflect.Name
once sync.Once
- has map[pref.Name]int // protected by once
+ has map[protoreflect.Name]int // protected by once
}
func (p *Names) Len() int { return len(p.List) }
-func (p *Names) Get(i int) pref.Name { return p.List[i] }
-func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 }
+func (p *Names) Get(i int) protoreflect.Name { return p.List[i] }
+func (p *Names) Has(s protoreflect.Name) bool { return p.lazyInit().has[s] > 0 }
func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *Names) ProtoInternal(pragma.DoNotImplement) {}
func (p *Names) lazyInit() *Names {
p.once.Do(func() {
if len(p.List) > 0 {
- p.has = make(map[pref.Name]int, len(p.List))
+ p.has = make(map[protoreflect.Name]int, len(p.List))
for _, s := range p.List {
p.has[s] = p.has[s] + 1
}
@@ -67,14 +66,14 @@ func (p *Names) CheckValid() error {
}
type EnumRanges struct {
- List [][2]pref.EnumNumber // start inclusive; end inclusive
+ List [][2]protoreflect.EnumNumber // start inclusive; end inclusive
once sync.Once
- sorted [][2]pref.EnumNumber // protected by once
+ sorted [][2]protoreflect.EnumNumber // protected by once
}
-func (p *EnumRanges) Len() int { return len(p.List) }
-func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] }
-func (p *EnumRanges) Has(n pref.EnumNumber) bool {
+func (p *EnumRanges) Len() int { return len(p.List) }
+func (p *EnumRanges) Get(i int) [2]protoreflect.EnumNumber { return p.List[i] }
+func (p *EnumRanges) Has(n protoreflect.EnumNumber) bool {
for ls := p.lazyInit().sorted; len(ls) > 0; {
i := len(ls) / 2
switch r := enumRange(ls[i]); {
@@ -129,14 +128,14 @@ func (r enumRange) String() string {
}
type FieldRanges struct {
- List [][2]pref.FieldNumber // start inclusive; end exclusive
+ List [][2]protoreflect.FieldNumber // start inclusive; end exclusive
once sync.Once
- sorted [][2]pref.FieldNumber // protected by once
+ sorted [][2]protoreflect.FieldNumber // protected by once
}
-func (p *FieldRanges) Len() int { return len(p.List) }
-func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] }
-func (p *FieldRanges) Has(n pref.FieldNumber) bool {
+func (p *FieldRanges) Len() int { return len(p.List) }
+func (p *FieldRanges) Get(i int) [2]protoreflect.FieldNumber { return p.List[i] }
+func (p *FieldRanges) Has(n protoreflect.FieldNumber) bool {
for ls := p.lazyInit().sorted; len(ls) > 0; {
i := len(ls) / 2
switch r := fieldRange(ls[i]); {
@@ -221,17 +220,17 @@ func (r fieldRange) String() string {
}
type FieldNumbers struct {
- List []pref.FieldNumber
+ List []protoreflect.FieldNumber
once sync.Once
- has map[pref.FieldNumber]struct{} // protected by once
+ has map[protoreflect.FieldNumber]struct{} // protected by once
}
-func (p *FieldNumbers) Len() int { return len(p.List) }
-func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] }
-func (p *FieldNumbers) Has(n pref.FieldNumber) bool {
+func (p *FieldNumbers) Len() int { return len(p.List) }
+func (p *FieldNumbers) Get(i int) protoreflect.FieldNumber { return p.List[i] }
+func (p *FieldNumbers) Has(n protoreflect.FieldNumber) bool {
p.once.Do(func() {
if len(p.List) > 0 {
- p.has = make(map[pref.FieldNumber]struct{}, len(p.List))
+ p.has = make(map[protoreflect.FieldNumber]struct{}, len(p.List))
for _, n := range p.List {
p.has[n] = struct{}{}
}
@@ -244,30 +243,38 @@ func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList
func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {}
type OneofFields struct {
- List []pref.FieldDescriptor
+ List []protoreflect.FieldDescriptor
once sync.Once
- byName map[pref.Name]pref.FieldDescriptor // protected by once
- byJSON map[string]pref.FieldDescriptor // protected by once
- byText map[string]pref.FieldDescriptor // protected by once
- byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once
+ byName map[protoreflect.Name]protoreflect.FieldDescriptor // protected by once
+ byJSON map[string]protoreflect.FieldDescriptor // protected by once
+ byText map[string]protoreflect.FieldDescriptor // protected by once
+ byNum map[protoreflect.FieldNumber]protoreflect.FieldDescriptor // protected by once
}
-func (p *OneofFields) Len() int { return len(p.List) }
-func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] }
-func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] }
-func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] }
-func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] }
-func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] }
-func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
-func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {}
+func (p *OneofFields) Len() int { return len(p.List) }
+func (p *OneofFields) Get(i int) protoreflect.FieldDescriptor { return p.List[i] }
+func (p *OneofFields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor {
+ return p.lazyInit().byName[s]
+}
+func (p *OneofFields) ByJSONName(s string) protoreflect.FieldDescriptor {
+ return p.lazyInit().byJSON[s]
+}
+func (p *OneofFields) ByTextName(s string) protoreflect.FieldDescriptor {
+ return p.lazyInit().byText[s]
+}
+func (p *OneofFields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor {
+ return p.lazyInit().byNum[n]
+}
+func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {}
func (p *OneofFields) lazyInit() *OneofFields {
p.once.Do(func() {
if len(p.List) > 0 {
- p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List))
- p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List))
- p.byText = make(map[string]pref.FieldDescriptor, len(p.List))
- p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List))
+ p.byName = make(map[protoreflect.Name]protoreflect.FieldDescriptor, len(p.List))
+ p.byJSON = make(map[string]protoreflect.FieldDescriptor, len(p.List))
+ p.byText = make(map[string]protoreflect.FieldDescriptor, len(p.List))
+ p.byNum = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor, len(p.List))
for _, f := range p.List {
// Field names and numbers are guaranteed to be unique.
p.byName[f.Name()] = f
@@ -284,123 +291,123 @@ type SourceLocations struct {
// List is a list of SourceLocations.
// The SourceLocation.Next field does not need to be populated
// as it will be lazily populated upon first need.
- List []pref.SourceLocation
+ List []protoreflect.SourceLocation
// File is the parent file descriptor that these locations are relative to.
// If non-nil, ByDescriptor verifies that the provided descriptor
// is a child of this file descriptor.
- File pref.FileDescriptor
+ File protoreflect.FileDescriptor
once sync.Once
byPath map[pathKey]int
}
-func (p *SourceLocations) Len() int { return len(p.List) }
-func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] }
-func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation {
+func (p *SourceLocations) Len() int { return len(p.List) }
+func (p *SourceLocations) Get(i int) protoreflect.SourceLocation { return p.lazyInit().List[i] }
+func (p *SourceLocations) byKey(k pathKey) protoreflect.SourceLocation {
if i, ok := p.lazyInit().byPath[k]; ok {
return p.List[i]
}
- return pref.SourceLocation{}
+ return protoreflect.SourceLocation{}
}
-func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation {
+func (p *SourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation {
return p.byKey(newPathKey(path))
}
-func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation {
+func (p *SourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation {
if p.File != nil && desc != nil && p.File != desc.ParentFile() {
- return pref.SourceLocation{} // mismatching parent files
+ return protoreflect.SourceLocation{} // mismatching parent files
}
var pathArr [16]int32
path := pathArr[:0]
for {
switch desc.(type) {
- case pref.FileDescriptor:
+ case protoreflect.FileDescriptor:
// Reverse the path since it was constructed in reverse.
for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
path[i], path[j] = path[j], path[i]
}
return p.byKey(newPathKey(path))
- case pref.MessageDescriptor:
+ case protoreflect.MessageDescriptor:
path = append(path, int32(desc.Index()))
desc = desc.Parent()
switch desc.(type) {
- case pref.FileDescriptor:
+ case protoreflect.FileDescriptor:
path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number))
- case pref.MessageDescriptor:
+ case protoreflect.MessageDescriptor:
path = append(path, int32(genid.DescriptorProto_NestedType_field_number))
default:
- return pref.SourceLocation{}
+ return protoreflect.SourceLocation{}
}
- case pref.FieldDescriptor:
- isExtension := desc.(pref.FieldDescriptor).IsExtension()
+ case protoreflect.FieldDescriptor:
+ isExtension := desc.(protoreflect.FieldDescriptor).IsExtension()
path = append(path, int32(desc.Index()))
desc = desc.Parent()
if isExtension {
switch desc.(type) {
- case pref.FileDescriptor:
+ case protoreflect.FileDescriptor:
path = append(path, int32(genid.FileDescriptorProto_Extension_field_number))
- case pref.MessageDescriptor:
+ case protoreflect.MessageDescriptor:
path = append(path, int32(genid.DescriptorProto_Extension_field_number))
default:
- return pref.SourceLocation{}
+ return protoreflect.SourceLocation{}
}
} else {
switch desc.(type) {
- case pref.MessageDescriptor:
+ case protoreflect.MessageDescriptor:
path = append(path, int32(genid.DescriptorProto_Field_field_number))
default:
- return pref.SourceLocation{}
+ return protoreflect.SourceLocation{}
}
}
- case pref.OneofDescriptor:
+ case protoreflect.OneofDescriptor:
path = append(path, int32(desc.Index()))
desc = desc.Parent()
switch desc.(type) {
- case pref.MessageDescriptor:
+ case protoreflect.MessageDescriptor:
path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number))
default:
- return pref.SourceLocation{}
+ return protoreflect.SourceLocation{}
}
- case pref.EnumDescriptor:
+ case protoreflect.EnumDescriptor:
path = append(path, int32(desc.Index()))
desc = desc.Parent()
switch desc.(type) {
- case pref.FileDescriptor:
+ case protoreflect.FileDescriptor:
path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number))
- case pref.MessageDescriptor:
+ case protoreflect.MessageDescriptor:
path = append(path, int32(genid.DescriptorProto_EnumType_field_number))
default:
- return pref.SourceLocation{}
+ return protoreflect.SourceLocation{}
}
- case pref.EnumValueDescriptor:
+ case protoreflect.EnumValueDescriptor:
path = append(path, int32(desc.Index()))
desc = desc.Parent()
switch desc.(type) {
- case pref.EnumDescriptor:
+ case protoreflect.EnumDescriptor:
path = append(path, int32(genid.EnumDescriptorProto_Value_field_number))
default:
- return pref.SourceLocation{}
+ return protoreflect.SourceLocation{}
}
- case pref.ServiceDescriptor:
+ case protoreflect.ServiceDescriptor:
path = append(path, int32(desc.Index()))
desc = desc.Parent()
switch desc.(type) {
- case pref.FileDescriptor:
+ case protoreflect.FileDescriptor:
path = append(path, int32(genid.FileDescriptorProto_Service_field_number))
default:
- return pref.SourceLocation{}
+ return protoreflect.SourceLocation{}
}
- case pref.MethodDescriptor:
+ case protoreflect.MethodDescriptor:
path = append(path, int32(desc.Index()))
desc = desc.Parent()
switch desc.(type) {
- case pref.ServiceDescriptor:
+ case protoreflect.ServiceDescriptor:
path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number))
default:
- return pref.SourceLocation{}
+ return protoreflect.SourceLocation{}
}
default:
- return pref.SourceLocation{}
+ return protoreflect.SourceLocation{}
}
}
}
@@ -435,7 +442,7 @@ type pathKey struct {
str string // used if the path does not fit in arr
}
-func newPathKey(p pref.SourcePath) (k pathKey) {
+func newPathKey(p protoreflect.SourcePath) (k pathKey) {
if len(p) < len(k.arr) {
for i, ps := range p {
if ps < 0 || math.MaxUint8 <= ps {
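
Elsewhere in this file, EnumRanges.Has and FieldRanges.Has answer membership queries with a binary search over the lazily sorted range list. A minimal standalone sketch of that search over inclusive [start, end] ranges (the helper name hasInRanges is hypothetical):

package example

// hasInRanges reports whether n falls inside any of the sorted,
// non-overlapping, inclusive [start, end] ranges, using the same
// halving loop as EnumRanges.Has and FieldRanges.Has.
func hasInRanges(ranges [][2]int32, n int32) bool {
	for ls := ranges; len(ls) > 0; {
		i := len(ls) / 2
		switch r := ls[i]; {
		case n < r[0]:
			ls = ls[:i] // target is below this range
		case n > r[1]:
			ls = ls[i+1:] // target is above this range
		default:
			return true // r[0] <= n <= r[1]
		}
	}
	return false
}

Note that FieldRanges stores end-exclusive bounds in List and adapts when sorting; the sketch assumes inclusive bounds throughout for simplicity.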
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
index dbf2c605bf..28240ebc5c 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
@@ -7,7 +7,7 @@ package filedesc
import (
"google.golang.org/protobuf/internal/descopts"
"google.golang.org/protobuf/internal/pragma"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
var (
@@ -30,78 +30,80 @@ var (
// PlaceholderFile is a placeholder, representing only the file path.
type PlaceholderFile string
-func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f }
-func (f PlaceholderFile) Parent() pref.Descriptor { return nil }
-func (f PlaceholderFile) Index() int { return 0 }
-func (f PlaceholderFile) Syntax() pref.Syntax { return 0 }
-func (f PlaceholderFile) Name() pref.Name { return "" }
-func (f PlaceholderFile) FullName() pref.FullName { return "" }
-func (f PlaceholderFile) IsPlaceholder() bool { return true }
-func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File }
-func (f PlaceholderFile) Path() string { return string(f) }
-func (f PlaceholderFile) Package() pref.FullName { return "" }
-func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles }
-func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages }
-func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums }
-func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions }
-func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices }
-func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations }
-func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return }
-func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return }
+func (f PlaceholderFile) ParentFile() protoreflect.FileDescriptor { return f }
+func (f PlaceholderFile) Parent() protoreflect.Descriptor { return nil }
+func (f PlaceholderFile) Index() int { return 0 }
+func (f PlaceholderFile) Syntax() protoreflect.Syntax { return 0 }
+func (f PlaceholderFile) Name() protoreflect.Name { return "" }
+func (f PlaceholderFile) FullName() protoreflect.FullName { return "" }
+func (f PlaceholderFile) IsPlaceholder() bool { return true }
+func (f PlaceholderFile) Options() protoreflect.ProtoMessage { return descopts.File }
+func (f PlaceholderFile) Path() string { return string(f) }
+func (f PlaceholderFile) Package() protoreflect.FullName { return "" }
+func (f PlaceholderFile) Imports() protoreflect.FileImports { return emptyFiles }
+func (f PlaceholderFile) Messages() protoreflect.MessageDescriptors { return emptyMessages }
+func (f PlaceholderFile) Enums() protoreflect.EnumDescriptors { return emptyEnums }
+func (f PlaceholderFile) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions }
+func (f PlaceholderFile) Services() protoreflect.ServiceDescriptors { return emptyServices }
+func (f PlaceholderFile) SourceLocations() protoreflect.SourceLocations { return emptySourceLocations }
+func (f PlaceholderFile) ProtoType(protoreflect.FileDescriptor) { return }
+func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return }
// PlaceholderEnum is a placeholder, representing only the full name.
-type PlaceholderEnum pref.FullName
+type PlaceholderEnum protoreflect.FullName
-func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil }
-func (e PlaceholderEnum) Parent() pref.Descriptor { return nil }
-func (e PlaceholderEnum) Index() int { return 0 }
-func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 }
-func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() }
-func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) }
-func (e PlaceholderEnum) IsPlaceholder() bool { return true }
-func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum }
-func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues }
-func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames }
-func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges }
-func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return }
-func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return }
+func (e PlaceholderEnum) ParentFile() protoreflect.FileDescriptor { return nil }
+func (e PlaceholderEnum) Parent() protoreflect.Descriptor { return nil }
+func (e PlaceholderEnum) Index() int { return 0 }
+func (e PlaceholderEnum) Syntax() protoreflect.Syntax { return 0 }
+func (e PlaceholderEnum) Name() protoreflect.Name { return protoreflect.FullName(e).Name() }
+func (e PlaceholderEnum) FullName() protoreflect.FullName { return protoreflect.FullName(e) }
+func (e PlaceholderEnum) IsPlaceholder() bool { return true }
+func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return descopts.Enum }
+func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues }
+func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames }
+func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges }
+func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return }
+func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return }
// PlaceholderEnumValue is a placeholder, representing only the full name.
-type PlaceholderEnumValue pref.FullName
+type PlaceholderEnumValue protoreflect.FullName
-func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil }
-func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil }
-func (e PlaceholderEnumValue) Index() int { return 0 }
-func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 }
-func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() }
-func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) }
-func (e PlaceholderEnumValue) IsPlaceholder() bool { return true }
-func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue }
-func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 }
-func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return }
-func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return }
+func (e PlaceholderEnumValue) ParentFile() protoreflect.FileDescriptor { return nil }
+func (e PlaceholderEnumValue) Parent() protoreflect.Descriptor { return nil }
+func (e PlaceholderEnumValue) Index() int { return 0 }
+func (e PlaceholderEnumValue) Syntax() protoreflect.Syntax { return 0 }
+func (e PlaceholderEnumValue) Name() protoreflect.Name { return protoreflect.FullName(e).Name() }
+func (e PlaceholderEnumValue) FullName() protoreflect.FullName { return protoreflect.FullName(e) }
+func (e PlaceholderEnumValue) IsPlaceholder() bool { return true }
+func (e PlaceholderEnumValue) Options() protoreflect.ProtoMessage { return descopts.EnumValue }
+func (e PlaceholderEnumValue) Number() protoreflect.EnumNumber { return 0 }
+func (e PlaceholderEnumValue) ProtoType(protoreflect.EnumValueDescriptor) { return }
+func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return }
// PlaceholderMessage is a placeholder, representing only the full name.
-type PlaceholderMessage pref.FullName
+type PlaceholderMessage protoreflect.FullName
-func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil }
-func (m PlaceholderMessage) Parent() pref.Descriptor { return nil }
-func (m PlaceholderMessage) Index() int { return 0 }
-func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 }
-func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() }
-func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) }
-func (m PlaceholderMessage) IsPlaceholder() bool { return true }
-func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message }
-func (m PlaceholderMessage) IsMapEntry() bool { return false }
-func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields }
-func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs }
-func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames }
-func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges }
-func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers }
-func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges }
-func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") }
-func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages }
-func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums }
-func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions }
-func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return }
-func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return }
+func (m PlaceholderMessage) ParentFile() protoreflect.FileDescriptor { return nil }
+func (m PlaceholderMessage) Parent() protoreflect.Descriptor { return nil }
+func (m PlaceholderMessage) Index() int { return 0 }
+func (m PlaceholderMessage) Syntax() protoreflect.Syntax { return 0 }
+func (m PlaceholderMessage) Name() protoreflect.Name { return protoreflect.FullName(m).Name() }
+func (m PlaceholderMessage) FullName() protoreflect.FullName { return protoreflect.FullName(m) }
+func (m PlaceholderMessage) IsPlaceholder() bool { return true }
+func (m PlaceholderMessage) Options() protoreflect.ProtoMessage { return descopts.Message }
+func (m PlaceholderMessage) IsMapEntry() bool { return false }
+func (m PlaceholderMessage) Fields() protoreflect.FieldDescriptors { return emptyFields }
+func (m PlaceholderMessage) Oneofs() protoreflect.OneofDescriptors { return emptyOneofs }
+func (m PlaceholderMessage) ReservedNames() protoreflect.Names { return emptyNames }
+func (m PlaceholderMessage) ReservedRanges() protoreflect.FieldRanges { return emptyFieldRanges }
+func (m PlaceholderMessage) RequiredNumbers() protoreflect.FieldNumbers { return emptyFieldNumbers }
+func (m PlaceholderMessage) ExtensionRanges() protoreflect.FieldRanges { return emptyFieldRanges }
+func (m PlaceholderMessage) ExtensionRangeOptions(int) protoreflect.ProtoMessage {
+ panic("index out of range")
+}
+func (m PlaceholderMessage) Messages() protoreflect.MessageDescriptors { return emptyMessages }
+func (m PlaceholderMessage) Enums() protoreflect.EnumDescriptors { return emptyEnums }
+func (m PlaceholderMessage) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions }
+func (m PlaceholderMessage) ProtoType(protoreflect.MessageDescriptor) { return }
+func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return }
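Note: the placeholder types above stand in for unresolved imports; they carry nothing but a full name, report IsPlaceholder() == true, and derive every other property from that name. The internal filedesc package is not importable from user code, so the sketch below shows only the FullName derivation the placeholder methods rely on, using the public protoreflect package.

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// A placeholder descriptor is backed by a bare full name; Name() and
	// Parent() are computed from it, as in the methods above.
	fn := protoreflect.FullName("google.protobuf.FieldOptions")
	fmt.Println(fn.Name())    // FieldOptions
	fmt.Println(fn.Parent())  // google.protobuf
	fmt.Println(fn.IsValid()) // true
}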
diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go
index 0a0dd35de5..f0e38c4ef4 100644
--- a/vendor/google.golang.org/protobuf/internal/filetype/build.go
+++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go
@@ -10,17 +10,16 @@ import (
"reflect"
"google.golang.org/protobuf/internal/descopts"
- fdesc "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/filedesc"
pimpl "google.golang.org/protobuf/internal/impl"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- preg "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
)
// Builder constructs type descriptors from a raw file descriptor
// and associated Go types for each enum and message declaration.
//
-//
-// Flattened Ordering
+// # Flattened Ordering
//
// The protobuf type system represents declarations as a tree. Certain nodes in
// the tree require us to either associate it with a concrete Go type or to
@@ -52,7 +51,7 @@ import (
// that children themselves may have.
type Builder struct {
// File is the underlying file descriptor builder.
- File fdesc.Builder
+ File filedesc.Builder
// GoTypes is a unique set of the Go types for all declarations and
// dependencies. Each type is represented as a zero value of the Go type.
@@ -108,22 +107,22 @@ type Builder struct {
// TypeRegistry is the registry to register each type descriptor.
// If nil, it uses protoregistry.GlobalTypes.
TypeRegistry interface {
- RegisterMessage(pref.MessageType) error
- RegisterEnum(pref.EnumType) error
- RegisterExtension(pref.ExtensionType) error
+ RegisterMessage(protoreflect.MessageType) error
+ RegisterEnum(protoreflect.EnumType) error
+ RegisterExtension(protoreflect.ExtensionType) error
}
}
// Out is the output of the builder.
type Out struct {
- File pref.FileDescriptor
+ File protoreflect.FileDescriptor
}
func (tb Builder) Build() (out Out) {
// Replace the resolver with one that resolves dependencies by index,
// which is faster and more reliable than relying on the global registry.
if tb.File.FileRegistry == nil {
- tb.File.FileRegistry = preg.GlobalFiles
+ tb.File.FileRegistry = protoregistry.GlobalFiles
}
tb.File.FileRegistry = &resolverByIndex{
goTypes: tb.GoTypes,
@@ -133,7 +132,7 @@ func (tb Builder) Build() (out Out) {
// Initialize registry if unpopulated.
if tb.TypeRegistry == nil {
- tb.TypeRegistry = preg.GlobalTypes
+ tb.TypeRegistry = protoregistry.GlobalTypes
}
fbOut := tb.File.Build()
@@ -183,23 +182,23 @@ func (tb Builder) Build() (out Out) {
for i := range fbOut.Messages {
switch fbOut.Messages[i].Name() {
case "FileOptions":
- descopts.File = messageGoTypes[i].(pref.ProtoMessage)
+ descopts.File = messageGoTypes[i].(protoreflect.ProtoMessage)
case "EnumOptions":
- descopts.Enum = messageGoTypes[i].(pref.ProtoMessage)
+ descopts.Enum = messageGoTypes[i].(protoreflect.ProtoMessage)
case "EnumValueOptions":
- descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage)
+ descopts.EnumValue = messageGoTypes[i].(protoreflect.ProtoMessage)
case "MessageOptions":
- descopts.Message = messageGoTypes[i].(pref.ProtoMessage)
+ descopts.Message = messageGoTypes[i].(protoreflect.ProtoMessage)
case "FieldOptions":
- descopts.Field = messageGoTypes[i].(pref.ProtoMessage)
+ descopts.Field = messageGoTypes[i].(protoreflect.ProtoMessage)
case "OneofOptions":
- descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage)
+ descopts.Oneof = messageGoTypes[i].(protoreflect.ProtoMessage)
case "ExtensionRangeOptions":
- descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage)
+ descopts.ExtensionRange = messageGoTypes[i].(protoreflect.ProtoMessage)
case "ServiceOptions":
- descopts.Service = messageGoTypes[i].(pref.ProtoMessage)
+ descopts.Service = messageGoTypes[i].(protoreflect.ProtoMessage)
case "MethodOptions":
- descopts.Method = messageGoTypes[i].(pref.ProtoMessage)
+ descopts.Method = messageGoTypes[i].(protoreflect.ProtoMessage)
}
}
}
@@ -216,11 +215,11 @@ func (tb Builder) Build() (out Out) {
const listExtDeps = 2
var goType reflect.Type
switch fbOut.Extensions[i].L1.Kind {
- case pref.EnumKind:
+ case protoreflect.EnumKind:
j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx)
goType = reflect.TypeOf(tb.GoTypes[j])
depIdx++
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx)
goType = reflect.TypeOf(tb.GoTypes[j])
depIdx++
@@ -242,22 +241,22 @@ func (tb Builder) Build() (out Out) {
return out
}
-var goTypeForPBKind = map[pref.Kind]reflect.Type{
- pref.BoolKind: reflect.TypeOf(bool(false)),
- pref.Int32Kind: reflect.TypeOf(int32(0)),
- pref.Sint32Kind: reflect.TypeOf(int32(0)),
- pref.Sfixed32Kind: reflect.TypeOf(int32(0)),
- pref.Int64Kind: reflect.TypeOf(int64(0)),
- pref.Sint64Kind: reflect.TypeOf(int64(0)),
- pref.Sfixed64Kind: reflect.TypeOf(int64(0)),
- pref.Uint32Kind: reflect.TypeOf(uint32(0)),
- pref.Fixed32Kind: reflect.TypeOf(uint32(0)),
- pref.Uint64Kind: reflect.TypeOf(uint64(0)),
- pref.Fixed64Kind: reflect.TypeOf(uint64(0)),
- pref.FloatKind: reflect.TypeOf(float32(0)),
- pref.DoubleKind: reflect.TypeOf(float64(0)),
- pref.StringKind: reflect.TypeOf(string("")),
- pref.BytesKind: reflect.TypeOf([]byte(nil)),
+var goTypeForPBKind = map[protoreflect.Kind]reflect.Type{
+ protoreflect.BoolKind: reflect.TypeOf(bool(false)),
+ protoreflect.Int32Kind: reflect.TypeOf(int32(0)),
+ protoreflect.Sint32Kind: reflect.TypeOf(int32(0)),
+ protoreflect.Sfixed32Kind: reflect.TypeOf(int32(0)),
+ protoreflect.Int64Kind: reflect.TypeOf(int64(0)),
+ protoreflect.Sint64Kind: reflect.TypeOf(int64(0)),
+ protoreflect.Sfixed64Kind: reflect.TypeOf(int64(0)),
+ protoreflect.Uint32Kind: reflect.TypeOf(uint32(0)),
+ protoreflect.Fixed32Kind: reflect.TypeOf(uint32(0)),
+ protoreflect.Uint64Kind: reflect.TypeOf(uint64(0)),
+ protoreflect.Fixed64Kind: reflect.TypeOf(uint64(0)),
+ protoreflect.FloatKind: reflect.TypeOf(float32(0)),
+ protoreflect.DoubleKind: reflect.TypeOf(float64(0)),
+ protoreflect.StringKind: reflect.TypeOf(string("")),
+ protoreflect.BytesKind: reflect.TypeOf([]byte(nil)),
}
type depIdxs []int32
@@ -274,13 +273,13 @@ type (
fileRegistry
}
fileRegistry interface {
- FindFileByPath(string) (pref.FileDescriptor, error)
- FindDescriptorByName(pref.FullName) (pref.Descriptor, error)
- RegisterFile(pref.FileDescriptor) error
+ FindFileByPath(string) (protoreflect.FileDescriptor, error)
+ FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error)
+ RegisterFile(protoreflect.FileDescriptor) error
}
)
-func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor {
+func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.EnumDescriptor {
if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
return &es[depIdx]
} else {
@@ -288,7 +287,7 @@ func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdes
}
}
-func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor {
+func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.MessageDescriptor {
if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
return &ms[depIdx-len(es)]
} else {
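Note: Builder.Build falls back to protoregistry.GlobalFiles and protoregistry.GlobalTypes when no registry is supplied, which is the path every generated file takes at init time. A minimal sketch of the observable result, assuming the well-known duration.proto is linked into the binary:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/durationpb" // init registers the file and its types
)

func main() {
	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/duration.proto")
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.Package()) // google.protobuf

	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Duration")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt.Descriptor().FullName()) // google.protobuf.Duration
}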
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
index abee5f30e9..a371f98de1 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
@@ -12,8 +12,8 @@ import (
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/proto"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- piface "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
)
// Export is a zero-length named type that exists only to export a set of
@@ -32,11 +32,11 @@ type enum = interface{}
// EnumOf returns the protoreflect.Enum interface over e.
// It returns nil if e is nil.
-func (Export) EnumOf(e enum) pref.Enum {
+func (Export) EnumOf(e enum) protoreflect.Enum {
switch e := e.(type) {
case nil:
return nil
- case pref.Enum:
+ case protoreflect.Enum:
return e
default:
return legacyWrapEnum(reflect.ValueOf(e))
@@ -45,11 +45,11 @@ func (Export) EnumOf(e enum) pref.Enum {
// EnumDescriptorOf returns the protoreflect.EnumDescriptor for e.
// It returns nil if e is nil.
-func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor {
+func (Export) EnumDescriptorOf(e enum) protoreflect.EnumDescriptor {
switch e := e.(type) {
case nil:
return nil
- case pref.Enum:
+ case protoreflect.Enum:
return e.Descriptor()
default:
return LegacyLoadEnumDesc(reflect.TypeOf(e))
@@ -58,11 +58,11 @@ func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor {
// EnumTypeOf returns the protoreflect.EnumType for e.
// It returns nil if e is nil.
-func (Export) EnumTypeOf(e enum) pref.EnumType {
+func (Export) EnumTypeOf(e enum) protoreflect.EnumType {
switch e := e.(type) {
case nil:
return nil
- case pref.Enum:
+ case protoreflect.Enum:
return e.Type()
default:
return legacyLoadEnumType(reflect.TypeOf(e))
@@ -71,7 +71,7 @@ func (Export) EnumTypeOf(e enum) pref.EnumType {
// EnumStringOf returns the enum value as a string, either as the name if
// the number is resolvable, or the number formatted as a string.
-func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string {
+func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNumber) string {
ev := ed.Values().ByNumber(n)
if ev != nil {
return string(ev.Name())
@@ -84,7 +84,7 @@ func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string {
type message = interface{}
// legacyMessageWrapper wraps a v2 message as a v1 message.
-type legacyMessageWrapper struct{ m pref.ProtoMessage }
+type legacyMessageWrapper struct{ m protoreflect.ProtoMessage }
func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) }
func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) }
@@ -92,30 +92,30 @@ func (m legacyMessageWrapper) ProtoMessage() {}
// ProtoMessageV1Of converts either a v1 or v2 message to a v1 message.
// It returns nil if m is nil.
-func (Export) ProtoMessageV1Of(m message) piface.MessageV1 {
+func (Export) ProtoMessageV1Of(m message) protoiface.MessageV1 {
switch mv := m.(type) {
case nil:
return nil
- case piface.MessageV1:
+ case protoiface.MessageV1:
return mv
case unwrapper:
return Export{}.ProtoMessageV1Of(mv.protoUnwrap())
- case pref.ProtoMessage:
+ case protoreflect.ProtoMessage:
return legacyMessageWrapper{mv}
default:
panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m))
}
}
-func (Export) protoMessageV2Of(m message) pref.ProtoMessage {
+func (Export) protoMessageV2Of(m message) protoreflect.ProtoMessage {
switch mv := m.(type) {
case nil:
return nil
- case pref.ProtoMessage:
+ case protoreflect.ProtoMessage:
return mv
case legacyMessageWrapper:
return mv.m
- case piface.MessageV1:
+ case protoiface.MessageV1:
return nil
default:
panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m))
@@ -124,7 +124,7 @@ func (Export) protoMessageV2Of(m message) pref.ProtoMessage {
// ProtoMessageV2Of converts either a v1 or v2 message to a v2 message.
// It returns nil if m is nil.
-func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage {
+func (Export) ProtoMessageV2Of(m message) protoreflect.ProtoMessage {
if m == nil {
return nil
}
@@ -136,7 +136,7 @@ func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage {
// MessageOf returns the protoreflect.Message interface over m.
// It returns nil if m is nil.
-func (Export) MessageOf(m message) pref.Message {
+func (Export) MessageOf(m message) protoreflect.Message {
if m == nil {
return nil
}
@@ -148,7 +148,7 @@ func (Export) MessageOf(m message) pref.Message {
// MessageDescriptorOf returns the protoreflect.MessageDescriptor for m.
// It returns nil if m is nil.
-func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor {
+func (Export) MessageDescriptorOf(m message) protoreflect.MessageDescriptor {
if m == nil {
return nil
}
@@ -160,7 +160,7 @@ func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor {
// MessageTypeOf returns the protoreflect.MessageType for m.
// It returns nil if m is nil.
-func (Export) MessageTypeOf(m message) pref.MessageType {
+func (Export) MessageTypeOf(m message) protoreflect.MessageType {
if m == nil {
return nil
}
@@ -172,6 +172,6 @@ func (Export) MessageTypeOf(m message) pref.MessageType {
// MessageStringOf returns the message value as a string,
// which is the message serialized in the protobuf text format.
-func (Export) MessageStringOf(m pref.ProtoMessage) string {
+func (Export) MessageStringOf(m protoreflect.ProtoMessage) string {
return prototext.MarshalOptions{Multiline: false}.Format(m)
}
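Note: the Export conversions above power the public v1/v2 bridge. A sketch using the github.com/golang/protobuf shim, whose MessageV1/MessageV2 helpers delegate to these methods; it assumes that shim module is on the module path, as it is in this vendor tree:

package main

import (
	"fmt"

	protov1 "github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m2 := wrapperspb.String("hello") // a v2 message

	m1 := protov1.MessageV1(m2) // wraps or unwraps as needed, per ProtoMessageV1Of
	rt := protov1.MessageV2(m1) // round-trips back to the v2 form

	fmt.Println(proto.Equal(m2, rt)) // true
}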
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
index b82341e575..bff041edc9 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -8,18 +8,18 @@ import (
"sync"
"google.golang.org/protobuf/internal/errors"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- piface "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
)
-func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) {
+func (mi *MessageInfo) checkInitialized(in protoiface.CheckInitializedInput) (protoiface.CheckInitializedOutput, error) {
var p pointer
if ms, ok := in.Message.(*messageState); ok {
p = ms.pointer()
} else {
p = in.Message.(*messageReflectWrapper).pointer()
}
- return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p)
+ return protoiface.CheckInitializedOutput{}, mi.checkInitializedPointer(p)
}
func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
@@ -90,7 +90,7 @@ var (
// needsInitCheck reports whether a message needs to be checked for partial initialization.
//
// It returns true if the message transitively includes any required or extension fields.
-func needsInitCheck(md pref.MessageDescriptor) bool {
+func needsInitCheck(md protoreflect.MessageDescriptor) bool {
if v, ok := needsInitCheckMap.Load(md); ok {
if has, ok := v.(bool); ok {
return has
@@ -101,7 +101,7 @@ func needsInitCheck(md pref.MessageDescriptor) bool {
return needsInitCheckLocked(md)
}
-func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) {
+func needsInitCheckLocked(md protoreflect.MessageDescriptor) (has bool) {
if v, ok := needsInitCheckMap.Load(md); ok {
// If has is true, we've previously determined that this message
// needs init checks.
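Note: needsInitCheck decides whether the fast path must run the equivalent of proto.CheckInitialized. A quick way to see that check fire from the public API is a proto2 message with required fields; descriptorpb's UninterpretedOption.NamePart is chosen here because both of its fields are required:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	np := &descriptorpb.UninterpretedOption_NamePart{} // both fields are proto2 `required`
	fmt.Println(proto.CheckInitialized(np))            // error: required fields not set

	np.NamePart = proto.String("answer")
	np.IsExtension = proto.Bool(false)
	fmt.Println(proto.CheckInitialized(np)) // <nil>
}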
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 08d35170b6..e74cefdc50 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -10,7 +10,7 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
type extensionFieldInfo struct {
@@ -23,7 +23,7 @@ type extensionFieldInfo struct {
var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo
-func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo {
+func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo {
if xi, ok := xt.(*ExtensionInfo); ok {
xi.lazyInit()
return xi.info
@@ -32,7 +32,7 @@ func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo {
}
// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt.
-func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo {
+func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo {
if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok {
return xi.(*extensionFieldInfo)
}
@@ -43,7 +43,7 @@ func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo {
return e
}
-func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo {
+func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo {
var wiretag uint64
if !xd.IsPacked() {
wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()])
@@ -59,10 +59,10 @@ func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo {
// This is true for composite types, where we pass in a message, list, or map to fill in,
// and for enums, where we pass in a prototype value to specify the concrete enum type.
switch xd.Kind() {
- case pref.MessageKind, pref.GroupKind, pref.EnumKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.EnumKind:
e.unmarshalNeedsValue = true
default:
- if xd.Cardinality() == pref.Repeated {
+ if xd.Cardinality() == protoreflect.Repeated {
e.unmarshalNeedsValue = true
}
}
@@ -73,21 +73,21 @@ type lazyExtensionValue struct {
atomicOnce uint32 // atomically set if value is valid
mu sync.Mutex
xi *extensionFieldInfo
- value pref.Value
+ value protoreflect.Value
b []byte
- fn func() pref.Value
+ fn func() protoreflect.Value
}
type ExtensionField struct {
- typ pref.ExtensionType
+ typ protoreflect.ExtensionType
// value is either the value of GetValue,
// or a *lazyExtensionValue that then returns the value of GetValue.
- value pref.Value
+ value protoreflect.Value
lazy *lazyExtensionValue
}
-func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) {
+func (f *ExtensionField) appendLazyBytes(xt protoreflect.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) {
if f.lazy == nil {
f.lazy = &lazyExtensionValue{xi: xi}
}
@@ -97,7 +97,7 @@ func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFie
f.lazy.b = append(f.lazy.b, b...)
}
-func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool {
+func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool {
if f.typ == nil {
return true
}
@@ -154,7 +154,7 @@ func (f *ExtensionField) lazyInit() {
// Set sets the type and value of the extension field.
// This must not be called concurrently.
-func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) {
+func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) {
f.typ = t
f.value = v
f.lazy = nil
@@ -162,14 +162,14 @@ func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) {
// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
// This must not be called concurrently.
-func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) {
+func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
f.typ = t
f.lazy = &lazyExtensionValue{fn: fn}
}
// Value returns the value of the extension field.
// This may be called concurrently.
-func (f *ExtensionField) Value() pref.Value {
+func (f *ExtensionField) Value() protoreflect.Value {
if f.lazy != nil {
if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 {
f.lazyInit()
@@ -181,7 +181,7 @@ func (f *ExtensionField) Value() pref.Value {
// Type returns the type of the extension field.
// This may be called concurrently.
-func (f ExtensionField) Type() pref.ExtensionType {
+func (f ExtensionField) Type() protoreflect.ExtensionType {
return f.typ
}
@@ -193,7 +193,7 @@ func (f ExtensionField) IsSet() bool {
// IsLazy reports whether a field is lazily encoded.
// It is exported for testing.
-func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool {
+func IsLazy(m protoreflect.Message, fd protoreflect.FieldDescriptor) bool {
var mi *MessageInfo
var p pointer
switch m := m.(type) {
@@ -206,7 +206,7 @@ func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool {
default:
return false
}
- xd, ok := fd.(pref.ExtensionTypeDescriptor)
+ xd, ok := fd.(protoreflect.ExtensionTypeDescriptor)
if !ok {
return false
}
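Note: lazyExtensionValue defers decoding an extension's bytes until the first Value() call, using an atomic flag plus a mutex so that readers after the first pay no lock cost. A self-contained sketch of that pattern, with a plain int payload standing in for protoreflect.Value:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// lazyValue mirrors the shape of lazyExtensionValue: the producer runs at
// most once, on first use, and later readers avoid the mutex entirely.
type lazyValue struct {
	done uint32 // atomically set once v is valid
	mu   sync.Mutex
	fn   func() int // producer; cleared after first use
	v    int
}

func (l *lazyValue) Get() int {
	if atomic.LoadUint32(&l.done) == 0 {
		l.mu.Lock()
		if l.fn != nil {
			l.v = l.fn()
			l.fn = nil
			atomic.StoreUint32(&l.done, 1)
		}
		l.mu.Unlock()
	}
	return l.v
}

func main() {
	l := &lazyValue{fn: func() int { fmt.Println("decoding"); return 42 }}
	fmt.Println(l.Get()) // decodes on first use
	fmt.Println(l.Get()) // served from the cached value
}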
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index cb4b482d16..3fadd241e1 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -12,9 +12,9 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/proto"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- preg "google.golang.org/protobuf/reflect/protoregistry"
- piface "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
)
type errInvalidUTF8 struct{}
@@ -30,7 +30,7 @@ func (errInvalidUTF8) Unwrap() error { return errors.Error }
// to the appropriate field-specific function as necessary.
//
// The unmarshal function is set on each field individually as usual.
-func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) {
+func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si structInfo) {
fs := si.oneofsByName[od.Name()]
ft := fs.Type
oneofFields := make(map[reflect.Type]*coderFieldInfo)
@@ -118,13 +118,13 @@ func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structIn
}
}
-func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs {
+func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs {
var once sync.Once
- var messageType pref.MessageType
+ var messageType protoreflect.MessageType
lazyInit := func() {
once.Do(func() {
messageName := fd.Message().FullName()
- messageType, _ = preg.GlobalTypes.FindMessageByName(messageName)
+ messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
})
}
@@ -190,7 +190,7 @@ func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs {
}
}
-func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
if mi := getMessageInfo(ft); mi != nil {
funcs := pointerCoderFuncs{
size: sizeMessageInfo,
@@ -280,7 +280,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh
if n < 0 {
return out, errDecode
}
- o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{
Buf: v,
Message: m.ProtoReflect(),
})
@@ -288,27 +288,27 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh
return out, err
}
out.n = n
- out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0
return out, nil
}
-func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int {
+func sizeMessageValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
m := v.Message().Interface()
return sizeMessage(m, tagsize, opts)
}
-func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+func appendMessageValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
m := v.Message().Interface()
return appendMessage(b, m, wiretag, opts)
}
-func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) {
+func consumeMessageValue(b []byte, v protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) {
m := v.Message().Interface()
out, err := consumeMessage(b, m, wtyp, opts)
return v, out, err
}
-func isInitMessageValue(v pref.Value) error {
+func isInitMessageValue(v protoreflect.Value) error {
m := v.Message().Interface()
return proto.CheckInitialized(m)
}
@@ -321,17 +321,17 @@ var coderMessageValue = valueCoderFuncs{
merge: mergeMessageValue,
}
-func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int {
+func sizeGroupValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
m := v.Message().Interface()
return sizeGroup(m, tagsize, opts)
}
-func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+func appendGroupValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
m := v.Message().Interface()
return appendGroup(b, m, wiretag, opts)
}
-func consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) {
+func consumeGroupValue(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) {
m := v.Message().Interface()
out, err := consumeGroup(b, m, num, wtyp, opts)
return v, out, err
@@ -345,7 +345,7 @@ var coderGroupValue = valueCoderFuncs{
merge: mergeMessageValue,
}
-func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+func makeGroupFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
num := fd.Number()
if mi := getMessageInfo(ft); mi != nil {
funcs := pointerCoderFuncs{
@@ -424,7 +424,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir
if n < 0 {
return out, errDecode
}
- o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{
Buf: b,
Message: m.ProtoReflect(),
})
@@ -432,11 +432,11 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir
return out, err
}
out.n = n
- out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0
return out, nil
}
-func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+func makeMessageSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
if mi := getMessageInfo(ft); mi != nil {
funcs := pointerCoderFuncs{
size: sizeMessageSliceInfo,
@@ -555,7 +555,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir
return out, errDecode
}
mp := reflect.New(goType.Elem())
- o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{
Buf: v,
Message: asMessage(mp).ProtoReflect(),
})
@@ -564,7 +564,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir
}
p.AppendPointerSlice(pointerOfValue(mp))
out.n = n
- out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0
return out, nil
}
@@ -581,7 +581,7 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error {
// Slices of messages
-func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int {
+func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int {
list := listv.List()
n := 0
for i, llen := 0, list.Len(); i < llen; i++ {
@@ -591,7 +591,7 @@ func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) i
return n
}
-func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
mopts := opts.Options()
for i, llen := 0, list.Len(); i < llen; i++ {
@@ -608,30 +608,30 @@ func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts ma
return b, nil
}
-func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) {
+func consumeMessageSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp != protowire.BytesType {
- return pref.Value{}, out, errUnknown
+ return protoreflect.Value{}, out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
- return pref.Value{}, out, errDecode
+ return protoreflect.Value{}, out, errDecode
}
m := list.NewElement()
- o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{
Buf: v,
Message: m.Message(),
})
if err != nil {
- return pref.Value{}, out, err
+ return protoreflect.Value{}, out, err
}
list.Append(m)
out.n = n
- out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0
return listv, out, nil
}
-func isInitMessageSliceValue(listv pref.Value) error {
+func isInitMessageSliceValue(listv protoreflect.Value) error {
list := listv.List()
for i, llen := 0, list.Len(); i < llen; i++ {
m := list.Get(i).Message().Interface()
@@ -650,7 +650,7 @@ var coderMessageSliceValue = valueCoderFuncs{
merge: mergeMessageListValue,
}
-func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int {
+func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int {
list := listv.List()
n := 0
for i, llen := 0, list.Len(); i < llen; i++ {
@@ -660,7 +660,7 @@ func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int
return n
}
-func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+func appendGroupSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
list := listv.List()
mopts := opts.Options()
for i, llen := 0, list.Len(); i < llen; i++ {
@@ -676,26 +676,26 @@ func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts mars
return b, nil
}
-func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) {
+func consumeGroupSliceValue(b []byte, listv protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
list := listv.List()
if wtyp != protowire.StartGroupType {
- return pref.Value{}, out, errUnknown
+ return protoreflect.Value{}, out, errUnknown
}
b, n := protowire.ConsumeGroup(num, b)
if n < 0 {
- return pref.Value{}, out, errDecode
+ return protoreflect.Value{}, out, errDecode
}
m := list.NewElement()
- o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{
Buf: b,
Message: m.Message(),
})
if err != nil {
- return pref.Value{}, out, err
+ return protoreflect.Value{}, out, err
}
list.Append(m)
out.n = n
- out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0
return listv, out, nil
}
@@ -707,7 +707,7 @@ var coderGroupSliceValue = valueCoderFuncs{
merge: mergeMessageListValue,
}
-func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
num := fd.Number()
if mi := getMessageInfo(ft); mi != nil {
funcs := pointerCoderFuncs{
@@ -772,7 +772,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire
return out, errDecode
}
mp := reflect.New(goType.Elem())
- o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{
Buf: b,
Message: asMessage(mp).ProtoReflect(),
})
@@ -781,7 +781,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire
}
p.AppendPointerSlice(pointerOfValue(mp))
out.n = n
- out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0
return out, nil
}
@@ -822,8 +822,8 @@ func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFie
return out, nil
}
-func asMessage(v reflect.Value) pref.ProtoMessage {
- if m, ok := v.Interface().(pref.ProtoMessage); ok {
+func asMessage(v reflect.Value) protoreflect.ProtoMessage {
+ if m, ok := v.Interface().(protoreflect.ProtoMessage); ok {
return m
}
return legacyWrapMessage(v).Interface()
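Note: makeWeakMessageFieldCoder cannot assume the weak message's Go type is linked into the binary, so it resolves the type by name from protoregistry.GlobalTypes on first use. The same lookup from user code, with timestamppb linked in purely for the demo and the not-linked case handled:

package main

import (
	"fmt"
	"sync"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/timestamppb" // linked in for the demo
)

func main() {
	var once sync.Once
	var mt protoreflect.MessageType
	lazyInit := func() {
		once.Do(func() {
			mt, _ = protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Timestamp")
		})
	}

	lazyInit()
	if mt == nil {
		fmt.Println("message type not linked in; a weak field would be dropped")
		return
	}
	fmt.Printf("%T\n", mt.New().Interface()) // *timestamppb.Timestamp
}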
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
index c1245fef48..111b9d16f9 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -10,7 +10,7 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/genid"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
type mapInfo struct {
@@ -19,12 +19,12 @@ type mapInfo struct {
valWiretag uint64
keyFuncs valueCoderFuncs
valFuncs valueCoderFuncs
- keyZero pref.Value
- keyKind pref.Kind
+ keyZero protoreflect.Value
+ keyKind protoreflect.Kind
conv *mapConverter
}
-func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) {
+func encoderFuncsForMap(fd protoreflect.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) {
// TODO: Consider generating specialized map coders.
keyField := fd.MapKey()
valField := fd.MapValue()
@@ -44,7 +44,7 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage
keyKind: keyField.Kind(),
conv: conv,
}
- if valField.Kind() == pref.MessageKind {
+ if valField.Kind() == protoreflect.MessageKind {
valueMessage = getMessageInfo(ft.Elem())
}
@@ -68,9 +68,9 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage
},
}
switch valField.Kind() {
- case pref.MessageKind:
+ case protoreflect.MessageKind:
funcs.merge = mergeMapOfMessage
- case pref.BytesKind:
+ case protoreflect.BytesKind:
funcs.merge = mergeMapOfBytes
default:
funcs.merge = mergeMap
@@ -135,7 +135,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo
err := errUnknown
switch num {
case genid.MapEntry_Key_field_number:
- var v pref.Value
+ var v protoreflect.Value
var o unmarshalOutput
v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts)
if err != nil {
@@ -144,7 +144,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo
key = v
n = o.n
case genid.MapEntry_Value_field_number:
- var v pref.Value
+ var v protoreflect.Value
var o unmarshalOutput
v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts)
if err != nil {
@@ -192,7 +192,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi
err := errUnknown
switch num {
case 1:
- var v pref.Value
+ var v protoreflect.Value
var o unmarshalOutput
v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts)
if err != nil {
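Note: consumeMap walks each map entry as a tiny nested message, key in field 1 (genid.MapEntry_Key_field_number) and value in field 2. Hand-rolling one map<string,int32> entry with protowire makes that layout concrete; the outer field number 7 is made up for the example:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Build the entry: key in field 1, value in field 2.
	var entry []byte
	entry = protowire.AppendTag(entry, 1, protowire.BytesType)
	entry = protowire.AppendString(entry, "answer")
	entry = protowire.AppendTag(entry, 2, protowire.VarintType)
	entry = protowire.AppendVarint(entry, 42)

	// The entry rides in the map field's tag as a length-prefixed blob.
	var field []byte
	field = protowire.AppendTag(field, 7, protowire.BytesType) // hypothetical field number
	field = protowire.AppendBytes(field, entry)

	fmt.Printf("% x\n", field)
}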
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index cd40527ff6..6b2fdbb739 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -12,15 +12,15 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/order"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- piface "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
)
// coderMessageInfo contains per-message information used by the fast-path functions.
// This is a different type from MessageInfo to keep MessageInfo as general-purpose as
// possible.
type coderMessageInfo struct {
- methods piface.Methods
+ methods protoiface.Methods
orderedCoderFields []*coderFieldInfo
denseCoderFields []*coderFieldInfo
@@ -38,13 +38,13 @@ type coderFieldInfo struct {
funcs pointerCoderFuncs // fast-path per-field functions
mi *MessageInfo // field's message
ft reflect.Type
- validation validationInfo // information used by message validation
- num pref.FieldNumber // field number
- offset offset // struct field offset
- wiretag uint64 // field tag (number + wire type)
- tagsize int // size of the varint-encoded tag
- isPointer bool // true if IsNil may be called on the struct field
- isRequired bool // true if field is required
+ validation validationInfo // information used by message validation
+ num protoreflect.FieldNumber // field number
+ offset offset // struct field offset
+ wiretag uint64 // field tag (number + wire type)
+ tagsize int // size of the varint-encoded tag
+ isPointer bool // true if IsNil may be called on the struct field
+ isRequired bool // true if field is required
}
func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
@@ -125,8 +125,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
funcs: funcs,
mi: childMessage,
validation: newFieldValidationInfo(mi, si, fd, ft),
- isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(),
- isRequired: fd.Cardinality() == pref.Required,
+ isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(),
+ isRequired: fd.Cardinality() == protoreflect.Required,
}
mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
mi.coderFields[cf.num] = cf
@@ -149,7 +149,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
})
- var maxDense pref.FieldNumber
+ var maxDense protoreflect.FieldNumber
for _, cf := range mi.orderedCoderFields {
if cf.num >= 16 && cf.num >= 2*maxDense {
break
@@ -175,12 +175,12 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
mi.needsInitCheck = needsInitCheck(mi.Desc)
if mi.methods.Marshal == nil && mi.methods.Size == nil {
- mi.methods.Flags |= piface.SupportMarshalDeterministic
+ mi.methods.Flags |= protoiface.SupportMarshalDeterministic
mi.methods.Marshal = mi.marshal
mi.methods.Size = mi.size
}
if mi.methods.Unmarshal == nil {
- mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
+ mi.methods.Flags |= protoiface.SupportUnmarshalDiscardUnknown
mi.methods.Unmarshal = mi.unmarshal
}
if mi.methods.CheckInitialized == nil {
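Note: the maxDense loop above decides how far the dense []*coderFieldInfo slice extends before lookups fall back to the coderFields map; growth stops at the first field number that is at least 16 and would leave the slice less than half full. A standalone sketch of that heuristic, with plain ints standing in for protoreflect.FieldNumber:

package main

import "fmt"

// denseLimit mirrors the loop above: it returns the highest field number
// kept in the dense slice; anything larger is served from the map.
func denseLimit(nums []int) int {
	maxDense := 0
	for _, n := range nums { // nums must be sorted ascending
		if n >= 16 && n >= 2*maxDense {
			break
		}
		maxDense = n
	}
	return maxDense
}

func main() {
	fmt.Println(denseLimit([]int{1, 2, 3, 100}))     // 3: 100 would waste 97 slots
	fmt.Println(denseLimit([]int{1, 2, 3, 20, 21}))  // 3: 20 >= 16 and 20 >= 2*3
	fmt.Println(denseLimit([]int{1, 8, 15, 16, 17})) // 17: 16 < 2*15, so growth continues
}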
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
index e899712388..576dcf3aac 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
@@ -10,7 +10,7 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/strs"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
// pointerCoderFuncs is a set of pointer encoding functions.
@@ -25,83 +25,83 @@ type pointerCoderFuncs struct {
// valueCoderFuncs is a set of protoreflect.Value encoding functions.
type valueCoderFuncs struct {
- size func(v pref.Value, tagsize int, opts marshalOptions) int
- marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error)
- unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error)
- isInit func(v pref.Value) error
- merge func(dst, src pref.Value, opts mergeOptions) pref.Value
+ size func(v protoreflect.Value, tagsize int, opts marshalOptions) int
+ marshal func(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error)
+ unmarshal func(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error)
+ isInit func(v protoreflect.Value) error
+ merge func(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value
}
// fieldCoder returns pointer functions for a field, used for operating on
// struct fields.
-func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
switch {
case fd.IsMap():
return encoderFuncsForMap(fd, ft)
- case fd.Cardinality() == pref.Repeated && !fd.IsPacked():
+ case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked():
// Repeated fields (not packed).
if ft.Kind() != reflect.Slice {
break
}
ft := ft.Elem()
switch fd.Kind() {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
if ft.Kind() == reflect.Bool {
return nil, coderBoolSlice
}
- case pref.EnumKind:
+ case protoreflect.EnumKind:
if ft.Kind() == reflect.Int32 {
return nil, coderEnumSlice
}
- case pref.Int32Kind:
+ case protoreflect.Int32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderInt32Slice
}
- case pref.Sint32Kind:
+ case protoreflect.Sint32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderSint32Slice
}
- case pref.Uint32Kind:
+ case protoreflect.Uint32Kind:
if ft.Kind() == reflect.Uint32 {
return nil, coderUint32Slice
}
- case pref.Int64Kind:
+ case protoreflect.Int64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderInt64Slice
}
- case pref.Sint64Kind:
+ case protoreflect.Sint64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderSint64Slice
}
- case pref.Uint64Kind:
+ case protoreflect.Uint64Kind:
if ft.Kind() == reflect.Uint64 {
return nil, coderUint64Slice
}
- case pref.Sfixed32Kind:
+ case protoreflect.Sfixed32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderSfixed32Slice
}
- case pref.Fixed32Kind:
+ case protoreflect.Fixed32Kind:
if ft.Kind() == reflect.Uint32 {
return nil, coderFixed32Slice
}
- case pref.FloatKind:
+ case protoreflect.FloatKind:
if ft.Kind() == reflect.Float32 {
return nil, coderFloatSlice
}
- case pref.Sfixed64Kind:
+ case protoreflect.Sfixed64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderSfixed64Slice
}
- case pref.Fixed64Kind:
+ case protoreflect.Fixed64Kind:
if ft.Kind() == reflect.Uint64 {
return nil, coderFixed64Slice
}
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
if ft.Kind() == reflect.Float64 {
return nil, coderDoubleSlice
}
- case pref.StringKind:
+ case protoreflect.StringKind:
if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
return nil, coderStringSliceValidateUTF8
}
@@ -114,19 +114,19 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer
if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
return nil, coderBytesSlice
}
- case pref.BytesKind:
+ case protoreflect.BytesKind:
if ft.Kind() == reflect.String {
return nil, coderStringSlice
}
if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
return nil, coderBytesSlice
}
- case pref.MessageKind:
+ case protoreflect.MessageKind:
return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft)
- case pref.GroupKind:
+ case protoreflect.GroupKind:
return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft)
}
- case fd.Cardinality() == pref.Repeated && fd.IsPacked():
+ case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked():
// Packed repeated fields.
//
// Only repeated fields of primitive numeric types
@@ -136,128 +136,128 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer
}
ft := ft.Elem()
switch fd.Kind() {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
if ft.Kind() == reflect.Bool {
return nil, coderBoolPackedSlice
}
- case pref.EnumKind:
+ case protoreflect.EnumKind:
if ft.Kind() == reflect.Int32 {
return nil, coderEnumPackedSlice
}
- case pref.Int32Kind:
+ case protoreflect.Int32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderInt32PackedSlice
}
- case pref.Sint32Kind:
+ case protoreflect.Sint32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderSint32PackedSlice
}
- case pref.Uint32Kind:
+ case protoreflect.Uint32Kind:
if ft.Kind() == reflect.Uint32 {
return nil, coderUint32PackedSlice
}
- case pref.Int64Kind:
+ case protoreflect.Int64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderInt64PackedSlice
}
- case pref.Sint64Kind:
+ case protoreflect.Sint64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderSint64PackedSlice
}
- case pref.Uint64Kind:
+ case protoreflect.Uint64Kind:
if ft.Kind() == reflect.Uint64 {
return nil, coderUint64PackedSlice
}
- case pref.Sfixed32Kind:
+ case protoreflect.Sfixed32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderSfixed32PackedSlice
}
- case pref.Fixed32Kind:
+ case protoreflect.Fixed32Kind:
if ft.Kind() == reflect.Uint32 {
return nil, coderFixed32PackedSlice
}
- case pref.FloatKind:
+ case protoreflect.FloatKind:
if ft.Kind() == reflect.Float32 {
return nil, coderFloatPackedSlice
}
- case pref.Sfixed64Kind:
+ case protoreflect.Sfixed64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderSfixed64PackedSlice
}
- case pref.Fixed64Kind:
+ case protoreflect.Fixed64Kind:
if ft.Kind() == reflect.Uint64 {
return nil, coderFixed64PackedSlice
}
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
if ft.Kind() == reflect.Float64 {
return nil, coderDoublePackedSlice
}
}
- case fd.Kind() == pref.MessageKind:
+ case fd.Kind() == protoreflect.MessageKind:
return getMessageInfo(ft), makeMessageFieldCoder(fd, ft)
- case fd.Kind() == pref.GroupKind:
+ case fd.Kind() == protoreflect.GroupKind:
return getMessageInfo(ft), makeGroupFieldCoder(fd, ft)
- case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil:
+ case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil:
// Populated oneof fields always encode even if set to the zero value,
// which normally are not encoded in proto3.
switch fd.Kind() {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
if ft.Kind() == reflect.Bool {
return nil, coderBoolNoZero
}
- case pref.EnumKind:
+ case protoreflect.EnumKind:
if ft.Kind() == reflect.Int32 {
return nil, coderEnumNoZero
}
- case pref.Int32Kind:
+ case protoreflect.Int32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderInt32NoZero
}
- case pref.Sint32Kind:
+ case protoreflect.Sint32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderSint32NoZero
}
- case pref.Uint32Kind:
+ case protoreflect.Uint32Kind:
if ft.Kind() == reflect.Uint32 {
return nil, coderUint32NoZero
}
- case pref.Int64Kind:
+ case protoreflect.Int64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderInt64NoZero
}
- case pref.Sint64Kind:
+ case protoreflect.Sint64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderSint64NoZero
}
- case pref.Uint64Kind:
+ case protoreflect.Uint64Kind:
if ft.Kind() == reflect.Uint64 {
return nil, coderUint64NoZero
}
- case pref.Sfixed32Kind:
+ case protoreflect.Sfixed32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderSfixed32NoZero
}
- case pref.Fixed32Kind:
+ case protoreflect.Fixed32Kind:
if ft.Kind() == reflect.Uint32 {
return nil, coderFixed32NoZero
}
- case pref.FloatKind:
+ case protoreflect.FloatKind:
if ft.Kind() == reflect.Float32 {
return nil, coderFloatNoZero
}
- case pref.Sfixed64Kind:
+ case protoreflect.Sfixed64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderSfixed64NoZero
}
- case pref.Fixed64Kind:
+ case protoreflect.Fixed64Kind:
if ft.Kind() == reflect.Uint64 {
return nil, coderFixed64NoZero
}
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
if ft.Kind() == reflect.Float64 {
return nil, coderDoubleNoZero
}
- case pref.StringKind:
+ case protoreflect.StringKind:
if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
return nil, coderStringNoZeroValidateUTF8
}
@@ -270,7 +270,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer
if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
return nil, coderBytesNoZero
}
- case pref.BytesKind:
+ case protoreflect.BytesKind:
if ft.Kind() == reflect.String {
return nil, coderStringNoZero
}
@@ -281,133 +281,133 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer
case ft.Kind() == reflect.Ptr:
ft := ft.Elem()
switch fd.Kind() {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
if ft.Kind() == reflect.Bool {
return nil, coderBoolPtr
}
- case pref.EnumKind:
+ case protoreflect.EnumKind:
if ft.Kind() == reflect.Int32 {
return nil, coderEnumPtr
}
- case pref.Int32Kind:
+ case protoreflect.Int32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderInt32Ptr
}
- case pref.Sint32Kind:
+ case protoreflect.Sint32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderSint32Ptr
}
- case pref.Uint32Kind:
+ case protoreflect.Uint32Kind:
if ft.Kind() == reflect.Uint32 {
return nil, coderUint32Ptr
}
- case pref.Int64Kind:
+ case protoreflect.Int64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderInt64Ptr
}
- case pref.Sint64Kind:
+ case protoreflect.Sint64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderSint64Ptr
}
- case pref.Uint64Kind:
+ case protoreflect.Uint64Kind:
if ft.Kind() == reflect.Uint64 {
return nil, coderUint64Ptr
}
- case pref.Sfixed32Kind:
+ case protoreflect.Sfixed32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderSfixed32Ptr
}
- case pref.Fixed32Kind:
+ case protoreflect.Fixed32Kind:
if ft.Kind() == reflect.Uint32 {
return nil, coderFixed32Ptr
}
- case pref.FloatKind:
+ case protoreflect.FloatKind:
if ft.Kind() == reflect.Float32 {
return nil, coderFloatPtr
}
- case pref.Sfixed64Kind:
+ case protoreflect.Sfixed64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderSfixed64Ptr
}
- case pref.Fixed64Kind:
+ case protoreflect.Fixed64Kind:
if ft.Kind() == reflect.Uint64 {
return nil, coderFixed64Ptr
}
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
if ft.Kind() == reflect.Float64 {
return nil, coderDoublePtr
}
- case pref.StringKind:
+ case protoreflect.StringKind:
if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
return nil, coderStringPtrValidateUTF8
}
if ft.Kind() == reflect.String {
return nil, coderStringPtr
}
- case pref.BytesKind:
+ case protoreflect.BytesKind:
if ft.Kind() == reflect.String {
return nil, coderStringPtr
}
}
default:
switch fd.Kind() {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
if ft.Kind() == reflect.Bool {
return nil, coderBool
}
- case pref.EnumKind:
+ case protoreflect.EnumKind:
if ft.Kind() == reflect.Int32 {
return nil, coderEnum
}
- case pref.Int32Kind:
+ case protoreflect.Int32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderInt32
}
- case pref.Sint32Kind:
+ case protoreflect.Sint32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderSint32
}
- case pref.Uint32Kind:
+ case protoreflect.Uint32Kind:
if ft.Kind() == reflect.Uint32 {
return nil, coderUint32
}
- case pref.Int64Kind:
+ case protoreflect.Int64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderInt64
}
- case pref.Sint64Kind:
+ case protoreflect.Sint64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderSint64
}
- case pref.Uint64Kind:
+ case protoreflect.Uint64Kind:
if ft.Kind() == reflect.Uint64 {
return nil, coderUint64
}
- case pref.Sfixed32Kind:
+ case protoreflect.Sfixed32Kind:
if ft.Kind() == reflect.Int32 {
return nil, coderSfixed32
}
- case pref.Fixed32Kind:
+ case protoreflect.Fixed32Kind:
if ft.Kind() == reflect.Uint32 {
return nil, coderFixed32
}
- case pref.FloatKind:
+ case protoreflect.FloatKind:
if ft.Kind() == reflect.Float32 {
return nil, coderFloat
}
- case pref.Sfixed64Kind:
+ case protoreflect.Sfixed64Kind:
if ft.Kind() == reflect.Int64 {
return nil, coderSfixed64
}
- case pref.Fixed64Kind:
+ case protoreflect.Fixed64Kind:
if ft.Kind() == reflect.Uint64 {
return nil, coderFixed64
}
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
if ft.Kind() == reflect.Float64 {
return nil, coderDouble
}
- case pref.StringKind:
+ case protoreflect.StringKind:
if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
return nil, coderStringValidateUTF8
}
@@ -420,7 +420,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer
if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
return nil, coderBytes
}
- case pref.BytesKind:
+ case protoreflect.BytesKind:
if ft.Kind() == reflect.String {
return nil, coderString
}
@@ -434,122 +434,122 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer
// encoderFuncsForValue returns value functions for a field, used for
// extension values and map encoding.
-func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs {
+func encoderFuncsForValue(fd protoreflect.FieldDescriptor) valueCoderFuncs {
switch {
- case fd.Cardinality() == pref.Repeated && !fd.IsPacked():
+ case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked():
switch fd.Kind() {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
return coderBoolSliceValue
- case pref.EnumKind:
+ case protoreflect.EnumKind:
return coderEnumSliceValue
- case pref.Int32Kind:
+ case protoreflect.Int32Kind:
return coderInt32SliceValue
- case pref.Sint32Kind:
+ case protoreflect.Sint32Kind:
return coderSint32SliceValue
- case pref.Uint32Kind:
+ case protoreflect.Uint32Kind:
return coderUint32SliceValue
- case pref.Int64Kind:
+ case protoreflect.Int64Kind:
return coderInt64SliceValue
- case pref.Sint64Kind:
+ case protoreflect.Sint64Kind:
return coderSint64SliceValue
- case pref.Uint64Kind:
+ case protoreflect.Uint64Kind:
return coderUint64SliceValue
- case pref.Sfixed32Kind:
+ case protoreflect.Sfixed32Kind:
return coderSfixed32SliceValue
- case pref.Fixed32Kind:
+ case protoreflect.Fixed32Kind:
return coderFixed32SliceValue
- case pref.FloatKind:
+ case protoreflect.FloatKind:
return coderFloatSliceValue
- case pref.Sfixed64Kind:
+ case protoreflect.Sfixed64Kind:
return coderSfixed64SliceValue
- case pref.Fixed64Kind:
+ case protoreflect.Fixed64Kind:
return coderFixed64SliceValue
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
return coderDoubleSliceValue
- case pref.StringKind:
+ case protoreflect.StringKind:
// We don't have a UTF-8 validating coder for repeated string fields.
// Value coders are used for extensions and maps.
// Extensions are never proto3, and maps never contain lists.
return coderStringSliceValue
- case pref.BytesKind:
+ case protoreflect.BytesKind:
return coderBytesSliceValue
- case pref.MessageKind:
+ case protoreflect.MessageKind:
return coderMessageSliceValue
- case pref.GroupKind:
+ case protoreflect.GroupKind:
return coderGroupSliceValue
}
- case fd.Cardinality() == pref.Repeated && fd.IsPacked():
+ case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked():
switch fd.Kind() {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
return coderBoolPackedSliceValue
- case pref.EnumKind:
+ case protoreflect.EnumKind:
return coderEnumPackedSliceValue
- case pref.Int32Kind:
+ case protoreflect.Int32Kind:
return coderInt32PackedSliceValue
- case pref.Sint32Kind:
+ case protoreflect.Sint32Kind:
return coderSint32PackedSliceValue
- case pref.Uint32Kind:
+ case protoreflect.Uint32Kind:
return coderUint32PackedSliceValue
- case pref.Int64Kind:
+ case protoreflect.Int64Kind:
return coderInt64PackedSliceValue
- case pref.Sint64Kind:
+ case protoreflect.Sint64Kind:
return coderSint64PackedSliceValue
- case pref.Uint64Kind:
+ case protoreflect.Uint64Kind:
return coderUint64PackedSliceValue
- case pref.Sfixed32Kind:
+ case protoreflect.Sfixed32Kind:
return coderSfixed32PackedSliceValue
- case pref.Fixed32Kind:
+ case protoreflect.Fixed32Kind:
return coderFixed32PackedSliceValue
- case pref.FloatKind:
+ case protoreflect.FloatKind:
return coderFloatPackedSliceValue
- case pref.Sfixed64Kind:
+ case protoreflect.Sfixed64Kind:
return coderSfixed64PackedSliceValue
- case pref.Fixed64Kind:
+ case protoreflect.Fixed64Kind:
return coderFixed64PackedSliceValue
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
return coderDoublePackedSliceValue
}
default:
switch fd.Kind() {
default:
- case pref.BoolKind:
+ case protoreflect.BoolKind:
return coderBoolValue
- case pref.EnumKind:
+ case protoreflect.EnumKind:
return coderEnumValue
- case pref.Int32Kind:
+ case protoreflect.Int32Kind:
return coderInt32Value
- case pref.Sint32Kind:
+ case protoreflect.Sint32Kind:
return coderSint32Value
- case pref.Uint32Kind:
+ case protoreflect.Uint32Kind:
return coderUint32Value
- case pref.Int64Kind:
+ case protoreflect.Int64Kind:
return coderInt64Value
- case pref.Sint64Kind:
+ case protoreflect.Sint64Kind:
return coderSint64Value
- case pref.Uint64Kind:
+ case protoreflect.Uint64Kind:
return coderUint64Value
- case pref.Sfixed32Kind:
+ case protoreflect.Sfixed32Kind:
return coderSfixed32Value
- case pref.Fixed32Kind:
+ case protoreflect.Fixed32Kind:
return coderFixed32Value
- case pref.FloatKind:
+ case protoreflect.FloatKind:
return coderFloatValue
- case pref.Sfixed64Kind:
+ case protoreflect.Sfixed64Kind:
return coderSfixed64Value
- case pref.Fixed64Kind:
+ case protoreflect.Fixed64Kind:
return coderFixed64Value
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
return coderDoubleValue
- case pref.StringKind:
+ case protoreflect.StringKind:
if strs.EnforceUTF8(fd) {
return coderStringValueValidateUTF8
}
return coderStringValue
- case pref.BytesKind:
+ case protoreflect.BytesKind:
return coderBytesValue
- case pref.MessageKind:
+ case protoreflect.MessageKind:
return coderMessageValue
- case pref.GroupKind:
+ case protoreflect.GroupKind:
return coderGroupValue
}
}
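
Note: the hunks above are a mechanical rename of the import alias (pref -> protoreflect); the Kind-driven dispatch itself is unchanged. A minimal fragment showing the same dispatch shape against the public protoreflect API (kindLabel is an illustrative helper, not part of the package; assumes import "google.golang.org/protobuf/reflect/protoreflect"):

    // kindLabel mirrors fieldCoder's structure: switch on the descriptor's
    // Kind and pick one behavior per protobuf scalar type.
    func kindLabel(fd protoreflect.FieldDescriptor) string {
        switch fd.Kind() {
        case protoreflect.BoolKind:
            return "bool"
        case protoreflect.StringKind:
            return "string (may need UTF-8 validation)"
        case protoreflect.BytesKind:
            return "bytes"
        default:
            return fd.Kind().String()
        }
    }
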
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index acd61bb50b..11a6128ba5 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -8,7 +8,7 @@ import (
"fmt"
"reflect"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
// unwrapper unwraps the value to the underlying value.
@@ -20,13 +20,13 @@ type unwrapper interface {
// A Converter converts to/from Go reflect.Value types and protobuf protoreflect.Value types.
type Converter interface {
// PBValueOf converts a reflect.Value to a protoreflect.Value.
- PBValueOf(reflect.Value) pref.Value
+ PBValueOf(reflect.Value) protoreflect.Value
// GoValueOf converts a protoreflect.Value to a reflect.Value.
- GoValueOf(pref.Value) reflect.Value
+ GoValueOf(protoreflect.Value) reflect.Value
// IsValidPB returns whether a protoreflect.Value is compatible with this type.
- IsValidPB(pref.Value) bool
+ IsValidPB(protoreflect.Value) bool
// IsValidGo returns whether a reflect.Value is compatible with this type.
IsValidGo(reflect.Value) bool
@@ -34,12 +34,12 @@ type Converter interface {
// New returns a new field value.
// For scalars, it returns the default value of the field.
// For composite types, it returns a new mutable value.
- New() pref.Value
+ New() protoreflect.Value
// Zero returns a new field value.
// For scalars, it returns the default value of the field.
// For composite types, it returns an immutable, empty value.
- Zero() pref.Value
+ Zero() protoreflect.Value
}
// NewConverter matches a Go type with a protobuf field and returns a Converter
@@ -50,7 +50,7 @@ type Converter interface {
// This matcher deliberately supports a wider range of Go types than what
// protoc-gen-go historically generated to be able to automatically wrap some
// v1 messages generated by other forks of protoc-gen-go.
-func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter {
switch {
case fd.IsList():
return newListConverter(t, fd)
@@ -76,68 +76,68 @@ var (
)
var (
- boolZero = pref.ValueOfBool(false)
- int32Zero = pref.ValueOfInt32(0)
- int64Zero = pref.ValueOfInt64(0)
- uint32Zero = pref.ValueOfUint32(0)
- uint64Zero = pref.ValueOfUint64(0)
- float32Zero = pref.ValueOfFloat32(0)
- float64Zero = pref.ValueOfFloat64(0)
- stringZero = pref.ValueOfString("")
- bytesZero = pref.ValueOfBytes(nil)
+ boolZero = protoreflect.ValueOfBool(false)
+ int32Zero = protoreflect.ValueOfInt32(0)
+ int64Zero = protoreflect.ValueOfInt64(0)
+ uint32Zero = protoreflect.ValueOfUint32(0)
+ uint64Zero = protoreflect.ValueOfUint64(0)
+ float32Zero = protoreflect.ValueOfFloat32(0)
+ float64Zero = protoreflect.ValueOfFloat64(0)
+ stringZero = protoreflect.ValueOfString("")
+ bytesZero = protoreflect.ValueOfBytes(nil)
)
-func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
- defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value {
- if fd.Cardinality() == pref.Repeated {
+func newSingularConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter {
+ defVal := func(fd protoreflect.FieldDescriptor, zero protoreflect.Value) protoreflect.Value {
+ if fd.Cardinality() == protoreflect.Repeated {
// Default isn't defined for repeated fields.
return zero
}
return fd.Default()
}
switch fd.Kind() {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
if t.Kind() == reflect.Bool {
return &boolConverter{t, defVal(fd, boolZero)}
}
- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if t.Kind() == reflect.Int32 {
return &int32Converter{t, defVal(fd, int32Zero)}
}
- case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if t.Kind() == reflect.Int64 {
return &int64Converter{t, defVal(fd, int64Zero)}
}
- case pref.Uint32Kind, pref.Fixed32Kind:
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if t.Kind() == reflect.Uint32 {
return &uint32Converter{t, defVal(fd, uint32Zero)}
}
- case pref.Uint64Kind, pref.Fixed64Kind:
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if t.Kind() == reflect.Uint64 {
return &uint64Converter{t, defVal(fd, uint64Zero)}
}
- case pref.FloatKind:
+ case protoreflect.FloatKind:
if t.Kind() == reflect.Float32 {
return &float32Converter{t, defVal(fd, float32Zero)}
}
- case pref.DoubleKind:
+ case protoreflect.DoubleKind:
if t.Kind() == reflect.Float64 {
return &float64Converter{t, defVal(fd, float64Zero)}
}
- case pref.StringKind:
+ case protoreflect.StringKind:
if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) {
return &stringConverter{t, defVal(fd, stringZero)}
}
- case pref.BytesKind:
+ case protoreflect.BytesKind:
if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) {
return &bytesConverter{t, defVal(fd, bytesZero)}
}
- case pref.EnumKind:
+ case protoreflect.EnumKind:
// Handle enums, which must be a named int32 type.
if t.Kind() == reflect.Int32 {
return newEnumConverter(t, fd)
}
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
return newMessageConverter(t)
}
panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
@@ -145,184 +145,184 @@ func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
type boolConverter struct {
goType reflect.Type
- def pref.Value
+ def protoreflect.Value
}
-func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value {
+func (c *boolConverter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
- return pref.ValueOfBool(v.Bool())
+ return protoreflect.ValueOfBool(v.Bool())
}
-func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value {
+func (c *boolConverter) GoValueOf(v protoreflect.Value) reflect.Value {
return reflect.ValueOf(v.Bool()).Convert(c.goType)
}
-func (c *boolConverter) IsValidPB(v pref.Value) bool {
+func (c *boolConverter) IsValidPB(v protoreflect.Value) bool {
_, ok := v.Interface().(bool)
return ok
}
func (c *boolConverter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *boolConverter) New() pref.Value { return c.def }
-func (c *boolConverter) Zero() pref.Value { return c.def }
+func (c *boolConverter) New() protoreflect.Value { return c.def }
+func (c *boolConverter) Zero() protoreflect.Value { return c.def }
type int32Converter struct {
goType reflect.Type
- def pref.Value
+ def protoreflect.Value
}
-func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value {
+func (c *int32Converter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
- return pref.ValueOfInt32(int32(v.Int()))
+ return protoreflect.ValueOfInt32(int32(v.Int()))
}
-func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value {
+func (c *int32Converter) GoValueOf(v protoreflect.Value) reflect.Value {
return reflect.ValueOf(int32(v.Int())).Convert(c.goType)
}
-func (c *int32Converter) IsValidPB(v pref.Value) bool {
+func (c *int32Converter) IsValidPB(v protoreflect.Value) bool {
_, ok := v.Interface().(int32)
return ok
}
func (c *int32Converter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *int32Converter) New() pref.Value { return c.def }
-func (c *int32Converter) Zero() pref.Value { return c.def }
+func (c *int32Converter) New() protoreflect.Value { return c.def }
+func (c *int32Converter) Zero() protoreflect.Value { return c.def }
type int64Converter struct {
goType reflect.Type
- def pref.Value
+ def protoreflect.Value
}
-func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value {
+func (c *int64Converter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
- return pref.ValueOfInt64(int64(v.Int()))
+ return protoreflect.ValueOfInt64(int64(v.Int()))
}
-func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value {
+func (c *int64Converter) GoValueOf(v protoreflect.Value) reflect.Value {
return reflect.ValueOf(int64(v.Int())).Convert(c.goType)
}
-func (c *int64Converter) IsValidPB(v pref.Value) bool {
+func (c *int64Converter) IsValidPB(v protoreflect.Value) bool {
_, ok := v.Interface().(int64)
return ok
}
func (c *int64Converter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *int64Converter) New() pref.Value { return c.def }
-func (c *int64Converter) Zero() pref.Value { return c.def }
+func (c *int64Converter) New() protoreflect.Value { return c.def }
+func (c *int64Converter) Zero() protoreflect.Value { return c.def }
type uint32Converter struct {
goType reflect.Type
- def pref.Value
+ def protoreflect.Value
}
-func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value {
+func (c *uint32Converter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
- return pref.ValueOfUint32(uint32(v.Uint()))
+ return protoreflect.ValueOfUint32(uint32(v.Uint()))
}
-func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value {
+func (c *uint32Converter) GoValueOf(v protoreflect.Value) reflect.Value {
return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType)
}
-func (c *uint32Converter) IsValidPB(v pref.Value) bool {
+func (c *uint32Converter) IsValidPB(v protoreflect.Value) bool {
_, ok := v.Interface().(uint32)
return ok
}
func (c *uint32Converter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *uint32Converter) New() pref.Value { return c.def }
-func (c *uint32Converter) Zero() pref.Value { return c.def }
+func (c *uint32Converter) New() protoreflect.Value { return c.def }
+func (c *uint32Converter) Zero() protoreflect.Value { return c.def }
type uint64Converter struct {
goType reflect.Type
- def pref.Value
+ def protoreflect.Value
}
-func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value {
+func (c *uint64Converter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
- return pref.ValueOfUint64(uint64(v.Uint()))
+ return protoreflect.ValueOfUint64(uint64(v.Uint()))
}
-func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value {
+func (c *uint64Converter) GoValueOf(v protoreflect.Value) reflect.Value {
return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType)
}
-func (c *uint64Converter) IsValidPB(v pref.Value) bool {
+func (c *uint64Converter) IsValidPB(v protoreflect.Value) bool {
_, ok := v.Interface().(uint64)
return ok
}
func (c *uint64Converter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *uint64Converter) New() pref.Value { return c.def }
-func (c *uint64Converter) Zero() pref.Value { return c.def }
+func (c *uint64Converter) New() protoreflect.Value { return c.def }
+func (c *uint64Converter) Zero() protoreflect.Value { return c.def }
type float32Converter struct {
goType reflect.Type
- def pref.Value
+ def protoreflect.Value
}
-func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value {
+func (c *float32Converter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
- return pref.ValueOfFloat32(float32(v.Float()))
+ return protoreflect.ValueOfFloat32(float32(v.Float()))
}
-func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value {
+func (c *float32Converter) GoValueOf(v protoreflect.Value) reflect.Value {
return reflect.ValueOf(float32(v.Float())).Convert(c.goType)
}
-func (c *float32Converter) IsValidPB(v pref.Value) bool {
+func (c *float32Converter) IsValidPB(v protoreflect.Value) bool {
_, ok := v.Interface().(float32)
return ok
}
func (c *float32Converter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *float32Converter) New() pref.Value { return c.def }
-func (c *float32Converter) Zero() pref.Value { return c.def }
+func (c *float32Converter) New() protoreflect.Value { return c.def }
+func (c *float32Converter) Zero() protoreflect.Value { return c.def }
type float64Converter struct {
goType reflect.Type
- def pref.Value
+ def protoreflect.Value
}
-func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value {
+func (c *float64Converter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
- return pref.ValueOfFloat64(float64(v.Float()))
+ return protoreflect.ValueOfFloat64(float64(v.Float()))
}
-func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value {
+func (c *float64Converter) GoValueOf(v protoreflect.Value) reflect.Value {
return reflect.ValueOf(float64(v.Float())).Convert(c.goType)
}
-func (c *float64Converter) IsValidPB(v pref.Value) bool {
+func (c *float64Converter) IsValidPB(v protoreflect.Value) bool {
_, ok := v.Interface().(float64)
return ok
}
func (c *float64Converter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *float64Converter) New() pref.Value { return c.def }
-func (c *float64Converter) Zero() pref.Value { return c.def }
+func (c *float64Converter) New() protoreflect.Value { return c.def }
+func (c *float64Converter) Zero() protoreflect.Value { return c.def }
type stringConverter struct {
goType reflect.Type
- def pref.Value
+ def protoreflect.Value
}
-func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value {
+func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
- return pref.ValueOfString(v.Convert(stringType).String())
+ return protoreflect.ValueOfString(v.Convert(stringType).String())
}
-func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value {
+func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
// protoreflect.Value.String never panics, so we go through an interface
// conversion here to check the type.
s := v.Interface().(string)
@@ -331,71 +331,71 @@ func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value {
}
return reflect.ValueOf(s).Convert(c.goType)
}
-func (c *stringConverter) IsValidPB(v pref.Value) bool {
+func (c *stringConverter) IsValidPB(v protoreflect.Value) bool {
_, ok := v.Interface().(string)
return ok
}
func (c *stringConverter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *stringConverter) New() pref.Value { return c.def }
-func (c *stringConverter) Zero() pref.Value { return c.def }
+func (c *stringConverter) New() protoreflect.Value { return c.def }
+func (c *stringConverter) Zero() protoreflect.Value { return c.def }
type bytesConverter struct {
goType reflect.Type
- def pref.Value
+ def protoreflect.Value
}
-func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value {
+func (c *bytesConverter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
if c.goType.Kind() == reflect.String && v.Len() == 0 {
- return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil)
+ return protoreflect.ValueOfBytes(nil) // ensure empty string is []byte(nil)
}
- return pref.ValueOfBytes(v.Convert(bytesType).Bytes())
+ return protoreflect.ValueOfBytes(v.Convert(bytesType).Bytes())
}
-func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value {
+func (c *bytesConverter) GoValueOf(v protoreflect.Value) reflect.Value {
return reflect.ValueOf(v.Bytes()).Convert(c.goType)
}
-func (c *bytesConverter) IsValidPB(v pref.Value) bool {
+func (c *bytesConverter) IsValidPB(v protoreflect.Value) bool {
_, ok := v.Interface().([]byte)
return ok
}
func (c *bytesConverter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *bytesConverter) New() pref.Value { return c.def }
-func (c *bytesConverter) Zero() pref.Value { return c.def }
+func (c *bytesConverter) New() protoreflect.Value { return c.def }
+func (c *bytesConverter) Zero() protoreflect.Value { return c.def }
type enumConverter struct {
goType reflect.Type
- def pref.Value
+ def protoreflect.Value
}
-func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter {
- var def pref.Value
- if fd.Cardinality() == pref.Repeated {
- def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number())
+func newEnumConverter(goType reflect.Type, fd protoreflect.FieldDescriptor) Converter {
+ var def protoreflect.Value
+ if fd.Cardinality() == protoreflect.Repeated {
+ def = protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number())
} else {
def = fd.Default()
}
return &enumConverter{goType, def}
}
-func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value {
+func (c *enumConverter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
- return pref.ValueOfEnum(pref.EnumNumber(v.Int()))
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v.Int()))
}
-func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value {
+func (c *enumConverter) GoValueOf(v protoreflect.Value) reflect.Value {
return reflect.ValueOf(v.Enum()).Convert(c.goType)
}
-func (c *enumConverter) IsValidPB(v pref.Value) bool {
- _, ok := v.Interface().(pref.EnumNumber)
+func (c *enumConverter) IsValidPB(v protoreflect.Value) bool {
+ _, ok := v.Interface().(protoreflect.EnumNumber)
return ok
}
@@ -403,11 +403,11 @@ func (c *enumConverter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *enumConverter) New() pref.Value {
+func (c *enumConverter) New() protoreflect.Value {
return c.def
}
-func (c *enumConverter) Zero() pref.Value {
+func (c *enumConverter) Zero() protoreflect.Value {
return c.def
}
@@ -419,7 +419,7 @@ func newMessageConverter(goType reflect.Type) Converter {
return &messageConverter{goType}
}
-func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value {
+func (c *messageConverter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
@@ -430,13 +430,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value {
v = reflect.Zero(reflect.PtrTo(v.Type()))
}
}
- if m, ok := v.Interface().(pref.ProtoMessage); ok {
- return pref.ValueOfMessage(m.ProtoReflect())
+ if m, ok := v.Interface().(protoreflect.ProtoMessage); ok {
+ return protoreflect.ValueOfMessage(m.ProtoReflect())
}
- return pref.ValueOfMessage(legacyWrapMessage(v))
+ return protoreflect.ValueOfMessage(legacyWrapMessage(v))
}
-func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value {
+func (c *messageConverter) GoValueOf(v protoreflect.Value) reflect.Value {
m := v.Message()
var rv reflect.Value
if u, ok := m.(unwrapper); ok {
@@ -460,7 +460,7 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value {
return rv
}
-func (c *messageConverter) IsValidPB(v pref.Value) bool {
+func (c *messageConverter) IsValidPB(v protoreflect.Value) bool {
m := v.Message()
var rv reflect.Value
if u, ok := m.(unwrapper); ok {
@@ -478,14 +478,14 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *messageConverter) New() pref.Value {
+func (c *messageConverter) New() protoreflect.Value {
if c.isNonPointer() {
return c.PBValueOf(reflect.New(c.goType).Elem())
}
return c.PBValueOf(reflect.New(c.goType.Elem()))
}
-func (c *messageConverter) Zero() pref.Value {
+func (c *messageConverter) Zero() protoreflect.Value {
return c.PBValueOf(reflect.Zero(c.goType))
}
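
Note: Converter's contract is a lossless round trip between reflect.Value and protoreflect.Value. A hedged sketch of that round trip using only the public value constructors (the impl package itself is internal and cannot be imported; assumes imports "reflect" and "google.golang.org/protobuf/reflect/protoreflect"):

    pv := protoreflect.ValueOfInt32(42)      // the PBValueOf direction: Go value -> protoreflect.Value
    if n, ok := pv.Interface().(int32); ok { // the IsValidPB-style type check
        rv := reflect.ValueOf(n)             // the GoValueOf direction: back to a reflect.Value
        _ = rv
    }
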
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
index 6fccab520e..f89136516f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
@@ -8,10 +8,10 @@ import (
"fmt"
"reflect"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
-func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+func newListConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter {
switch {
case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice:
return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)}
@@ -26,16 +26,16 @@ type listConverter struct {
c Converter
}
-func (c *listConverter) PBValueOf(v reflect.Value) pref.Value {
+func (c *listConverter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
pv := reflect.New(c.goType)
pv.Elem().Set(v)
- return pref.ValueOfList(&listReflect{pv, c.c})
+ return protoreflect.ValueOfList(&listReflect{pv, c.c})
}
-func (c *listConverter) GoValueOf(v pref.Value) reflect.Value {
+func (c *listConverter) GoValueOf(v protoreflect.Value) reflect.Value {
rv := v.List().(*listReflect).v
if rv.IsNil() {
return reflect.Zero(c.goType)
@@ -43,7 +43,7 @@ func (c *listConverter) GoValueOf(v pref.Value) reflect.Value {
return rv.Elem()
}
-func (c *listConverter) IsValidPB(v pref.Value) bool {
+func (c *listConverter) IsValidPB(v protoreflect.Value) bool {
list, ok := v.Interface().(*listReflect)
if !ok {
return false
@@ -55,12 +55,12 @@ func (c *listConverter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *listConverter) New() pref.Value {
- return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c})
+func (c *listConverter) New() protoreflect.Value {
+ return protoreflect.ValueOfList(&listReflect{reflect.New(c.goType), c.c})
}
-func (c *listConverter) Zero() pref.Value {
- return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c})
+func (c *listConverter) Zero() protoreflect.Value {
+ return protoreflect.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c})
}
type listPtrConverter struct {
@@ -68,18 +68,18 @@ type listPtrConverter struct {
c Converter
}
-func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value {
+func (c *listPtrConverter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
- return pref.ValueOfList(&listReflect{v, c.c})
+ return protoreflect.ValueOfList(&listReflect{v, c.c})
}
-func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value {
+func (c *listPtrConverter) GoValueOf(v protoreflect.Value) reflect.Value {
return v.List().(*listReflect).v
}
-func (c *listPtrConverter) IsValidPB(v pref.Value) bool {
+func (c *listPtrConverter) IsValidPB(v protoreflect.Value) bool {
list, ok := v.Interface().(*listReflect)
if !ok {
return false
@@ -91,11 +91,11 @@ func (c *listPtrConverter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *listPtrConverter) New() pref.Value {
+func (c *listPtrConverter) New() protoreflect.Value {
return c.PBValueOf(reflect.New(c.goType.Elem()))
}
-func (c *listPtrConverter) Zero() pref.Value {
+func (c *listPtrConverter) Zero() protoreflect.Value {
return c.PBValueOf(reflect.Zero(c.goType))
}
@@ -110,16 +110,16 @@ func (ls *listReflect) Len() int {
}
return ls.v.Elem().Len()
}
-func (ls *listReflect) Get(i int) pref.Value {
+func (ls *listReflect) Get(i int) protoreflect.Value {
return ls.conv.PBValueOf(ls.v.Elem().Index(i))
}
-func (ls *listReflect) Set(i int, v pref.Value) {
+func (ls *listReflect) Set(i int, v protoreflect.Value) {
ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v))
}
-func (ls *listReflect) Append(v pref.Value) {
+func (ls *listReflect) Append(v protoreflect.Value) {
ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v)))
}
-func (ls *listReflect) AppendMutable() pref.Value {
+func (ls *listReflect) AppendMutable() protoreflect.Value {
if _, ok := ls.conv.(*messageConverter); !ok {
panic("invalid AppendMutable on list with non-message type")
}
@@ -130,7 +130,7 @@ func (ls *listReflect) AppendMutable() pref.Value {
func (ls *listReflect) Truncate(i int) {
ls.v.Elem().Set(ls.v.Elem().Slice(0, i))
}
-func (ls *listReflect) NewElement() pref.Value {
+func (ls *listReflect) NewElement() protoreflect.Value {
return ls.conv.New()
}
func (ls *listReflect) IsValid() bool {
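
Note: listReflect adapts a Go slice to the protoreflect.List interface. From the outside that surface looks like the sketch below; examplepb.Thing and its `repeated string items = 1;` field are hypothetical:

    m := &examplepb.Thing{}
    fd := m.ProtoReflect().Descriptor().Fields().ByName("items")
    list := m.ProtoReflect().Mutable(fd).List() // served by a listReflect-style adapter
    list.Append(protoreflect.ValueOfString("x"))
    _ = list.Len() // 1
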
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
index de06b2593f..f30b0a0576 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
@@ -8,7 +8,7 @@ import (
"fmt"
"reflect"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
type mapConverter struct {
@@ -16,7 +16,7 @@ type mapConverter struct {
keyConv, valConv Converter
}
-func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter {
+func newMapConverter(t reflect.Type, fd protoreflect.FieldDescriptor) *mapConverter {
if t.Kind() != reflect.Map {
panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
}
@@ -27,18 +27,18 @@ func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter {
}
}
-func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value {
+func (c *mapConverter) PBValueOf(v reflect.Value) protoreflect.Value {
if v.Type() != c.goType {
panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
}
- return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv})
+ return protoreflect.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv})
}
-func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value {
+func (c *mapConverter) GoValueOf(v protoreflect.Value) reflect.Value {
return v.Map().(*mapReflect).v
}
-func (c *mapConverter) IsValidPB(v pref.Value) bool {
+func (c *mapConverter) IsValidPB(v protoreflect.Value) bool {
mapv, ok := v.Interface().(*mapReflect)
if !ok {
return false
@@ -50,11 +50,11 @@ func (c *mapConverter) IsValidGo(v reflect.Value) bool {
return v.IsValid() && v.Type() == c.goType
}
-func (c *mapConverter) New() pref.Value {
+func (c *mapConverter) New() protoreflect.Value {
return c.PBValueOf(reflect.MakeMap(c.goType))
}
-func (c *mapConverter) Zero() pref.Value {
+func (c *mapConverter) Zero() protoreflect.Value {
return c.PBValueOf(reflect.Zero(c.goType))
}
@@ -67,29 +67,29 @@ type mapReflect struct {
func (ms *mapReflect) Len() int {
return ms.v.Len()
}
-func (ms *mapReflect) Has(k pref.MapKey) bool {
+func (ms *mapReflect) Has(k protoreflect.MapKey) bool {
rk := ms.keyConv.GoValueOf(k.Value())
rv := ms.v.MapIndex(rk)
return rv.IsValid()
}
-func (ms *mapReflect) Get(k pref.MapKey) pref.Value {
+func (ms *mapReflect) Get(k protoreflect.MapKey) protoreflect.Value {
rk := ms.keyConv.GoValueOf(k.Value())
rv := ms.v.MapIndex(rk)
if !rv.IsValid() {
- return pref.Value{}
+ return protoreflect.Value{}
}
return ms.valConv.PBValueOf(rv)
}
-func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) {
+func (ms *mapReflect) Set(k protoreflect.MapKey, v protoreflect.Value) {
rk := ms.keyConv.GoValueOf(k.Value())
rv := ms.valConv.GoValueOf(v)
ms.v.SetMapIndex(rk, rv)
}
-func (ms *mapReflect) Clear(k pref.MapKey) {
+func (ms *mapReflect) Clear(k protoreflect.MapKey) {
rk := ms.keyConv.GoValueOf(k.Value())
ms.v.SetMapIndex(rk, reflect.Value{})
}
-func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value {
+func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value {
if _, ok := ms.valConv.(*messageConverter); !ok {
panic("invalid Mutable on map with non-message value type")
}
@@ -100,7 +100,7 @@ func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value {
}
return v
}
-func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) {
+func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) {
iter := mapRange(ms.v)
for iter.Next() {
k := ms.keyConv.PBValueOf(iter.Key()).MapKey()
@@ -110,7 +110,7 @@ func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) {
}
}
}
-func (ms *mapReflect) NewValue() pref.Value {
+func (ms *mapReflect) NewValue() protoreflect.Value {
return ms.valConv.New()
}
func (ms *mapReflect) IsValid() bool {
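
Note: mapReflect is the analogous adapter for Go maps. The protoreflect.Map surface it implements, sketched against a hypothetical `map<string, int32> counts = 2;` field on the same hypothetical examplepb.Thing:

    m := &examplepb.Thing{}
    fd := m.ProtoReflect().Descriptor().Fields().ByName("counts")
    mp := m.ProtoReflect().Mutable(fd).Map()
    k := protoreflect.ValueOfString("hits").MapKey()
    mp.Set(k, protoreflect.ValueOfInt32(3)) // mapReflect.Set -> reflect SetMapIndex under the hood
    mp.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
        return true // visit every entry; return false to stop early
    })
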
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index c65b0325c1..cda0520c27 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -12,9 +12,8 @@ import (
"google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
- preg "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoiface"
- piface "google.golang.org/protobuf/runtime/protoiface"
)
var errDecode = errors.New("cannot parse invalid wire-format data")
@@ -38,14 +37,16 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions {
}
}
-func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 }
+func (o unmarshalOptions) DiscardUnknown() bool {
+ return o.flags&protoiface.UnmarshalDiscardUnknown != 0
+}
func (o unmarshalOptions) IsDefault() bool {
- return o.flags == 0 && o.resolver == preg.GlobalTypes
+ return o.flags == 0 && o.resolver == protoregistry.GlobalTypes
}
var lazyUnmarshalOptions = unmarshalOptions{
- resolver: preg.GlobalTypes,
+ resolver: protoregistry.GlobalTypes,
depth: protowire.DefaultRecursionLimit,
}
@@ -55,7 +56,7 @@ type unmarshalOutput struct {
}
// unmarshal is protoreflect.Methods.Unmarshal.
-func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) {
+func (mi *MessageInfo) unmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
var p pointer
if ms, ok := in.Message.(*messageState); ok {
p = ms.pointer()
@@ -67,11 +68,11 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp
resolver: in.Resolver,
depth: in.Depth,
})
- var flags piface.UnmarshalOutputFlags
+ var flags protoiface.UnmarshalOutputFlags
if out.initialized {
- flags |= piface.UnmarshalInitialized
+ flags |= protoiface.UnmarshalInitialized
}
- return piface.UnmarshalOutput{
+ return protoiface.UnmarshalOutput{
Flags: flags,
}, err
}
@@ -210,7 +211,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p
var err error
xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num)
if err != nil {
- if err == preg.NotFound {
+ if err == protoregistry.NotFound {
return out, errUnknown
}
return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err)
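
Note: the unmarshalOptions flags above correspond to fields on the public proto.UnmarshalOptions, and the IsDefault fast path fires exactly when a caller used zero flags with the global registry. The equivalent public-API call, for reference (assumes imports "google.golang.org/protobuf/proto" and "google.golang.org/protobuf/reflect/protoregistry"; data is the wire bytes, msg any proto.Message):

    opts := proto.UnmarshalOptions{
        DiscardUnknown: true,                      // surfaces as protoiface.UnmarshalDiscardUnknown above
        Resolver:       protoregistry.GlobalTypes, // the default extension resolver
    }
    if err := opts.Unmarshal(data, msg); err != nil {
        // handle wire-format parse error
    }
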
diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go
index 8c1eab4bfd..5f3ef5ad73 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/enum.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go
@@ -7,15 +7,15 @@ package impl
import (
"reflect"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
type EnumInfo struct {
GoReflectType reflect.Type // int32 kind
- Desc pref.EnumDescriptor
+ Desc protoreflect.EnumDescriptor
}
-func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum {
- return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum)
+func (t *EnumInfo) New(n protoreflect.EnumNumber) protoreflect.Enum {
+ return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(protoreflect.Enum)
}
-func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc }
+func (t *EnumInfo) Descriptor() protoreflect.EnumDescriptor { return t.Desc }
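
Note: EnumInfo.New is a single reflective conversion. Spelled out against a hypothetical generated enum examplepb.Color (generated enums are defined as int32 and implement protoreflect.Enum, so the final assertion holds):

    n := protoreflect.EnumNumber(1)
    goType := reflect.TypeOf(examplepb.Color(0))
    e := reflect.ValueOf(n).Convert(goType).Interface().(protoreflect.Enum)
    _ = e.Number() // 1
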
diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go
index e904fd9936..cb25b0bae1 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go
@@ -9,8 +9,8 @@ import (
"sync"
"sync/atomic"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- piface "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
)
// ExtensionInfo implements ExtensionType.
@@ -45,7 +45,7 @@ type ExtensionInfo struct {
// since the message may no longer implement the MessageV1 interface.
//
// Deprecated: Use the ExtendedType method instead.
- ExtendedType piface.MessageV1
+ ExtendedType protoiface.MessageV1
// ExtensionType is the zero value of the extension type.
//
@@ -83,31 +83,31 @@ const (
extensionInfoFullInit = 2
)
-func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) {
+func InitExtensionInfo(xi *ExtensionInfo, xd protoreflect.ExtensionDescriptor, goType reflect.Type) {
xi.goType = goType
xi.desc = extensionTypeDescriptor{xd, xi}
xi.init = extensionInfoDescInit
}
-func (xi *ExtensionInfo) New() pref.Value {
+func (xi *ExtensionInfo) New() protoreflect.Value {
return xi.lazyInit().New()
}
-func (xi *ExtensionInfo) Zero() pref.Value {
+func (xi *ExtensionInfo) Zero() protoreflect.Value {
return xi.lazyInit().Zero()
}
-func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value {
+func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value {
return xi.lazyInit().PBValueOf(reflect.ValueOf(v))
}
-func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} {
+func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} {
return xi.lazyInit().GoValueOf(v).Interface()
}
-func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool {
+func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool {
return xi.lazyInit().IsValidPB(v)
}
func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool {
return xi.lazyInit().IsValidGo(reflect.ValueOf(v))
}
-func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor {
+func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
if atomic.LoadUint32(&xi.init) < extensionInfoDescInit {
xi.lazyInitSlow()
}
@@ -144,13 +144,13 @@ func (xi *ExtensionInfo) lazyInitSlow() {
}
type extensionTypeDescriptor struct {
- pref.ExtensionDescriptor
+ protoreflect.ExtensionDescriptor
xi *ExtensionInfo
}
-func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType {
+func (xtd *extensionTypeDescriptor) Type() protoreflect.ExtensionType {
return xtd.xi
}
-func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor {
+func (xtd *extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor {
return xtd.ExtensionDescriptor
}
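
Note: ExtensionInfo is the concrete protoreflect.ExtensionType behind generated extension variables, which is why it plugs directly into the public extension helpers. A sketch, where examplepb.E_MyExt and its int32 payload are hypothetical:

    proto.SetExtension(msg, examplepb.E_MyExt, int32(7))
    v := proto.GetExtension(msg, examplepb.E_MyExt).(int32) // 7
    _ = proto.HasExtension(msg, examplepb.E_MyExt)          // true
    _ = v
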
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
index f7d7ffb510..c2a803bb2f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
@@ -13,13 +13,12 @@ import (
"google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/reflect/protoreflect"
- pref "google.golang.org/protobuf/reflect/protoreflect"
)
// legacyEnumName returns the name of enums used in legacy code.
// It is neither the protobuf full name nor the qualified Go name,
// but rather an odd hybrid of both.
-func legacyEnumName(ed pref.EnumDescriptor) string {
+func legacyEnumName(ed protoreflect.EnumDescriptor) string {
var protoPkg string
enumName := string(ed.FullName())
if fd := ed.ParentFile(); fd != nil {
@@ -34,68 +33,68 @@ func legacyEnumName(ed pref.EnumDescriptor) string {
// legacyWrapEnum wraps v as a protoreflect.Enum,
// where v must be an int32 kind and not implement the v2 API already.
-func legacyWrapEnum(v reflect.Value) pref.Enum {
+func legacyWrapEnum(v reflect.Value) protoreflect.Enum {
et := legacyLoadEnumType(v.Type())
- return et.New(pref.EnumNumber(v.Int()))
+ return et.New(protoreflect.EnumNumber(v.Int()))
}
var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType
// legacyLoadEnumType dynamically loads a protoreflect.EnumType for t,
// where t must be an int32 kind and not implement the v2 API already.
-func legacyLoadEnumType(t reflect.Type) pref.EnumType {
+func legacyLoadEnumType(t reflect.Type) protoreflect.EnumType {
// Fast-path: check if an EnumType is cached for this concrete type.
if et, ok := legacyEnumTypeCache.Load(t); ok {
- return et.(pref.EnumType)
+ return et.(protoreflect.EnumType)
}
// Slow-path: derive enum descriptor and initialize EnumType.
- var et pref.EnumType
+ var et protoreflect.EnumType
ed := LegacyLoadEnumDesc(t)
et = &legacyEnumType{
desc: ed,
goType: t,
}
if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok {
- return et.(pref.EnumType)
+ return et.(protoreflect.EnumType)
}
return et
}
type legacyEnumType struct {
- desc pref.EnumDescriptor
+ desc protoreflect.EnumDescriptor
goType reflect.Type
m sync.Map // map[protoreflect.EnumNumber]proto.Enum
}
-func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum {
+func (t *legacyEnumType) New(n protoreflect.EnumNumber) protoreflect.Enum {
if e, ok := t.m.Load(n); ok {
- return e.(pref.Enum)
+ return e.(protoreflect.Enum)
}
e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType}
t.m.Store(n, e)
return e
}
-func (t *legacyEnumType) Descriptor() pref.EnumDescriptor {
+func (t *legacyEnumType) Descriptor() protoreflect.EnumDescriptor {
return t.desc
}
type legacyEnumWrapper struct {
- num pref.EnumNumber
- pbTyp pref.EnumType
+ num protoreflect.EnumNumber
+ pbTyp protoreflect.EnumType
goTyp reflect.Type
}
-func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor {
+func (e *legacyEnumWrapper) Descriptor() protoreflect.EnumDescriptor {
return e.pbTyp.Descriptor()
}
-func (e *legacyEnumWrapper) Type() pref.EnumType {
+func (e *legacyEnumWrapper) Type() protoreflect.EnumType {
return e.pbTyp
}
-func (e *legacyEnumWrapper) Number() pref.EnumNumber {
+func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber {
return e.num
}
-func (e *legacyEnumWrapper) ProtoReflect() pref.Enum {
+func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum {
return e
}
func (e *legacyEnumWrapper) protoUnwrap() interface{} {
@@ -105,8 +104,8 @@ func (e *legacyEnumWrapper) protoUnwrap() interface{} {
}
var (
- _ pref.Enum = (*legacyEnumWrapper)(nil)
- _ unwrapper = (*legacyEnumWrapper)(nil)
+ _ protoreflect.Enum = (*legacyEnumWrapper)(nil)
+ _ unwrapper = (*legacyEnumWrapper)(nil)
)
var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor
@@ -115,15 +114,15 @@ var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor
// which must be an int32 kind and not implement the v2 API already.
//
// This is exported for testing purposes.
-func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor {
+func LegacyLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor {
// Fast-path: check if an EnumDescriptor is cached for this concrete type.
if ed, ok := legacyEnumDescCache.Load(t); ok {
- return ed.(pref.EnumDescriptor)
+ return ed.(protoreflect.EnumDescriptor)
}
// Slow-path: initialize EnumDescriptor from the raw descriptor.
ev := reflect.Zero(t).Interface()
- if _, ok := ev.(pref.Enum); ok {
+ if _, ok := ev.(protoreflect.Enum); ok {
panic(fmt.Sprintf("%v already implements proto.Enum", t))
}
edV1, ok := ev.(enumV1)
@@ -132,7 +131,7 @@ func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor {
}
b, idxs := edV1.EnumDescriptor()
- var ed pref.EnumDescriptor
+ var ed protoreflect.EnumDescriptor
if len(idxs) == 1 {
ed = legacyLoadFileDesc(b).Enums().Get(idxs[0])
} else {
@@ -158,10 +157,10 @@ var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescript
// We are unable to use the global enum registry since it is
// unfortunately keyed by the protobuf full name, which we also do not know.
// Thus, this produces some bogus enum descriptor based on the Go type name.
-func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor {
+func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor {
// Fast-path: check if an EnumDescriptor is cached for this concrete type.
if ed, ok := aberrantEnumDescCache.Load(t); ok {
- return ed.(pref.EnumDescriptor)
+ return ed.(protoreflect.EnumDescriptor)
}
// Slow-path: construct a bogus, but unique EnumDescriptor.
@@ -182,7 +181,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor {
// An exhaustive query is clearly impractical, but can be best-effort.
if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok {
- return ed.(pref.EnumDescriptor)
+ return ed.(protoreflect.EnumDescriptor)
}
return ed
}
@@ -192,7 +191,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor {
// It should be sufficiently unique within a program.
//
// This is exported for testing purposes.
-func AberrantDeriveFullName(t reflect.Type) pref.FullName {
+func AberrantDeriveFullName(t reflect.Type) protoreflect.FullName {
sanitize := func(r rune) rune {
switch {
case r == '/':
@@ -215,5 +214,5 @@ func AberrantDeriveFullName(t reflect.Type) pref.FullName {
ss[i] = "x" + s
}
}
- return pref.FullName(strings.Join(ss, "."))
+ return protoreflect.FullName(strings.Join(ss, "."))
}
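
Note: both caches in this file follow the same Load / LoadOrStore discipline so that racing goroutines converge on one canonical value. The pattern in isolation (newEnumType is an assumed constructor for the sketch; assumes imports "reflect", "sync", and "google.golang.org/protobuf/reflect/protoreflect"):

    var cache sync.Map // map[reflect.Type]protoreflect.EnumType

    func load(t reflect.Type) protoreflect.EnumType {
        if v, ok := cache.Load(t); ok {
            return v.(protoreflect.EnumType) // fast path: already built
        }
        et := newEnumType(t) // slow path: build a candidate
        if v, ok := cache.LoadOrStore(t, et); ok {
            return v.(protoreflect.EnumType) // another goroutine won the race
        }
        return et
    }
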
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
index e3fb0b5785..9b64ad5bba 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
@@ -12,21 +12,21 @@ import (
"reflect"
"google.golang.org/protobuf/internal/errors"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- piface "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
)
// These functions exist to support exported APIs in generated protobufs.
// While these are deprecated, they cannot be removed for compatibility reasons.
// LegacyEnumName returns the name of enums used in legacy code.
-func (Export) LegacyEnumName(ed pref.EnumDescriptor) string {
+func (Export) LegacyEnumName(ed protoreflect.EnumDescriptor) string {
return legacyEnumName(ed)
}
// LegacyMessageTypeOf returns the protoreflect.MessageType for m,
// with name used as the message name if necessary.
-func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType {
+func (Export) LegacyMessageTypeOf(m protoiface.MessageV1, name protoreflect.FullName) protoreflect.MessageType {
if mv := (Export{}).protoMessageV2Of(m); mv != nil {
return mv.ProtoReflect().Type()
}
@@ -36,9 +36,9 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M
// UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input.
// The input can either be a string representing the enum value by name,
// or a number representing the enum number itself.
-func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) {
+func (Export) UnmarshalJSONEnum(ed protoreflect.EnumDescriptor, b []byte) (protoreflect.EnumNumber, error) {
if b[0] == '"' {
- var name pref.Name
+ var name protoreflect.Name
if err := json.Unmarshal(b, &name); err != nil {
return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b)
}
@@ -48,7 +48,7 @@ func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumb
}
return ev.Number(), nil
} else {
- var num pref.EnumNumber
+ var num protoreflect.EnumNumber
if err := json.Unmarshal(b, &num); err != nil {
return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b)
}
@@ -81,8 +81,8 @@ func (Export) CompressGZIP(in []byte) (out []byte) {
blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3.
blockSize = len(in)
}
- binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000)
- binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff)
+ binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize))
+ binary.LittleEndian.PutUint16(blockHeader[3:5], ^uint16(blockSize))
out = append(out, blockHeader[:]...)
out = append(out, in[:blockSize]...)
in = in[blockSize:]
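
Note: the CompressGZIP hunk above is behavior-preserving: LEN^0x0000 is just LEN, and LEN^0xffff equals ^LEN, the one's complement that RFC 1951 (section 3.2.4) calls NLEN. The stored-block header it emits, in isolation (assumes import "encoding/binary"):

    var hdr [5]byte
    hdr[0] = 0x01        // BFINAL=1, BTYPE=00 (stored / no compression)
    size := uint16(1024) // LEN: number of raw bytes in this block
    binary.LittleEndian.PutUint16(hdr[1:3], size)  // LEN, little-endian
    binary.LittleEndian.PutUint16(hdr[3:5], ^size) // NLEN: one's complement of LEN
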
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 49e723161c..87b30d0504 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -12,16 +12,16 @@ import (
ptag "google.golang.org/protobuf/internal/encoding/tag"
"google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/pragma"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- preg "google.golang.org/protobuf/reflect/protoregistry"
- piface "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
)
func (xi *ExtensionInfo) initToLegacy() {
xd := xi.desc
- var parent piface.MessageV1
+ var parent protoiface.MessageV1
messageName := xd.ContainingMessage().FullName()
- if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil {
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(messageName); mt != nil {
// Create a new parent message and unwrap it if possible.
mv := mt.New().Interface()
t := reflect.TypeOf(mv)
@@ -31,7 +31,7 @@ func (xi *ExtensionInfo) initToLegacy() {
// Check whether the message implements the legacy v1 Message interface.
mz := reflect.Zero(t).Interface()
- if mz, ok := mz.(piface.MessageV1); ok {
+ if mz, ok := mz.(protoiface.MessageV1); ok {
parent = mz
}
}
@@ -46,7 +46,7 @@ func (xi *ExtensionInfo) initToLegacy() {
// Reconstruct the legacy enum full name.
var enumName string
- if xd.Kind() == pref.EnumKind {
+ if xd.Kind() == protoreflect.EnumKind {
enumName = legacyEnumName(xd.Enum())
}
@@ -77,16 +77,16 @@ func (xi *ExtensionInfo) initFromLegacy() {
// field number is specified. In such a case, use a placeholder.
if xi.ExtendedType == nil || xi.ExtensionType == nil {
xd := placeholderExtension{
- name: pref.FullName(xi.Name),
- number: pref.FieldNumber(xi.Field),
+ name: protoreflect.FullName(xi.Name),
+ number: protoreflect.FieldNumber(xi.Field),
}
xi.desc = extensionTypeDescriptor{xd, xi}
return
}
// Resolve enum or message dependencies.
- var ed pref.EnumDescriptor
- var md pref.MessageDescriptor
+ var ed protoreflect.EnumDescriptor
+ var md protoreflect.MessageDescriptor
t := reflect.TypeOf(xi.ExtensionType)
isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct
isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
@@ -94,18 +94,18 @@ func (xi *ExtensionInfo) initFromLegacy() {
t = t.Elem()
}
switch v := reflect.Zero(t).Interface().(type) {
- case pref.Enum:
+ case protoreflect.Enum:
ed = v.Descriptor()
case enumV1:
ed = LegacyLoadEnumDesc(t)
- case pref.ProtoMessage:
+ case protoreflect.ProtoMessage:
md = v.ProtoReflect().Descriptor()
case messageV1:
md = LegacyLoadMessageDesc(t)
}
// Derive basic field information from the struct tag.
- var evs pref.EnumValueDescriptors
+ var evs protoreflect.EnumValueDescriptors
if ed != nil {
evs = ed.Values()
}
@@ -114,8 +114,8 @@ func (xi *ExtensionInfo) initFromLegacy() {
// Construct a v2 ExtensionType.
xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)}
xd.L0.ParentFile = filedesc.SurrogateProto2
- xd.L0.FullName = pref.FullName(xi.Name)
- xd.L1.Number = pref.FieldNumber(xi.Field)
+ xd.L0.FullName = protoreflect.FullName(xi.Name)
+ xd.L1.Number = protoreflect.FieldNumber(xi.Field)
xd.L1.Cardinality = fd.L1.Cardinality
xd.L1.Kind = fd.L1.Kind
xd.L2.IsPacked = fd.L1.IsPacked
@@ -138,39 +138,39 @@ func (xi *ExtensionInfo) initFromLegacy() {
}
type placeholderExtension struct {
- name pref.FullName
- number pref.FieldNumber
+ name protoreflect.FullName
+ number protoreflect.FieldNumber
}
-func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil }
-func (x placeholderExtension) Parent() pref.Descriptor { return nil }
-func (x placeholderExtension) Index() int { return 0 }
-func (x placeholderExtension) Syntax() pref.Syntax { return 0 }
-func (x placeholderExtension) Name() pref.Name { return x.name.Name() }
-func (x placeholderExtension) FullName() pref.FullName { return x.name }
-func (x placeholderExtension) IsPlaceholder() bool { return true }
-func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field }
-func (x placeholderExtension) Number() pref.FieldNumber { return x.number }
-func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 }
-func (x placeholderExtension) Kind() pref.Kind { return 0 }
-func (x placeholderExtension) HasJSONName() bool { return false }
-func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" }
-func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" }
-func (x placeholderExtension) HasPresence() bool { return false }
-func (x placeholderExtension) HasOptionalKeyword() bool { return false }
-func (x placeholderExtension) IsExtension() bool { return true }
-func (x placeholderExtension) IsWeak() bool { return false }
-func (x placeholderExtension) IsPacked() bool { return false }
-func (x placeholderExtension) IsList() bool { return false }
-func (x placeholderExtension) IsMap() bool { return false }
-func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil }
-func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil }
-func (x placeholderExtension) HasDefault() bool { return false }
-func (x placeholderExtension) Default() pref.Value { return pref.Value{} }
-func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil }
-func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil }
-func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil }
-func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil }
-func (x placeholderExtension) Message() pref.MessageDescriptor { return nil }
-func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return }
-func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return }
+func (x placeholderExtension) ParentFile() protoreflect.FileDescriptor { return nil }
+func (x placeholderExtension) Parent() protoreflect.Descriptor { return nil }
+func (x placeholderExtension) Index() int { return 0 }
+func (x placeholderExtension) Syntax() protoreflect.Syntax { return 0 }
+func (x placeholderExtension) Name() protoreflect.Name { return x.name.Name() }
+func (x placeholderExtension) FullName() protoreflect.FullName { return x.name }
+func (x placeholderExtension) IsPlaceholder() bool { return true }
+func (x placeholderExtension) Options() protoreflect.ProtoMessage { return descopts.Field }
+func (x placeholderExtension) Number() protoreflect.FieldNumber { return x.number }
+func (x placeholderExtension) Cardinality() protoreflect.Cardinality { return 0 }
+func (x placeholderExtension) Kind() protoreflect.Kind { return 0 }
+func (x placeholderExtension) HasJSONName() bool { return false }
+func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" }
+func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" }
+func (x placeholderExtension) HasPresence() bool { return false }
+func (x placeholderExtension) HasOptionalKeyword() bool { return false }
+func (x placeholderExtension) IsExtension() bool { return true }
+func (x placeholderExtension) IsWeak() bool { return false }
+func (x placeholderExtension) IsPacked() bool { return false }
+func (x placeholderExtension) IsList() bool { return false }
+func (x placeholderExtension) IsMap() bool { return false }
+func (x placeholderExtension) MapKey() protoreflect.FieldDescriptor { return nil }
+func (x placeholderExtension) MapValue() protoreflect.FieldDescriptor { return nil }
+func (x placeholderExtension) HasDefault() bool { return false }
+func (x placeholderExtension) Default() protoreflect.Value { return protoreflect.Value{} }
+func (x placeholderExtension) DefaultEnumValue() protoreflect.EnumValueDescriptor { return nil }
+func (x placeholderExtension) ContainingOneof() protoreflect.OneofDescriptor { return nil }
+func (x placeholderExtension) ContainingMessage() protoreflect.MessageDescriptor { return nil }
+func (x placeholderExtension) Enum() protoreflect.EnumDescriptor { return nil }
+func (x placeholderExtension) Message() protoreflect.MessageDescriptor { return nil }
+func (x placeholderExtension) ProtoType(protoreflect.FieldDescriptor) { return }
+func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return }
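The hunks above are mechanical: the vendored protobuf code drops the pref, piface, and preg import aliases in favor of the canonical package names protoreflect, protoiface, and protoregistry. Both styles compile identically; a minimal sketch of the resulting import form, with a hypothetical helper (fullNameOf is not part of the diff):

package example

import "google.golang.org/protobuf/reflect/protoreflect"

// fullNameOf returns the descriptor full name of any v2 message.
func fullNameOf(m protoreflect.ProtoMessage) protoreflect.FullName {
	return m.ProtoReflect().Descriptor().FullName()
}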
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index 029feeefd7..61c483fac0 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -16,14 +16,12 @@ import (
"google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/reflect/protoreflect"
- pref "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoiface"
- piface "google.golang.org/protobuf/runtime/protoiface"
)
// legacyWrapMessage wraps v as a protoreflect.Message,
// where v must be a *struct kind and not implement the v2 API already.
-func legacyWrapMessage(v reflect.Value) pref.Message {
+func legacyWrapMessage(v reflect.Value) protoreflect.Message {
t := v.Type()
if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
return aberrantMessage{v: v}
@@ -35,7 +33,7 @@ func legacyWrapMessage(v reflect.Value) pref.Message {
// legacyLoadMessageType dynamically loads a protoreflect.Type for t,
// where t must not implement the v2 API already.
// The provided name is used if it cannot be determined from the message.
-func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType {
+func legacyLoadMessageType(t reflect.Type, name protoreflect.FullName) protoreflect.MessageType {
if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
return aberrantMessageType{t}
}
@@ -47,7 +45,7 @@ var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo
// legacyLoadMessageInfo dynamically loads a *MessageInfo for t,
// where t must be a *struct kind and not implement the v2 API already.
// The provided name is used if it cannot be determined from the message.
-func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo {
+func legacyLoadMessageInfo(t reflect.Type, name protoreflect.FullName) *MessageInfo {
// Fast-path: check if a MessageInfo is cached for this concrete type.
if mt, ok := legacyMessageTypeCache.Load(t); ok {
return mt.(*MessageInfo)
@@ -68,7 +66,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo {
// supports deterministic serialization or not, but this
// preserves the v1 implementation's behavior of always
// calling Marshal methods when present.
- mi.methods.Flags |= piface.SupportMarshalDeterministic
+ mi.methods.Flags |= protoiface.SupportMarshalDeterministic
}
if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal {
mi.methods.Unmarshal = legacyUnmarshal
@@ -89,18 +87,18 @@ var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDesc
// which should be a *struct kind and must not implement the v2 API already.
//
// This is exported for testing purposes.
-func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor {
+func LegacyLoadMessageDesc(t reflect.Type) protoreflect.MessageDescriptor {
return legacyLoadMessageDesc(t, "")
}
-func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor {
+func legacyLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor {
// Fast-path: check if a MessageDescriptor is cached for this concrete type.
if mi, ok := legacyMessageDescCache.Load(t); ok {
- return mi.(pref.MessageDescriptor)
+ return mi.(protoreflect.MessageDescriptor)
}
// Slow-path: initialize MessageDescriptor from the raw descriptor.
mv := reflect.Zero(t).Interface()
- if _, ok := mv.(pref.ProtoMessage); ok {
+ if _, ok := mv.(protoreflect.ProtoMessage); ok {
panic(fmt.Sprintf("%v already implements proto.Message", t))
}
mdV1, ok := mv.(messageV1)
@@ -164,7 +162,7 @@ var (
//
// This is a best-effort derivation of the message descriptor using the protobuf
// tags on the struct fields.
-func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor {
+func aberrantLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor {
aberrantMessageDescLock.Lock()
defer aberrantMessageDescLock.Unlock()
if aberrantMessageDescCache == nil {
@@ -172,7 +170,7 @@ func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDes
}
return aberrantLoadMessageDescReentrant(t, name)
}
-func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor {
+func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor {
// Fast-path: check if a MessageDescriptor is cached for this concrete type.
if md, ok := aberrantMessageDescCache[t]; ok {
return md
@@ -225,9 +223,9 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M
vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0]
for i := 0; i < vs.Len(); i++ {
v := vs.Index(i)
- md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{
- pref.FieldNumber(v.FieldByName("Start").Int()),
- pref.FieldNumber(v.FieldByName("End").Int() + 1),
+ md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{
+ protoreflect.FieldNumber(v.FieldByName("Start").Int()),
+ protoreflect.FieldNumber(v.FieldByName("End").Int() + 1),
})
md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil)
}
@@ -245,7 +243,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M
n := len(md.L2.Oneofs.List)
md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{})
od := &md.L2.Oneofs.List[n]
- od.L0.FullName = md.FullName().Append(pref.Name(tag))
+ od.L0.FullName = md.FullName().Append(protoreflect.Name(tag))
od.L0.ParentFile = md.L0.ParentFile
od.L0.Parent = md
od.L0.Index = n
@@ -267,14 +265,14 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M
return md
}
-func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName {
+func aberrantDeriveMessageName(t reflect.Type, name protoreflect.FullName) protoreflect.FullName {
if name.IsValid() {
return name
}
func() {
defer func() { recover() }() // swallow possible nil panics
if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok {
- name = pref.FullName(m.XXX_MessageName())
+ name = protoreflect.FullName(m.XXX_MessageName())
}
}()
if name.IsValid() {
@@ -305,7 +303,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
fd.L0.Index = n
if fd.L1.IsWeak || fd.L1.HasPacked {
- fd.L1.Options = func() pref.ProtoMessage {
+ fd.L1.Options = func() protoreflect.ProtoMessage {
opts := descopts.Field.ProtoReflect().New()
if fd.L1.IsWeak {
opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
@@ -318,17 +316,17 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
}
// Populate Enum and Message.
- if fd.Enum() == nil && fd.Kind() == pref.EnumKind {
+ if fd.Enum() == nil && fd.Kind() == protoreflect.EnumKind {
switch v := reflect.Zero(t).Interface().(type) {
- case pref.Enum:
+ case protoreflect.Enum:
fd.L1.Enum = v.Descriptor()
default:
fd.L1.Enum = LegacyLoadEnumDesc(t)
}
}
- if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) {
+ if fd.Message() == nil && (fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind) {
switch v := reflect.Zero(t).Interface().(type) {
- case pref.ProtoMessage:
+ case protoreflect.ProtoMessage:
fd.L1.Message = v.ProtoReflect().Descriptor()
case messageV1:
fd.L1.Message = LegacyLoadMessageDesc(t)
@@ -337,13 +335,13 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
n := len(md.L1.Messages.List)
md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)})
md2 := &md.L1.Messages.List[n]
- md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name()))))
+ md2.L0.FullName = md.FullName().Append(protoreflect.Name(strs.MapEntryName(string(fd.Name()))))
md2.L0.ParentFile = md.L0.ParentFile
md2.L0.Parent = md
md2.L0.Index = n
md2.L1.IsMapEntry = true
- md2.L2.Options = func() pref.ProtoMessage {
+ md2.L2.Options = func() protoreflect.ProtoMessage {
opts := descopts.Message.ProtoReflect().New()
opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true))
return opts.Interface()
@@ -364,8 +362,8 @@ type placeholderEnumValues struct {
protoreflect.EnumValueDescriptors
}
-func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor {
- return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n)))
+func (placeholderEnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor {
+ return filedesc.PlaceholderEnumValue(protoreflect.FullName(fmt.Sprintf("UNKNOWN_%d", n)))
}
// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methods.
@@ -383,7 +381,7 @@ type legacyMerger interface {
Merge(protoiface.MessageV1)
}
-var aberrantProtoMethods = &piface.Methods{
+var aberrantProtoMethods = &protoiface.Methods{
Marshal: legacyMarshal,
Unmarshal: legacyUnmarshal,
Merge: legacyMerge,
@@ -392,40 +390,40 @@ var aberrantProtoMethods = &piface.Methods{
// supports deterministic serialization or not, but this
// preserves the v1 implementation's behavior of always
// calling Marshal methods when present.
- Flags: piface.SupportMarshalDeterministic,
+ Flags: protoiface.SupportMarshalDeterministic,
}
-func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) {
+func legacyMarshal(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
v := in.Message.(unwrapper).protoUnwrap()
marshaler, ok := v.(legacyMarshaler)
if !ok {
- return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v)
+ return protoiface.MarshalOutput{}, errors.New("%T does not implement Marshal", v)
}
out, err := marshaler.Marshal()
if in.Buf != nil {
out = append(in.Buf, out...)
}
- return piface.MarshalOutput{
+ return protoiface.MarshalOutput{
Buf: out,
}, err
}
-func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) {
+func legacyUnmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
v := in.Message.(unwrapper).protoUnwrap()
unmarshaler, ok := v.(legacyUnmarshaler)
if !ok {
- return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v)
+ return protoiface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v)
}
- return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf)
+ return protoiface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf)
}
-func legacyMerge(in piface.MergeInput) piface.MergeOutput {
+func legacyMerge(in protoiface.MergeInput) protoiface.MergeOutput {
// Check whether this supports the legacy merger.
dstv := in.Destination.(unwrapper).protoUnwrap()
merger, ok := dstv.(legacyMerger)
if ok {
merger.Merge(Export{}.ProtoMessageV1Of(in.Source))
- return piface.MergeOutput{Flags: piface.MergeComplete}
+ return protoiface.MergeOutput{Flags: protoiface.MergeComplete}
}
// If legacy merger is unavailable, implement merge in terms of
@@ -433,29 +431,29 @@ func legacyMerge(in piface.MergeInput) piface.MergeOutput {
srcv := in.Source.(unwrapper).protoUnwrap()
marshaler, ok := srcv.(legacyMarshaler)
if !ok {
- return piface.MergeOutput{}
+ return protoiface.MergeOutput{}
}
dstv = in.Destination.(unwrapper).protoUnwrap()
unmarshaler, ok := dstv.(legacyUnmarshaler)
if !ok {
- return piface.MergeOutput{}
+ return protoiface.MergeOutput{}
}
if !in.Source.IsValid() {
// Legacy Marshal methods may not function on nil messages.
// Check for a typed nil source only after we confirm that
// legacy Marshal/Unmarshal methods are present, for
// consistency.
- return piface.MergeOutput{Flags: piface.MergeComplete}
+ return protoiface.MergeOutput{Flags: protoiface.MergeComplete}
}
b, err := marshaler.Marshal()
if err != nil {
- return piface.MergeOutput{}
+ return protoiface.MergeOutput{}
}
err = unmarshaler.Unmarshal(b)
if err != nil {
- return piface.MergeOutput{}
+ return protoiface.MergeOutput{}
}
- return piface.MergeOutput{Flags: piface.MergeComplete}
+ return protoiface.MergeOutput{Flags: protoiface.MergeComplete}
}
// aberrantMessageType implements MessageType for all types other than pointer-to-struct.
@@ -463,19 +461,19 @@ type aberrantMessageType struct {
t reflect.Type
}
-func (mt aberrantMessageType) New() pref.Message {
+func (mt aberrantMessageType) New() protoreflect.Message {
if mt.t.Kind() == reflect.Ptr {
return aberrantMessage{reflect.New(mt.t.Elem())}
}
return aberrantMessage{reflect.Zero(mt.t)}
}
-func (mt aberrantMessageType) Zero() pref.Message {
+func (mt aberrantMessageType) Zero() protoreflect.Message {
return aberrantMessage{reflect.Zero(mt.t)}
}
func (mt aberrantMessageType) GoType() reflect.Type {
return mt.t
}
-func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor {
+func (mt aberrantMessageType) Descriptor() protoreflect.MessageDescriptor {
return LegacyLoadMessageDesc(mt.t)
}
@@ -499,56 +497,56 @@ func (m aberrantMessage) Reset() {
}
}
-func (m aberrantMessage) ProtoReflect() pref.Message {
+func (m aberrantMessage) ProtoReflect() protoreflect.Message {
return m
}
-func (m aberrantMessage) Descriptor() pref.MessageDescriptor {
+func (m aberrantMessage) Descriptor() protoreflect.MessageDescriptor {
return LegacyLoadMessageDesc(m.v.Type())
}
-func (m aberrantMessage) Type() pref.MessageType {
+func (m aberrantMessage) Type() protoreflect.MessageType {
return aberrantMessageType{m.v.Type()}
}
-func (m aberrantMessage) New() pref.Message {
+func (m aberrantMessage) New() protoreflect.Message {
if m.v.Type().Kind() == reflect.Ptr {
return aberrantMessage{reflect.New(m.v.Type().Elem())}
}
return aberrantMessage{reflect.Zero(m.v.Type())}
}
-func (m aberrantMessage) Interface() pref.ProtoMessage {
+func (m aberrantMessage) Interface() protoreflect.ProtoMessage {
return m
}
-func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+func (m aberrantMessage) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
return
}
-func (m aberrantMessage) Has(pref.FieldDescriptor) bool {
+func (m aberrantMessage) Has(protoreflect.FieldDescriptor) bool {
return false
}
-func (m aberrantMessage) Clear(pref.FieldDescriptor) {
+func (m aberrantMessage) Clear(protoreflect.FieldDescriptor) {
panic("invalid Message.Clear on " + string(m.Descriptor().FullName()))
}
-func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value {
+func (m aberrantMessage) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
if fd.Default().IsValid() {
return fd.Default()
}
panic("invalid Message.Get on " + string(m.Descriptor().FullName()))
}
-func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) {
+func (m aberrantMessage) Set(protoreflect.FieldDescriptor, protoreflect.Value) {
panic("invalid Message.Set on " + string(m.Descriptor().FullName()))
}
-func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value {
+func (m aberrantMessage) Mutable(protoreflect.FieldDescriptor) protoreflect.Value {
panic("invalid Message.Mutable on " + string(m.Descriptor().FullName()))
}
-func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value {
+func (m aberrantMessage) NewField(protoreflect.FieldDescriptor) protoreflect.Value {
panic("invalid Message.NewField on " + string(m.Descriptor().FullName()))
}
-func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor {
+func (m aberrantMessage) WhichOneof(protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName()))
}
-func (m aberrantMessage) GetUnknown() pref.RawFields {
+func (m aberrantMessage) GetUnknown() protoreflect.RawFields {
return nil
}
-func (m aberrantMessage) SetUnknown(pref.RawFields) {
+func (m aberrantMessage) SetUnknown(protoreflect.RawFields) {
// SetUnknown discards its input on messages which don't support unknown field storage.
}
func (m aberrantMessage) IsValid() bool {
@@ -557,7 +555,7 @@ func (m aberrantMessage) IsValid() bool {
}
return false
}
-func (m aberrantMessage) ProtoMethods() *piface.Methods {
+func (m aberrantMessage) ProtoMethods() *protoiface.Methods {
return aberrantProtoMethods
}
func (m aberrantMessage) protoUnwrap() interface{} {
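legacyMerge above falls back to a wire-format round trip when the destination lacks a Merge method: marshal the source, then unmarshal the bytes into the destination, since unmarshaling valid wire data into an existing message merges by protobuf semantics. A self-contained sketch of that fallback (the interface names mirror legacyMarshaler/legacyUnmarshaler; mergeViaWire is hypothetical):

package example

// marshaler and unmarshaler mirror the legacy v1 method sets used above.
type marshaler interface{ Marshal() ([]byte, error) }
type unmarshaler interface{ Unmarshal([]byte) error }

// mergeViaWire merges src into dst by re-encoding src: unmarshaling a valid
// encoding into an existing message merges fields per protobuf semantics.
func mergeViaWire(dst unmarshaler, src marshaler) error {
	b, err := src.Marshal()
	if err != nil {
		return err
	}
	return dst.Unmarshal(b)
}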
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
index c65bbc0446..7e65f64f28 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/merge.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
@@ -9,8 +9,8 @@ import (
"reflect"
"google.golang.org/protobuf/proto"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- piface "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
)
type mergeOptions struct{}
@@ -20,17 +20,17 @@ func (o mergeOptions) Merge(dst, src proto.Message) {
}
// merge is protoreflect.Methods.Merge.
-func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput {
+func (mi *MessageInfo) merge(in protoiface.MergeInput) protoiface.MergeOutput {
dp, ok := mi.getPointer(in.Destination)
if !ok {
- return piface.MergeOutput{}
+ return protoiface.MergeOutput{}
}
sp, ok := mi.getPointer(in.Source)
if !ok {
- return piface.MergeOutput{}
+ return protoiface.MergeOutput{}
}
mi.mergePointer(dp, sp, mergeOptions{})
- return piface.MergeOutput{Flags: piface.MergeComplete}
+ return protoiface.MergeOutput{Flags: protoiface.MergeComplete}
}
func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
@@ -64,7 +64,7 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
continue
}
dx := (*dext)[num]
- var dv pref.Value
+ var dv protoreflect.Value
if dx.Type() == sx.Type() {
dv = dx.Value()
}
@@ -85,15 +85,15 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
}
}
-func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+func mergeScalarValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value {
return src
}
-func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value {
- return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...))
+func mergeBytesValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value {
+ return protoreflect.ValueOfBytes(append(emptyBuf[:], src.Bytes()...))
}
-func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+func mergeListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value {
dstl := dst.List()
srcl := src.List()
for i, llen := 0, srcl.Len(); i < llen; i++ {
@@ -102,29 +102,29 @@ func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
return dst
}
-func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+func mergeBytesListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value {
dstl := dst.List()
srcl := src.List()
for i, llen := 0, srcl.Len(); i < llen; i++ {
sb := srcl.Get(i).Bytes()
db := append(emptyBuf[:], sb...)
- dstl.Append(pref.ValueOfBytes(db))
+ dstl.Append(protoreflect.ValueOfBytes(db))
}
return dst
}
-func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+func mergeMessageListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value {
dstl := dst.List()
srcl := src.List()
for i, llen := 0, srcl.Len(); i < llen; i++ {
sm := srcl.Get(i).Message()
dm := proto.Clone(sm.Interface()).ProtoReflect()
- dstl.Append(pref.ValueOfMessage(dm))
+ dstl.Append(protoreflect.ValueOfMessage(dm))
}
return dst
}
-func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+func mergeMessageValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value {
opts.Merge(dst.Message().Interface(), src.Message().Interface())
return dst
}
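mergeBytesValue and mergeBytesListValue copy byte slices through append(emptyBuf[:], ...) so the destination never aliases the source's backing array. A sketch of that idiom in isolation (copyBytes is hypothetical):

package example

var emptyBuf [0]byte

// copyBytes returns a slice that shares no storage with src: appending onto
// a zero-length, zero-capacity slice forces a fresh allocation whenever src
// is non-empty.
func copyBytes(src []byte) []byte {
	return append(emptyBuf[:], src...)
}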
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index a104e28e85..4f5fb67a0d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -14,8 +14,7 @@ import (
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- preg "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/reflect/protoregistry"
)
// MessageInfo provides protobuf related functionality for a given Go type
@@ -29,7 +28,7 @@ type MessageInfo struct {
GoReflectType reflect.Type // pointer to struct
// Desc is the underlying message descriptor type and must be populated.
- Desc pref.MessageDescriptor
+ Desc protoreflect.MessageDescriptor
// Exporter must be provided in a purego environment in order to provide
// access to unexported fields.
@@ -54,7 +53,7 @@ type exporter func(v interface{}, i int) interface{}
// is generated by our implementation of protoc-gen-go (for v2 and on).
// If it is unable to obtain a MessageInfo, it returns nil.
func getMessageInfo(mt reflect.Type) *MessageInfo {
- m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage)
+ m, ok := reflect.Zero(mt).Interface().(protoreflect.ProtoMessage)
if !ok {
return nil
}
@@ -97,7 +96,7 @@ func (mi *MessageInfo) initOnce() {
// getPointer returns the pointer for a message, which should be of
// the type of the MessageInfo. If the message is of a different type,
// it returns ok==false.
-func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) {
+func (mi *MessageInfo) getPointer(m protoreflect.Message) (p pointer, ok bool) {
switch m := m.(type) {
case *messageState:
return m.pointer(), m.messageInfo() == mi
@@ -134,10 +133,10 @@ type structInfo struct {
extensionOffset offset
extensionType reflect.Type
- fieldsByNumber map[pref.FieldNumber]reflect.StructField
- oneofsByName map[pref.Name]reflect.StructField
- oneofWrappersByType map[reflect.Type]pref.FieldNumber
- oneofWrappersByNumber map[pref.FieldNumber]reflect.Type
+ fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField
+ oneofsByName map[protoreflect.Name]reflect.StructField
+ oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber
+ oneofWrappersByNumber map[protoreflect.FieldNumber]reflect.Type
}
func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
@@ -147,10 +146,10 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
unknownOffset: invalidOffset,
extensionOffset: invalidOffset,
- fieldsByNumber: map[pref.FieldNumber]reflect.StructField{},
- oneofsByName: map[pref.Name]reflect.StructField{},
- oneofWrappersByType: map[reflect.Type]pref.FieldNumber{},
- oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{},
+ fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{},
+ oneofsByName: map[protoreflect.Name]reflect.StructField{},
+ oneofWrappersByType: map[reflect.Type]protoreflect.FieldNumber{},
+ oneofWrappersByNumber: map[protoreflect.FieldNumber]reflect.Type{},
}
fieldLoop:
@@ -180,12 +179,12 @@ fieldLoop:
for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
n, _ := strconv.ParseUint(s, 10, 64)
- si.fieldsByNumber[pref.FieldNumber(n)] = f
+ si.fieldsByNumber[protoreflect.FieldNumber(n)] = f
continue fieldLoop
}
}
if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 {
- si.oneofsByName[pref.Name(s)] = f
+ si.oneofsByName[protoreflect.Name(s)] = f
continue fieldLoop
}
}
@@ -208,8 +207,8 @@ fieldLoop:
for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
n, _ := strconv.ParseUint(s, 10, 64)
- si.oneofWrappersByType[tf] = pref.FieldNumber(n)
- si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf
+ si.oneofWrappersByType[tf] = protoreflect.FieldNumber(n)
+ si.oneofWrappersByNumber[protoreflect.FieldNumber(n)] = tf
break
}
}
@@ -219,7 +218,11 @@ fieldLoop:
}
func (mi *MessageInfo) New() protoreflect.Message {
- return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface())
+ m := reflect.New(mi.GoReflectType.Elem()).Interface()
+ if r, ok := m.(protoreflect.ProtoMessage); ok {
+ return r.ProtoReflect()
+ }
+ return mi.MessageOf(m)
}
func (mi *MessageInfo) Zero() protoreflect.Message {
return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface())
@@ -237,7 +240,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
fd := mi.Desc.Fields().Get(i)
switch {
case fd.IsWeak():
- mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName())
+ mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName())
return mt
case fd.IsMap():
return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]}
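makeStructInfo above recovers field numbers by scanning the protobuf struct tag for the first all-digit token. A sketch of that scan as a standalone function (fieldNumberFromTag is hypothetical):

package example

import (
	"strconv"
	"strings"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// fieldNumberFromTag returns the field number embedded in a protobuf struct
// tag such as "bytes,2,opt,name=payload", where 2 is the field number.
func fieldNumberFromTag(tag string) (protoreflect.FieldNumber, bool) {
	for _, s := range strings.Split(tag, ",") {
		if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
			n, _ := strconv.ParseUint(s, 10, 64)
			return protoreflect.FieldNumber(n), true
		}
	}
	return 0, false
}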
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index 9488b72613..d9ea010bef 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -10,17 +10,17 @@ import (
"google.golang.org/protobuf/internal/detrand"
"google.golang.org/protobuf/internal/pragma"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
type reflectMessageInfo struct {
- fields map[pref.FieldNumber]*fieldInfo
- oneofs map[pref.Name]*oneofInfo
+ fields map[protoreflect.FieldNumber]*fieldInfo
+ oneofs map[protoreflect.Name]*oneofInfo
// fieldTypes contains the zero value of an enum or message field.
// For lists, it contains the element type.
// For maps, it contains the entry value type.
- fieldTypes map[pref.FieldNumber]interface{}
+ fieldTypes map[protoreflect.FieldNumber]interface{}
// denseFields is a subset of fields where:
// 0 < fieldDesc.Number() < len(denseFields)
@@ -30,8 +30,8 @@ type reflectMessageInfo struct {
// rangeInfos is a list of all fields (not belonging to a oneof) and oneofs.
rangeInfos []interface{} // either *fieldInfo or *oneofInfo
- getUnknown func(pointer) pref.RawFields
- setUnknown func(pointer, pref.RawFields)
+ getUnknown func(pointer) protoreflect.RawFields
+ setUnknown func(pointer, protoreflect.RawFields)
extensionMap func(pointer) *extensionMap
nilMessage atomicNilMessage
@@ -52,7 +52,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) {
// This code assumes that the struct is well-formed and panics if there are
// any discrepancies.
func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
- mi.fields = map[pref.FieldNumber]*fieldInfo{}
+ mi.fields = map[protoreflect.FieldNumber]*fieldInfo{}
md := mi.Desc
fds := md.Fields()
for i := 0; i < fds.Len(); i++ {
@@ -82,7 +82,7 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
mi.fields[fd.Number()] = &fi
}
- mi.oneofs = map[pref.Name]*oneofInfo{}
+ mi.oneofs = map[protoreflect.Name]*oneofInfo{}
for i := 0; i < md.Oneofs().Len(); i++ {
od := md.Oneofs().Get(i)
mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter)
@@ -117,13 +117,13 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) {
switch {
case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType:
// Handle as []byte.
- mi.getUnknown = func(p pointer) pref.RawFields {
+ mi.getUnknown = func(p pointer) protoreflect.RawFields {
if p.IsNil() {
return nil
}
return *p.Apply(mi.unknownOffset).Bytes()
}
- mi.setUnknown = func(p pointer, b pref.RawFields) {
+ mi.setUnknown = func(p pointer, b protoreflect.RawFields) {
if p.IsNil() {
panic("invalid SetUnknown on nil Message")
}
@@ -131,7 +131,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) {
}
case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType:
// Handle as *[]byte.
- mi.getUnknown = func(p pointer) pref.RawFields {
+ mi.getUnknown = func(p pointer) protoreflect.RawFields {
if p.IsNil() {
return nil
}
@@ -141,7 +141,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) {
}
return **bp
}
- mi.setUnknown = func(p pointer, b pref.RawFields) {
+ mi.setUnknown = func(p pointer, b protoreflect.RawFields) {
if p.IsNil() {
panic("invalid SetUnknown on nil Message")
}
@@ -152,10 +152,10 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) {
**bp = b
}
default:
- mi.getUnknown = func(pointer) pref.RawFields {
+ mi.getUnknown = func(pointer) protoreflect.RawFields {
return nil
}
- mi.setUnknown = func(p pointer, _ pref.RawFields) {
+ mi.setUnknown = func(p pointer, _ protoreflect.RawFields) {
if p.IsNil() {
panic("invalid SetUnknown on nil Message")
}
@@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
}
if ft != nil {
if mi.fieldTypes == nil {
- mi.fieldTypes = make(map[pref.FieldNumber]interface{})
+ mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{})
}
mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface()
}
@@ -233,7 +233,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
type extensionMap map[int32]ExtensionField
-func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
if m != nil {
for _, x := range *m {
xd := x.Type().TypeDescriptor()
@@ -247,7 +247,7 @@ func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
}
}
}
-func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) {
+func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) {
if m == nil {
return false
}
@@ -266,10 +266,10 @@ func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) {
}
return true
}
-func (m *extensionMap) Clear(xt pref.ExtensionType) {
+func (m *extensionMap) Clear(xt protoreflect.ExtensionType) {
delete(*m, int32(xt.TypeDescriptor().Number()))
}
-func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value {
+func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value {
xd := xt.TypeDescriptor()
if m != nil {
if x, ok := (*m)[int32(xd.Number())]; ok {
@@ -278,7 +278,7 @@ func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value {
}
return xt.Zero()
}
-func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) {
+func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) {
xd := xt.TypeDescriptor()
isValid := true
switch {
@@ -302,9 +302,9 @@ func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) {
x.Set(xt, v)
(*m)[int32(xd.Number())] = x
}
-func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value {
+func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value {
xd := xt.TypeDescriptor()
- if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() {
+ if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() {
panic("invalid Mutable on field with non-composite type")
}
if x, ok := (*m)[int32(xd.Number())]; ok {
@@ -320,7 +320,6 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value {
// in an allocation-free way without needing to have a shadow Go type generated
// for every message type. This technique only works using unsafe.
//
-//
// Example generated code:
//
// type M struct {
@@ -351,12 +350,11 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value {
// It has access to the message info as its first field, and a pointer to the
// MessageState is identical to a pointer to the concrete message value.
//
-//
// Requirements:
-// • The type M must implement protoreflect.ProtoMessage.
-// • The address of m must not be nil.
-// • The address of m and the address of m.state must be equal,
-// even though they are different Go types.
+// - The type M must implement protoreflect.ProtoMessage.
+// - The address of m must not be nil.
+// - The address of m and the address of m.state must be equal,
+// even though they are different Go types.
type MessageState struct {
pragma.NoUnkeyedLiterals
pragma.DoNotCompare
@@ -368,8 +366,8 @@ type MessageState struct {
type messageState MessageState
var (
- _ pref.Message = (*messageState)(nil)
- _ unwrapper = (*messageState)(nil)
+ _ protoreflect.Message = (*messageState)(nil)
+ _ unwrapper = (*messageState)(nil)
)
// messageDataType is a tuple of a pointer to the message data and
@@ -387,16 +385,16 @@ type (
)
var (
- _ pref.Message = (*messageReflectWrapper)(nil)
- _ unwrapper = (*messageReflectWrapper)(nil)
- _ pref.ProtoMessage = (*messageIfaceWrapper)(nil)
- _ unwrapper = (*messageIfaceWrapper)(nil)
+ _ protoreflect.Message = (*messageReflectWrapper)(nil)
+ _ unwrapper = (*messageReflectWrapper)(nil)
+ _ protoreflect.ProtoMessage = (*messageIfaceWrapper)(nil)
+ _ unwrapper = (*messageIfaceWrapper)(nil)
)
// MessageOf returns a reflective view over a message. The input must be a
// pointer to a named Go struct. If the provided type has a ProtoReflect method,
// it must be implemented by calling this method.
-func (mi *MessageInfo) MessageOf(m interface{}) pref.Message {
+func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message {
if reflect.TypeOf(m) != mi.GoReflectType {
panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
}
@@ -421,7 +419,7 @@ func (m *messageIfaceWrapper) Reset() {
rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
}
}
-func (m *messageIfaceWrapper) ProtoReflect() pref.Message {
+func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message {
return (*messageReflectWrapper)(m)
}
func (m *messageIfaceWrapper) protoUnwrap() interface{} {
@@ -430,7 +428,7 @@ func (m *messageIfaceWrapper) protoUnwrap() interface{} {
// checkField verifies that the provided field descriptor is valid.
// Exactly one of the returned values is populated.
-func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) {
+func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) {
var fi *fieldInfo
if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) {
fi = mi.denseFields[n]
@@ -455,7 +453,7 @@ func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.Ext
if !mi.Desc.ExtensionRanges().Has(fd.Number()) {
panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName()))
}
- xtd, ok := fd.(pref.ExtensionTypeDescriptor)
+ xtd, ok := fd.(protoreflect.ExtensionTypeDescriptor)
if !ok {
panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName()))
}
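extensionMap above keys extension values by the extension's field number and falls back to the extension type's zero value on a miss. A simplified sketch of the Get path (extMap collapses the internal ExtensionField bookkeeping into a bare protoreflect.Value, which the real code does not do):

package example

import "google.golang.org/protobuf/reflect/protoreflect"

type extMap map[int32]protoreflect.Value

// get returns the stored value for xt, or xt.Zero() when unset.
func get(m extMap, xt protoreflect.ExtensionType) protoreflect.Value {
	if v, ok := m[int32(xt.TypeDescriptor().Number())]; ok {
		return v
	}
	return xt.Zero()
}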
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
index 343cf87219..5e736c60ef 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -11,24 +11,24 @@ import (
"sync"
"google.golang.org/protobuf/internal/flags"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- preg "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
)
type fieldInfo struct {
- fieldDesc pref.FieldDescriptor
+ fieldDesc protoreflect.FieldDescriptor
// These fields are used for protobuf reflection support.
has func(pointer) bool
clear func(pointer)
- get func(pointer) pref.Value
- set func(pointer, pref.Value)
- mutable func(pointer) pref.Value
- newMessage func() pref.Message
- newField func() pref.Value
+ get func(pointer) protoreflect.Value
+ set func(pointer, protoreflect.Value)
+ mutable func(pointer) protoreflect.Value
+ newMessage func() protoreflect.Message
+ newField func() protoreflect.Value
}
-func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo {
+func fieldInfoForMissing(fd protoreflect.FieldDescriptor) fieldInfo {
// This never occurs for generated message types.
// It implies that a hand-crafted type has missing Go fields
// for specific protobuf message fields.
@@ -40,19 +40,19 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo {
clear: func(p pointer) {
panic("missing Go struct field for " + string(fd.FullName()))
},
- get: func(p pointer) pref.Value {
+ get: func(p pointer) protoreflect.Value {
return fd.Default()
},
- set: func(p pointer, v pref.Value) {
+ set: func(p pointer, v protoreflect.Value) {
panic("missing Go struct field for " + string(fd.FullName()))
},
- mutable: func(p pointer) pref.Value {
+ mutable: func(p pointer) protoreflect.Value {
panic("missing Go struct field for " + string(fd.FullName()))
},
- newMessage: func() pref.Message {
+ newMessage: func() protoreflect.Message {
panic("missing Go struct field for " + string(fd.FullName()))
},
- newField: func() pref.Value {
+ newField: func() protoreflect.Value {
if v := fd.Default(); v.IsValid() {
return v
}
@@ -61,7 +61,7 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo {
}
}
-func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo {
+func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Interface {
panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft))
@@ -102,7 +102,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export
}
rv.Set(reflect.Zero(rv.Type()))
},
- get: func(p pointer) pref.Value {
+ get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
@@ -113,7 +113,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export
rv = rv.Elem().Elem().Field(0)
return conv.PBValueOf(rv)
},
- set: func(p pointer, v pref.Value) {
+ set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
rv.Set(reflect.New(ot))
@@ -121,7 +121,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export
rv = rv.Elem().Elem().Field(0)
rv.Set(conv.GoValueOf(v))
},
- mutable: func(p pointer) pref.Value {
+ mutable: func(p pointer) protoreflect.Value {
if !isMessage {
panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName()))
}
@@ -131,20 +131,20 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export
}
rv = rv.Elem().Elem().Field(0)
if rv.Kind() == reflect.Ptr && rv.IsNil() {
- rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message())))
+ rv.Set(conv.GoValueOf(protoreflect.ValueOfMessage(conv.New().Message())))
}
return conv.PBValueOf(rv)
},
- newMessage: func() pref.Message {
+ newMessage: func() protoreflect.Message {
return conv.New().Message()
},
- newField: func() pref.Value {
+ newField: func() protoreflect.Value {
return conv.New()
},
}
}
-func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Map {
panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft))
@@ -166,7 +166,7 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
- get: func(p pointer) pref.Value {
+ get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
@@ -176,7 +176,7 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter
}
return conv.PBValueOf(rv)
},
- set: func(p pointer, v pref.Value) {
+ set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
pv := conv.GoValueOf(v)
if pv.IsNil() {
@@ -184,20 +184,20 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter
}
rv.Set(pv)
},
- mutable: func(p pointer) pref.Value {
+ mutable: func(p pointer) protoreflect.Value {
v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if v.IsNil() {
v.Set(reflect.MakeMap(fs.Type))
}
return conv.PBValueOf(v)
},
- newField: func() pref.Value {
+ newField: func() protoreflect.Value {
return conv.New()
},
}
}
-func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Slice {
panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft))
@@ -219,7 +219,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
- get: func(p pointer) pref.Value {
+ get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
@@ -229,7 +229,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte
}
return conv.PBValueOf(rv)
},
- set: func(p pointer, v pref.Value) {
+ set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
pv := conv.GoValueOf(v)
if pv.IsNil() {
@@ -237,11 +237,11 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte
}
rv.Set(pv.Elem())
},
- mutable: func(p pointer) pref.Value {
+ mutable: func(p pointer) protoreflect.Value {
v := p.Apply(fieldOffset).AsValueOf(fs.Type)
return conv.PBValueOf(v)
},
- newField: func() pref.Value {
+ newField: func() protoreflect.Value {
return conv.New()
},
}
@@ -252,7 +252,7 @@ var (
emptyBytes = reflect.ValueOf([]byte{})
)
-func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
ft := fs.Type
nullable := fd.HasPresence()
isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
@@ -300,7 +300,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
- get: func(p pointer) pref.Value {
+ get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
@@ -315,7 +315,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor
}
return conv.PBValueOf(rv)
},
- set: func(p pointer, v pref.Value) {
+ set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if nullable && rv.Kind() == reflect.Ptr {
if rv.IsNil() {
@@ -332,23 +332,23 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor
}
}
},
- newField: func() pref.Value {
+ newField: func() protoreflect.Value {
return conv.New()
},
}
}
-func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo {
+func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo {
if !flags.ProtoLegacy {
panic("no support for proto1 weak fields")
}
var once sync.Once
- var messageType pref.MessageType
+ var messageType protoreflect.MessageType
lazyInit := func() {
once.Do(func() {
messageName := fd.Message().FullName()
- messageType, _ = preg.GlobalTypes.FindMessageByName(messageName)
+ messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
if messageType == nil {
panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName()))
}
@@ -368,18 +368,18 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn
clear: func(p pointer) {
p.Apply(weakOffset).WeakFields().clear(num)
},
- get: func(p pointer) pref.Value {
+ get: func(p pointer) protoreflect.Value {
lazyInit()
if p.IsNil() {
- return pref.ValueOfMessage(messageType.Zero())
+ return protoreflect.ValueOfMessage(messageType.Zero())
}
m, ok := p.Apply(weakOffset).WeakFields().get(num)
if !ok {
- return pref.ValueOfMessage(messageType.Zero())
+ return protoreflect.ValueOfMessage(messageType.Zero())
}
- return pref.ValueOfMessage(m.ProtoReflect())
+ return protoreflect.ValueOfMessage(m.ProtoReflect())
},
- set: func(p pointer, v pref.Value) {
+ set: func(p pointer, v protoreflect.Value) {
lazyInit()
m := v.Message()
if m.Descriptor() != messageType.Descriptor() {
@@ -390,7 +390,7 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn
}
p.Apply(weakOffset).WeakFields().set(num, m.Interface())
},
- mutable: func(p pointer) pref.Value {
+ mutable: func(p pointer) protoreflect.Value {
lazyInit()
fs := p.Apply(weakOffset).WeakFields()
m, ok := fs.get(num)
@@ -398,20 +398,20 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn
m = messageType.New().Interface()
fs.set(num, m)
}
- return pref.ValueOfMessage(m.ProtoReflect())
+ return protoreflect.ValueOfMessage(m.ProtoReflect())
},
- newMessage: func() pref.Message {
+ newMessage: func() protoreflect.Message {
lazyInit()
return messageType.New()
},
- newField: func() pref.Value {
+ newField: func() protoreflect.Value {
lazyInit()
- return pref.ValueOfMessage(messageType.New())
+ return protoreflect.ValueOfMessage(messageType.New())
},
}
}
-func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
ft := fs.Type
conv := NewConverter(ft, fd)
@@ -433,47 +433,47 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
- get: func(p pointer) pref.Value {
+ get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
return conv.PBValueOf(rv)
},
- set: func(p pointer, v pref.Value) {
+ set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(conv.GoValueOf(v))
if fs.Type.Kind() == reflect.Ptr && rv.IsNil() {
panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName()))
}
},
- mutable: func(p pointer) pref.Value {
+ mutable: func(p pointer) protoreflect.Value {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if fs.Type.Kind() == reflect.Ptr && rv.IsNil() {
rv.Set(conv.GoValueOf(conv.New()))
}
return conv.PBValueOf(rv)
},
- newMessage: func() pref.Message {
+ newMessage: func() protoreflect.Message {
return conv.New().Message()
},
- newField: func() pref.Value {
+ newField: func() protoreflect.Value {
return conv.New()
},
}
}
type oneofInfo struct {
- oneofDesc pref.OneofDescriptor
- which func(pointer) pref.FieldNumber
+ oneofDesc protoreflect.OneofDescriptor
+ which func(pointer) protoreflect.FieldNumber
}
-func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
+func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
oi := &oneofInfo{oneofDesc: od}
if od.IsSynthetic() {
fs := si.fieldsByNumber[od.Fields().Get(0).Number()]
fieldOffset := offsetOf(fs, x)
- oi.which = func(p pointer) pref.FieldNumber {
+ oi.which = func(p pointer) protoreflect.FieldNumber {
if p.IsNil() {
return 0
}
@@ -486,7 +486,7 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf
} else {
fs := si.oneofsByName[od.Name()]
fieldOffset := offsetOf(fs, x)
- oi.which = func(p pointer) pref.FieldNumber {
+ oi.which = func(p pointer) protoreflect.FieldNumber {
if p.IsNil() {
return 0
}
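fieldInfoForWeakMessage defers the registry lookup until first use with sync.Once, panicking if the weak message was never linked in. A sketch of that lazy loader on its own (weakTypeLoader is hypothetical):

package example

import (
	"fmt"
	"sync"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
)

// weakTypeLoader resolves name from the global registry exactly once, no
// matter how many accessors call the returned function.
func weakTypeLoader(name protoreflect.FullName) func() protoreflect.MessageType {
	var once sync.Once
	var mt protoreflect.MessageType
	return func() protoreflect.MessageType {
		once.Do(func() {
			mt, _ = protoregistry.GlobalTypes.FindMessageByName(name)
			if mt == nil {
				panic(fmt.Sprintf("weak message %v is not linked in", name))
			}
		})
		return mt
	}
}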
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index 08cfb6054b..a24e6bbd7a 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -16,9 +16,9 @@ import (
"google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/strs"
- pref "google.golang.org/protobuf/reflect/protoreflect"
- preg "google.golang.org/protobuf/reflect/protoregistry"
- piface "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
)
// ValidationStatus is the result of validating the wire-format encoding of a message.
@@ -56,20 +56,20 @@ func (v ValidationStatus) String() string {
// of the message type.
//
// This function is exposed for testing.
-func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) {
+func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out protoiface.UnmarshalOutput, _ ValidationStatus) {
mi, ok := mt.(*MessageInfo)
if !ok {
return out, ValidationUnknown
}
if in.Resolver == nil {
- in.Resolver = preg.GlobalTypes
+ in.Resolver = protoregistry.GlobalTypes
}
o, st := mi.validate(in.Buf, 0, unmarshalOptions{
flags: in.Flags,
resolver: in.Resolver,
})
if o.initialized {
- out.Flags |= piface.UnmarshalInitialized
+ out.Flags |= protoiface.UnmarshalInitialized
}
return out, st
}
@@ -106,22 +106,22 @@ const (
validationTypeMessageSetItem
)
-func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo {
+func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo {
var vi validationInfo
switch {
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
switch fd.Kind() {
- case pref.MessageKind:
+ case protoreflect.MessageKind:
vi.typ = validationTypeMessage
if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok {
vi.mi = getMessageInfo(ot.Field(0).Type)
}
- case pref.GroupKind:
+ case protoreflect.GroupKind:
vi.typ = validationTypeGroup
if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok {
vi.mi = getMessageInfo(ot.Field(0).Type)
}
- case pref.StringKind:
+ case protoreflect.StringKind:
if strs.EnforceUTF8(fd) {
vi.typ = validationTypeUTF8String
}
@@ -129,7 +129,7 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip
default:
vi = newValidationInfo(fd, ft)
}
- if fd.Cardinality() == pref.Required {
+ if fd.Cardinality() == protoreflect.Required {
// Avoid overflow. The required field check is done with a 64-bit mask, with
// any message containing more than 64 required fields always reported as
// potentially uninitialized, so it is not important to get a precise count
@@ -142,22 +142,22 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip
return vi
}
-func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo {
+func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo {
var vi validationInfo
switch {
case fd.IsList():
switch fd.Kind() {
- case pref.MessageKind:
+ case protoreflect.MessageKind:
vi.typ = validationTypeMessage
if ft.Kind() == reflect.Slice {
vi.mi = getMessageInfo(ft.Elem())
}
- case pref.GroupKind:
+ case protoreflect.GroupKind:
vi.typ = validationTypeGroup
if ft.Kind() == reflect.Slice {
vi.mi = getMessageInfo(ft.Elem())
}
- case pref.StringKind:
+ case protoreflect.StringKind:
vi.typ = validationTypeBytes
if strs.EnforceUTF8(fd) {
vi.typ = validationTypeUTF8String
@@ -175,33 +175,33 @@ func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo
case fd.IsMap():
vi.typ = validationTypeMap
switch fd.MapKey().Kind() {
- case pref.StringKind:
+ case protoreflect.StringKind:
if strs.EnforceUTF8(fd) {
vi.keyType = validationTypeUTF8String
}
}
switch fd.MapValue().Kind() {
- case pref.MessageKind:
+ case protoreflect.MessageKind:
vi.valType = validationTypeMessage
if ft.Kind() == reflect.Map {
vi.mi = getMessageInfo(ft.Elem())
}
- case pref.StringKind:
+ case protoreflect.StringKind:
if strs.EnforceUTF8(fd) {
vi.valType = validationTypeUTF8String
}
}
default:
switch fd.Kind() {
- case pref.MessageKind:
+ case protoreflect.MessageKind:
vi.typ = validationTypeMessage
if !fd.IsWeak() {
vi.mi = getMessageInfo(ft)
}
- case pref.GroupKind:
+ case protoreflect.GroupKind:
vi.typ = validationTypeGroup
vi.mi = getMessageInfo(ft)
- case pref.StringKind:
+ case protoreflect.StringKind:
vi.typ = validationTypeBytes
if strs.EnforceUTF8(fd) {
vi.typ = validationTypeUTF8String
@@ -314,11 +314,11 @@ State:
break
}
messageName := fd.Message().FullName()
- messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
+ messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName)
switch err {
case nil:
vi.mi, _ = messageType.(*MessageInfo)
- case preg.NotFound:
+ case protoregistry.NotFound:
vi.typ = validationTypeBytes
default:
return out, ValidationUnknown
@@ -335,7 +335,7 @@ State:
// unmarshaling to begin failing. Supporting this requires some way to
// determine if the resolver is frozen.
xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num)
- if err != nil && err != preg.NotFound {
+ if err != nil && err != protoregistry.NotFound {
return out, ValidationUnknown
}
if err == nil {
@@ -513,7 +513,7 @@ State:
}
xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid)
switch {
- case err == preg.NotFound:
+ case err == protoregistry.NotFound:
b = b[n:]
case err != nil:
return out, ValidationUnknown
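For context, the lookup-and-fallback pattern in this hunk is also available through the public registry API. A minimal, purely illustrative sketch (not part of the vendored change; google.protobuf.Duration stands in for any registered message):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoregistry"
        _ "google.golang.org/protobuf/types/known/durationpb" // registers google.protobuf.Duration
    )

    func main() {
        mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Duration")
        switch err {
        case nil:
            fmt.Println("found:", mt.Descriptor().FullName())
        case protoregistry.NotFound:
            // The validator above degrades gracefully here, treating the field as opaque bytes.
            fmt.Println("not registered")
        default:
            fmt.Println("lookup failed:", err)
        }
    }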
diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go
index 009cbefd1e..eb79a7ba94 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/weak.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go
@@ -7,7 +7,7 @@ package impl
import (
"fmt"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
@@ -17,32 +17,32 @@ import (
// defined directly on it.
type weakFields WeakFields
-func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) {
+func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) {
m, ok := w[int32(num)]
return m, ok
}
-func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) {
+func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) {
if *w == nil {
*w = make(weakFields)
}
(*w)[int32(num)] = m
}
-func (w *weakFields) clear(num pref.FieldNumber) {
+func (w *weakFields) clear(num protoreflect.FieldNumber) {
delete(*w, int32(num))
}
-func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool {
+func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool {
_, ok := w[int32(num)]
return ok
}
-func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) {
+func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) {
delete(*w, int32(num))
}
-func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage {
+func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage {
if m, ok := w[int32(num)]; ok {
return m
}
@@ -53,7 +53,7 @@ func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pr
return mt.Zero().Interface()
}
-func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) {
+func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) {
if m != nil {
mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
if mt == nil {
diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go
index 2a24953f6a..33745ed062 100644
--- a/vendor/google.golang.org/protobuf/internal/order/order.go
+++ b/vendor/google.golang.org/protobuf/internal/order/order.go
@@ -5,12 +5,12 @@
package order
import (
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
// FieldOrder specifies the ordering to visit message fields.
// It is a function that reports whether x is ordered before y.
-type FieldOrder func(x, y pref.FieldDescriptor) bool
+type FieldOrder func(x, y protoreflect.FieldDescriptor) bool
var (
// AnyFieldOrder specifies no specific field ordering.
@@ -18,9 +18,9 @@ var (
// LegacyFieldOrder sorts fields in the same ordering as emitted by
// wire serialization in the github.com/golang/protobuf implementation.
- LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+ LegacyFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool {
ox, oy := x.ContainingOneof(), y.ContainingOneof()
- inOneof := func(od pref.OneofDescriptor) bool {
+ inOneof := func(od protoreflect.OneofDescriptor) bool {
return od != nil && !od.IsSynthetic()
}
@@ -41,14 +41,14 @@ var (
}
// NumberFieldOrder sorts fields by their field number.
- NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+ NumberFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool {
return x.Number() < y.Number()
}
// IndexNameFieldOrder sorts non-extension fields before extension fields.
// Non-extensions are sorted according to their declaration index.
// Extensions are sorted according to their full name.
- IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+ IndexNameFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool {
// Non-extension fields sort before extension fields.
if x.IsExtension() != y.IsExtension() {
return !x.IsExtension() && y.IsExtension()
@@ -64,7 +64,7 @@ var (
// KeyOrder specifies the ordering to visit map entries.
// It is a function that reports whether x is ordered before y.
-type KeyOrder func(x, y pref.MapKey) bool
+type KeyOrder func(x, y protoreflect.MapKey) bool
var (
// AnyKeyOrder specifies no specific key ordering.
@@ -72,7 +72,7 @@ var (
// GenericKeyOrder sorts false before true, numeric keys in ascending order,
// and strings in lexicographical ordering according to UTF-8 codepoints.
- GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool {
+ GenericKeyOrder KeyOrder = func(x, y protoreflect.MapKey) bool {
switch x.Interface().(type) {
case bool:
return !x.Bool() && y.Bool()
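The comparators above live in an internal package, so user code cannot import them; their shape, though, is just a strict less-than over field descriptors. A hedged sketch of reproducing NumberFieldOrder outside the module (the names here are invented for illustration):

    package fieldorder

    import (
        "sort"

        "google.golang.org/protobuf/reflect/protoreflect"
    )

    // Order mirrors the internal FieldOrder shape: a strict "less" over descriptors.
    type Order func(x, y protoreflect.FieldDescriptor) bool

    // ByNumber reproduces NumberFieldOrder for user code, which cannot
    // import the internal/order package.
    var ByNumber Order = func(x, y protoreflect.FieldDescriptor) bool {
        return x.Number() < y.Number()
    }

    // Sort orders a collected slice of field descriptors in place.
    func Sort(fds []protoreflect.FieldDescriptor, less Order) {
        sort.Slice(fds, func(i, j int) bool { return less(fds[i], fds[j]) })
    }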
diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go
index c8090e0c54..1665a68e5b 100644
--- a/vendor/google.golang.org/protobuf/internal/order/range.go
+++ b/vendor/google.golang.org/protobuf/internal/order/range.go
@@ -9,12 +9,12 @@ import (
"sort"
"sync"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
type messageField struct {
- fd pref.FieldDescriptor
- v pref.Value
+ fd protoreflect.FieldDescriptor
+ v protoreflect.Value
}
var messageFieldPool = sync.Pool{
@@ -25,8 +25,8 @@ type (
// FieldRanger is an interface for visiting all fields in a message.
// The protoreflect.Message type implements this interface.
FieldRanger interface{ Range(VisitField) }
- // VisitField is called everytime a message field is visited.
- VisitField = func(pref.FieldDescriptor, pref.Value) bool
+ // VisitField is called every time a message field is visited.
+ VisitField = func(protoreflect.FieldDescriptor, protoreflect.Value) bool
)
// RangeFields iterates over the fields of fs according to the specified order.
@@ -47,7 +47,7 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) {
}()
// Collect all fields in the message and sort them.
- fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool {
+ fs.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
fields = append(fields, messageField{fd, v})
return true
})
@@ -64,8 +64,8 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) {
}
type mapEntry struct {
- k pref.MapKey
- v pref.Value
+ k protoreflect.MapKey
+ v protoreflect.Value
}
var mapEntryPool = sync.Pool{
@@ -76,8 +76,8 @@ type (
// EntryRanger is an interface for visiting all entries in a map.
// The protoreflect.Map type implements this interface.
EntryRanger interface{ Range(VisitEntry) }
- // VisitEntry is called everytime a map entry is visited.
- VisitEntry = func(pref.MapKey, pref.Value) bool
+ // VisitEntry is called every time a map entry is visited.
+ VisitEntry = func(protoreflect.MapKey, protoreflect.Value) bool
)
// RangeEntries iterates over the entries of es according to the specified order.
@@ -98,7 +98,7 @@ func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) {
}()
// Collect all entries in the map and sort them.
- es.Range(func(k pref.MapKey, v pref.Value) bool {
+ es.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
entries = append(entries, mapEntry{k, v})
return true
})
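From the caller's side, protoreflect.Message satisfies the FieldRanger interface via its Range method, so the collect-sort-visit pattern of RangeFields can be approximated directly. A small runnable sketch (durationpb is used only as a convenient concrete message):

    package main

    import (
        "fmt"
        "sort"
        "time"

        "google.golang.org/protobuf/reflect/protoreflect"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        m := durationpb.New(90*time.Second + 5*time.Nanosecond).ProtoReflect()

        type field struct {
            fd protoreflect.FieldDescriptor
            v  protoreflect.Value
        }
        var fields []field
        m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
            fields = append(fields, field{fd, v})
            return true // keep iterating
        })
        // Visit in field-number order, as NumberFieldOrder would.
        sort.Slice(fields, func(i, j int) bool { return fields[i].fd.Number() < fields[j].fd.Number() })
        for _, f := range fields {
            fmt.Println(f.fd.Name(), f.v.Interface())
        }
    }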
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
index 56a8a4ed3c..fea589c457 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -10,7 +10,7 @@ package strs
import (
"unsafe"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
type (
@@ -59,7 +59,7 @@ type Builder struct {
// AppendFullName is equivalent to protoreflect.FullName.Append,
// but optimized for large batches where each name has a shared lifetime.
-func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
+func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
n := len(prefix) + len(".") + len(name)
if len(prefix) == 0 {
n -= len(".")
@@ -68,7 +68,7 @@ func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.Ful
sb.buf = append(sb.buf, prefix...)
sb.buf = append(sb.buf, '.')
sb.buf = append(sb.buf, name...)
- return pref.FullName(sb.last(n))
+ return protoreflect.FullName(sb.last(n))
}
// MakeString is equivalent to string(b), but optimized for large batches
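The public counterpart of this optimized helper is protoreflect.FullName.Append, which computes the same joined name without the shared-buffer optimization. A tiny sketch:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoreflect"
    )

    func main() {
        prefix := protoreflect.FullName("fizz.buzz")
        name := protoreflect.Name("Foo")
        fmt.Println(prefix.Append(name)) // fizz.buzz.Foo
    }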
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 3d40d5249e..b480c5010f 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -12,54 +12,54 @@ import (
// These constants determine the current version of this module.
//
-//
// For our release process, we enforce the following rules:
-// * Tagged releases use a tag that is identical to String.
-// * Tagged releases never reference a commit where the String
-// contains "devel".
-// * The set of all commits in this repository where String
-// does not contain "devel" must have a unique String.
-//
+// - Tagged releases use a tag that is identical to String.
+// - Tagged releases never reference a commit where the String
+// contains "devel".
+// - The set of all commits in this repository where String
+// does not contain "devel" must have a unique String.
//
// Steps for tagging a new release:
-// 1. Create a new CL.
//
-// 2. Update Minor, Patch, and/or PreRelease as necessary.
-// PreRelease must not contain the string "devel".
+// 1. Create a new CL.
//
-// 3. Since the last released minor version, have there been any changes to
- the generator that relies on new functionality in the runtime?
-// If yes, then increment RequiredGenerated.
+// 2. Update Minor, Patch, and/or PreRelease as necessary.
+// PreRelease must not contain the string "devel".
//
-// 4. Since the last released minor version, have there been any changes to
-// the runtime that removes support for old .pb.go source code?
-// If yes, then increment SupportMinimum.
+// 3. Since the last released minor version, have there been any changes to
+ the generator that relies on new functionality in the runtime?
+// If yes, then increment RequiredGenerated.
//
-// 5. Send out the CL for review and submit it.
-// Note that the next CL in step 8 must be submitted after this CL
-// without any other CLs in-between.
+// 4. Since the last released minor version, have there been any changes to
+// the runtime that removes support for old .pb.go source code?
+// If yes, then increment SupportMinimum.
//
- 6. Tag a new version, where the tag is the current String.
+// 5. Send out the CL for review and submit it.
+// Note that the next CL in step 8 must be submitted after this CL
+// without any other CLs in-between.
//
-// 7. Write release notes for all notable changes
-// between this release and the last release.
+ 6. Tag a new version, where the tag is the current String.
//
-// 8. Create a new CL.
+// 7. Write release notes for all notable changes
+// between this release and the last release.
//
-// 9. Update PreRelease to include the string "devel".
-// For example: "" -> "devel" or "rc.1" -> "rc.1.devel"
+// 8. Create a new CL.
//
-// 10. Send out the CL for review and submit it.
+// 9. Update PreRelease to include the string "devel".
+// For example: "" -> "devel" or "rc.1" -> "rc.1.devel"
+//
+// 10. Send out the CL for review and submit it.
const (
Major = 1
Minor = 28
- Patch = 0
+ Patch = 1
PreRelease = ""
)
// String formats the version string for this module in semver format.
//
// Examples:
+//
// v1.20.1
// v1.21.0-rc.1
func String() string {
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index 11bf7173be..48d47946bb 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -19,7 +19,8 @@ import (
// UnmarshalOptions configures the unmarshaler.
//
// Example usage:
-// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m)
+//
+// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m)
type UnmarshalOptions struct {
pragma.NoUnkeyedLiterals
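A slightly fuller, runnable version of the documented usage, with durationpb chosen purely as an example payload:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        b, err := proto.Marshal(durationpb.New(3 * time.Second))
        if err != nil {
            panic(err)
        }
        var d durationpb.Duration
        // DiscardUnknown drops unknown fields instead of retaining them.
        if err := (proto.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(b, &d); err != nil {
            panic(err)
        }
        fmt.Println(d.AsDuration()) // 3s
    }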
diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go
index c52d8c4ab7..08d2a46f53 100644
--- a/vendor/google.golang.org/protobuf/proto/doc.go
+++ b/vendor/google.golang.org/protobuf/proto/doc.go
@@ -6,18 +6,17 @@
//
// For documentation on protocol buffers in general, see:
//
-// https://developers.google.com/protocol-buffers
+// https://developers.google.com/protocol-buffers
//
// For a tutorial on using protocol buffers with Go, see:
//
-// https://developers.google.com/protocol-buffers/docs/gotutorial
+// https://developers.google.com/protocol-buffers/docs/gotutorial
//
// For a guide to generated Go protocol buffer code, see:
//
-// https://developers.google.com/protocol-buffers/docs/reference/go-generated
+// https://developers.google.com/protocol-buffers/docs/reference/go-generated
//
-//
-// Binary serialization
+// # Binary serialization
//
// This package contains functions to convert to and from the wire format,
// an efficient binary serialization of protocol buffers.
@@ -30,8 +29,7 @@
// • Unmarshal converts a message from the wire format.
// The UnmarshalOptions type provides more control over wire unmarshaling.
//
-//
-// Basic message operations
+// # Basic message operations
//
// • Clone makes a deep copy of a message.
//
@@ -45,8 +43,7 @@
//
// • CheckInitialized reports whether all required fields in a message are set.
//
-//
-// Optional scalar constructors
+// # Optional scalar constructors
//
// The API for some generated messages represents optional scalar fields
// as pointers to a value. For example, an optional string field has the
@@ -61,16 +58,14 @@
//
// Optional scalar fields are only supported in proto2.
//
-//
-// Extension accessors
+// # Extension accessors
//
// • HasExtension, GetExtension, SetExtension, and ClearExtension
// access extension field values in a protocol buffer message.
//
// Extension fields are only supported in proto2.
//
-//
-// Related packages
+// # Related packages
//
// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to
// and from JSON.
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index d18239c237..bf7f816d0e 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -16,7 +16,8 @@ import (
// MarshalOptions configures the marshaler.
//
// Example usage:
-// b, err := MarshalOptions{Deterministic: true}.Marshal(m)
+//
+// b, err := MarshalOptions{Deterministic: true}.Marshal(m)
type MarshalOptions struct {
pragma.NoUnkeyedLiterals
@@ -101,7 +102,9 @@ func (o MarshalOptions) Marshal(m Message) ([]byte, error) {
// otherwise it returns a non-nil empty buffer.
//
// This is to assist the edge-case where user-code does the following:
+//
// m1.OptionalBytes, _ = proto.Marshal(m2)
+//
// where they expect the proto2 "optional_bytes" field to be populated
// if any only if m2 is a valid message.
func emptyBytesForMessage(m Message) []byte {
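The Deterministic option matters mainly for messages with map fields, where ordinary marshaling may order entries randomly. A runnable sketch using structpb (chosen here only because it contains a map):

    package main

    import (
        "bytes"
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
        s, err := structpb.NewStruct(map[string]interface{}{"a": 1.0, "b": 2.0})
        if err != nil {
            panic(err)
        }
        // Deterministic ordering sorts map entries, so two marshals of the
        // same message produce identical bytes.
        opts := proto.MarshalOptions{Deterministic: true}
        b1, _ := opts.Marshal(s)
        b2, _ := opts.Marshal(s)
        fmt.Println(bytes.Equal(b1, b2)) // true
    }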
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
index 4dba2b9699..67948dd1df 100644
--- a/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -10,7 +10,7 @@ import (
"reflect"
"google.golang.org/protobuf/encoding/protowire"
- pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
// Equal reports whether two messages are equal.
@@ -33,6 +33,10 @@ func Equal(x, y Message) bool {
if x == nil || y == nil {
return x == nil && y == nil
}
+ if reflect.TypeOf(x).Kind() == reflect.Ptr && x == y {
+ // Avoid an expensive comparison if both inputs are identical pointers.
+ return true
+ }
mx := x.ProtoReflect()
my := y.ProtoReflect()
if mx.IsValid() != my.IsValid() {
@@ -42,14 +46,14 @@ func Equal(x, y Message) bool {
}
// equalMessage compares two messages.
-func equalMessage(mx, my pref.Message) bool {
+func equalMessage(mx, my protoreflect.Message) bool {
if mx.Descriptor() != my.Descriptor() {
return false
}
nx := 0
equal := true
- mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool {
+ mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool {
nx++
vy := my.Get(fd)
equal = my.Has(fd) && equalField(fd, vx, vy)
@@ -59,7 +63,7 @@ func equalMessage(mx, my pref.Message) bool {
return false
}
ny := 0
- my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool {
+ my.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool {
ny++
return true
})
@@ -71,7 +75,7 @@ func equalMessage(mx, my pref.Message) bool {
}
// equalField compares two fields.
-func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool {
+func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
switch {
case fd.IsList():
return equalList(fd, x.List(), y.List())
@@ -83,12 +87,12 @@ func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool {
}
// equalMap compares two maps.
-func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool {
+func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool {
if x.Len() != y.Len() {
return false
}
equal := true
- x.Range(func(k pref.MapKey, vx pref.Value) bool {
+ x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
vy := y.Get(k)
equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy)
return equal
@@ -97,7 +101,7 @@ func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool {
}
// equalList compares two lists.
-func equalList(fd pref.FieldDescriptor, x, y pref.List) bool {
+func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool {
if x.Len() != y.Len() {
return false
}
@@ -110,31 +114,31 @@ func equalList(fd pref.FieldDescriptor, x, y pref.List) bool {
}
// equalValue compares two singular values.
-func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool {
+func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
switch fd.Kind() {
- case pref.BoolKind:
+ case protoreflect.BoolKind:
return x.Bool() == y.Bool()
- case pref.EnumKind:
+ case protoreflect.EnumKind:
return x.Enum() == y.Enum()
- case pref.Int32Kind, pref.Sint32Kind,
- pref.Int64Kind, pref.Sint64Kind,
- pref.Sfixed32Kind, pref.Sfixed64Kind:
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind,
+ protoreflect.Int64Kind, protoreflect.Sint64Kind,
+ protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
return x.Int() == y.Int()
- case pref.Uint32Kind, pref.Uint64Kind,
- pref.Fixed32Kind, pref.Fixed64Kind:
+ case protoreflect.Uint32Kind, protoreflect.Uint64Kind,
+ protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
return x.Uint() == y.Uint()
- case pref.FloatKind, pref.DoubleKind:
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
fx := x.Float()
fy := y.Float()
if math.IsNaN(fx) || math.IsNaN(fy) {
return math.IsNaN(fx) && math.IsNaN(fy)
}
return fx == fy
- case pref.StringKind:
+ case protoreflect.StringKind:
return x.String() == y.String()
- case pref.BytesKind:
+ case protoreflect.BytesKind:
return bytes.Equal(x.Bytes(), y.Bytes())
- case pref.MessageKind, pref.GroupKind:
+ case protoreflect.MessageKind, protoreflect.GroupKind:
return equalMessage(x.Message(), y.Message())
default:
return x.Interface() == y.Interface()
@@ -143,7 +147,7 @@ func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool {
// equalUnknown compares unknown fields by direct comparison on the raw bytes
// of each individual field number.
-func equalUnknown(x, y pref.RawFields) bool {
+func equalUnknown(x, y protoreflect.RawFields) bool {
if len(x) != len(y) {
return false
}
@@ -151,8 +155,8 @@ func equalUnknown(x, y pref.RawFields) bool {
return true
}
- mx := make(map[pref.FieldNumber]pref.RawFields)
- my := make(map[pref.FieldNumber]pref.RawFields)
+ mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
for len(x) > 0 {
fnum, _, n := protowire.ConsumeField(x)
mx[fnum] = append(mx[fnum], x[:n]...)
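Two behaviors from this file are easy to miss: NaN field values compare equal under proto.Equal (unlike Go's ==), and the newly added fast path short-circuits when both arguments are the same pointer. A small illustration, with wrapperspb chosen only for convenience:

    package main

    import (
        "fmt"
        "math"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        x := wrapperspb.Double(math.NaN())
        y := wrapperspb.Double(math.NaN())
        fmt.Println(proto.Equal(x, y))            // true: NaN payloads compare equal field-wise
        fmt.Println(proto.Equal(x, x))            // true, via the identical-pointer fast path
        fmt.Println(x.GetValue() == y.GetValue()) // false: Go's NaN != NaN
    }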
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
index cebb36cdad..27d7e35012 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
@@ -155,9 +155,9 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName,
//
// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar",
// then the following full names are searched:
-// * fizz.buzz.Foo.Bar
-// * fizz.Foo.Bar
-// * Foo.Bar
+// - fizz.buzz.Foo.Bar
+// - fizz.Foo.Bar
+// - Foo.Bar
func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) {
if !ref.IsValid() {
return nil, errors.New("invalid name reference: %q", ref)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
index dd85915bd4..55aa14922b 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
@@ -8,8 +8,7 @@
// defined in proto source files and value interfaces which provide the
// ability to examine and manipulate the contents of messages.
//
-//
-// Protocol Buffer Descriptors
+// # Protocol Buffer Descriptors
//
// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor)
// are immutable objects that represent protobuf type information.
@@ -26,8 +25,7 @@
// The "google.golang.org/protobuf/reflect/protodesc" package converts between
// google.protobuf.DescriptorProto messages and protobuf descriptors.
//
-//
-// Go Type Descriptors
+// # Go Type Descriptors
//
// A type descriptor (e.g., EnumType or MessageType) is a constructor for
// a concrete Go type that represents the associated protobuf descriptor.
@@ -41,8 +39,7 @@
// The "google.golang.org/protobuf/types/dynamicpb" package can be used to
// create Go type descriptors from protobuf descriptors.
//
-//
-// Value Interfaces
+// # Value Interfaces
//
// The Enum and Message interfaces provide a reflective view over an
// enum or message instance. For enums, it provides the ability to retrieve
@@ -55,13 +52,11 @@
// The "github.com/golang/protobuf/proto".MessageReflect function can be used
// to obtain a reflective view on older messages.
//
-//
-// Relationships
+// # Relationships
//
// The following diagrams demonstrate the relationships between
// various types declared in this package.
//
-//
// ┌───────────────────────────────────┐
// V │
// ┌────────────── New(n) ─────────────┐ │
@@ -83,7 +78,6 @@
//
// • An Enum is a concrete enum instance. Generated enums implement Enum.
//
-//
// ┌──────────────── New() ─────────────────┐
// │ │
// │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐
@@ -98,12 +92,22 @@
//
// • A MessageType describes a concrete Go message type.
// It has a MessageDescriptor and can construct a Message instance.
+// Just as how Go's reflect.Type is a reflective description of a Go type,
+// a MessageType is a reflective description of a Go type for a protobuf message.
//
// • A MessageDescriptor describes an abstract protobuf message type.
-//
-// • A Message is a concrete message instance. Generated messages implement
-// ProtoMessage, which can convert to/from a Message.
-//
+// It has no understanding of Go types. In order to construct a MessageType
+// from just a MessageDescriptor, you can consider looking up the message type
+// in the global registry using protoregistry.GlobalTypes.FindMessageByName
+// or constructing a dynamic MessageType using dynamicpb.NewMessageType.
+//
+// • A Message is a reflective view over a concrete message instance.
+// Generated messages implement ProtoMessage, which can convert to a Message.
+// Just as how Go's reflect.Value is a reflective view over a Go value,
+// a Message is a reflective view over a concrete protobuf message instance.
+// Using Go reflection as an analogy, the ProtoReflect method is similar to
+// calling reflect.ValueOf, and the Message.Interface method is similar to
+// calling reflect.Value.Interface.
//
// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐
// │ V │ V
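The reflect analogy in the comments above can be made concrete; a minimal sketch of the ProtoReflect/Interface round trip (durationpb is an arbitrary generated message):

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        m := durationpb.New(time.Second)
        rm := m.ProtoReflect() // analogous to reflect.ValueOf(m)
        m2 := rm.Interface()   // analogous to reflect.Value.Interface()
        fmt.Println(m2 == m)   // true: the reflective view wraps the same instance
    }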
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
index 121ba3a07b..0b99428855 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
@@ -87,6 +87,7 @@ func (p1 SourcePath) Equal(p2 SourcePath) bool {
// in a future version of this module.
//
// Example output:
+//
// .message_type[6].nested_type[15].field[3]
func (p SourcePath) String() string {
b := p.appendFileDescriptorProto(nil)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index 8e53c44a91..3867470d30 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -480,6 +480,7 @@ type ExtensionDescriptors interface {
// relative to the parent that it is declared within.
//
// For example:
+//
// syntax = "proto2";
// package example;
// message FooMessage {
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
index eb7764c307..ca8e28c5bc 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -50,6 +50,7 @@ import (
// always references the source object.
//
// For example:
+//
// // Append a 0 to a "repeated int32" field.
// // Since the Value returned by Mutable is guaranteed to alias
// // the source message, modifying the Value modifies the message.
@@ -392,6 +393,7 @@ func (v Value) MapKey() MapKey {
// ╚═════════╧═════════════════════════════════════╝
//
// A MapKey is constructed and accessed through a Value:
+//
// k := ValueOf("hash").MapKey() // convert string to MapKey
// s := k.String() // convert MapKey to string
//
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index 59f024c444..58352a6978 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -30,9 +30,11 @@ import (
// conflictPolicy configures the policy for handling registration conflicts.
//
// It can be over-written at compile time with a linker-initialized variable:
+//
// go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn"
//
// It can be over-written at program execution with an environment variable:
+//
// GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main
//
// Neither of the above are covered by the compatibility promise and
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go
index ff094e1ba4..a105cb23e0 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go
@@ -26,16 +26,19 @@ const (
// EnforceVersion is used by code generated by protoc-gen-go
// to statically enforce minimum and maximum versions of this package.
// A compilation failure implies either that:
-// * the runtime package is too old and needs to be updated OR
-// * the generated code is too old and needs to be regenerated.
+// - the runtime package is too old and needs to be updated OR
+// - the generated code is too old and needs to be regenerated.
//
// The runtime package can be upgraded by running:
+//
// go get google.golang.org/protobuf
//
// The generated code can be regenerated by running:
+//
// protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES}
//
// Example usage by generated code:
+//
// const (
// // Verify that this generated code is sufficiently up-to-date.
// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion)
@@ -49,6 +52,7 @@ const (
type EnforceVersion uint
// This enforces the following invariant:
+//
// MinVersion ≤ GenVersion ≤ MaxVersion
const (
_ = EnforceVersion(GenVersion - MinVersion)
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
index 7f94443d26..1b2085d469 100644
--- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -394,7 +394,7 @@ func numValidPaths(m proto.Message, paths []string) int {
// Identify the next message to search within.
md = fd.Message() // may be nil
- // Repeated fields are only allowed at the last postion.
+ // Repeated fields are only allowed at the last position.
if fd.IsList() || fd.IsMap() {
md = nil
}
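For orientation, fieldmaskpb.New runs this path validation when constructing a mask; a brief sketch (durationpb is an arbitrary message with a "seconds" field):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/known/durationpb"
        "google.golang.org/protobuf/types/known/fieldmaskpb"
    )

    func main() {
        // New validates each path against the message's descriptor.
        fm, err := fieldmaskpb.New(&durationpb.Duration{}, "seconds")
        fmt.Println(fm.GetPaths(), err) // [seconds] <nil>

        // Unknown field names are rejected.
        _, err = fieldmaskpb.New(&durationpb.Duration{}, "minutes")
        fmt.Println(err) // non-nil: invalid path
    }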
diff --git a/vendor/k8s.io/klog/v2/.gitignore b/vendor/k8s.io/klog/v2/.gitignore
new file mode 100644
index 0000000000..0aa2002392
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/.gitignore
@@ -0,0 +1,17 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# OSX trash
+.DS_Store
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
+.idea/
+*.iml
+
+# Vscode files
+.vscode
diff --git a/vendor/k8s.io/klog/v2/CONTRIBUTING.md b/vendor/k8s.io/klog/v2/CONTRIBUTING.md
new file mode 100644
index 0000000000..2641b1f41b
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/CONTRIBUTING.md
@@ -0,0 +1,22 @@
+# Contributing Guidelines
+
+Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
+## Contact Information
+
+- [Slack](https://kubernetes.slack.com/messages/sig-architecture)
+- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
diff --git a/vendor/k8s.io/klog/v2/LICENSE b/vendor/k8s.io/klog/v2/LICENSE
new file mode 100644
index 0000000000..37ec93a14f
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS
new file mode 100644
index 0000000000..a2fe8f351b
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/OWNERS
@@ -0,0 +1,14 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+reviewers:
+ - harshanarayana
+ - pohly
+approvers:
+ - dims
+ - thockin
+ - serathius
+emeritus_approvers:
+ - brancz
+ - justinsb
+ - lavalamp
+ - piosz
+ - tallclair
diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md
new file mode 100644
index 0000000000..d45cbe1720
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/README.md
@@ -0,0 +1,118 @@
+klog
+====
+
+klog is a permanent fork of https://github.com/golang/glog.
+
+## Why was klog created?
+
+The decision to create klog was one that wasn't made lightly, but it was necessary due to some
+drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README:
+
+> The code in this repo [...] is not itself under development
+
+This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below:
+
+ * `glog` [presents a lot of "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented.
+ * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it
+ * A long term goal is to implement a logging interface that allows us to add context, change output format, etc.
+
+Historical context is available here:
+
+ * https://github.com/kubernetes/kubernetes/issues/61006
+ * https://github.com/kubernetes/kubernetes/issues/70264
+ * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ
+ * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ
+
+## Release versioning
+
+Semantic versioning is used in this repository. It contains several Go modules
+with different levels of stability:
+- `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags
+- `examples` - no stable API, no tags, no intention to ever stabilize
+
+Exempt from the API stability guarantee are items (packages, functions, etc.)
+which are marked explicitly as `EXPERIMENTAL` in their docs comment. Those
+may still change in incompatible ways or get removed entirely. This can only
+be used for code that is used in tests to avoid situations where non-test
+code from two different Kubernetes dependencies depends on incompatible
+releases of klog because an experimental API was changed.
+
+----
+
+How to use klog
+===============
+- Replace imports for `"github.com/golang/glog"` with `"k8s.io/klog/v2"`
+- Use `klog.InitFlags(nil)` explicitly for initializing global flags, as we no longer use the `init()` method to register the flags
+- You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`)
+- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use the `klog.SetOutput()` method and supply an `io.Writer`. (See `examples/set_output/usage_set_output.go`)
+- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md))
+- See our documentation on [pkg.go.dev/k8s.io](https://pkg.go.dev/k8s.io/klog).
+
+**NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater.
+
+### Coexisting with klog/v2
+
+See [this example](examples/coexist_klog_v1_and_v2/) to see how to coexist with both klog/v1 and klog/v2.
+
+### Coexisting with glog
+This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and synchronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`.
+
+## Community, discussion, contribution, and support
+
+Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
+
+You can reach the maintainers of this project at:
+
+- [Slack](https://kubernetes.slack.com/messages/klog)
+- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
+
+### Code of conduct
+
+Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
+
+----
+
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+ https://github.com/google/glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+ Package glog implements logging analogous to the Google-internal
+ C++ INFO/ERROR/V setup. It provides functions Info, Warning,
+ Error, Fatal, plus formatting variants such as Infof. It
+ also provides V-style logging controlled by the -v and
+ -vmodule=file=2 flags.
+
+ Basic examples:
+
+ glog.Info("Prepare to repel boarders")
+
+ glog.Fatalf("Initialization failed: %s", err)
+
+ See the documentation of the V function for an explanation
+ of these examples:
+
+ if glog.V(2) {
+ glog.Info("Starting transaction...")
+ }
+
+ glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
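Putting the README's usage notes together, a minimal redirect-to-buffer program might look like this (a sketch, not from the vendored README; logtostderr must be disabled for SetOutput to take effect):

    package main

    import (
        "bytes"
        "flag"
        "fmt"

        "k8s.io/klog/v2"
    )

    func main() {
        klog.InitFlags(nil) // register klog's flags; there are no init() side effects anymore
        flag.Set("logtostderr", "false")
        flag.Parse()

        var buf bytes.Buffer
        klog.SetOutput(&buf) // redirect everything klog writes
        klog.Info("hello from klog")
        klog.Flush()
        fmt.Print(buf.String())
    }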
diff --git a/vendor/k8s.io/klog/v2/RELEASE.md b/vendor/k8s.io/klog/v2/RELEASE.md
new file mode 100644
index 0000000000..b53eb960ce
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/RELEASE.md
@@ -0,0 +1,9 @@
+# Release Process
+
+`klog` is released on an as-needed basis. The process is as follows:
+
+1. An issue is opened proposing a new release, with a changelog since the last release
+1. All [OWNERS](OWNERS) must LGTM this release
+1. An OWNER runs `git tag -s $VERSION`, inserts the changelog, and pushes the tag with `git push $VERSION`
+1. The release issue is closed
+1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released`
diff --git a/vendor/k8s.io/klog/v2/SECURITY.md b/vendor/k8s.io/klog/v2/SECURITY.md
new file mode 100644
index 0000000000..2083d44cdf
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/SECURITY.md
@@ -0,0 +1,22 @@
+# Security Policy
+
+## Security Announcements
+
+Join the [kubernetes-security-announce] group for security and vulnerability announcements.
+
+You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss].
+
+## Reporting a Vulnerability
+
+Instructions for reporting a vulnerability can be found on the
+[Kubernetes Security and Disclosure Information] page.
+
+## Supported Versions
+
+Information about supported Kubernetes versions can be found on the
+[Kubernetes version and version skew support policy] page on the Kubernetes website.
+
+[kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce
+[kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50
+[Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions
+[Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability
diff --git a/vendor/k8s.io/klog/v2/SECURITY_CONTACTS b/vendor/k8s.io/klog/v2/SECURITY_CONTACTS
new file mode 100644
index 0000000000..6128a58699
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/SECURITY_CONTACTS
@@ -0,0 +1,20 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Committee to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+dims
+thockin
+justinsb
+tallclair
+piosz
+brancz
+DirectXMan12
+lavalamp
diff --git a/vendor/k8s.io/klog/v2/code-of-conduct.md b/vendor/k8s.io/klog/v2/code-of-conduct.md
new file mode 100644
index 0000000000..0d15c00cf3
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go
new file mode 100644
index 0000000000..2428963c0e
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/contextual.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+ "context"
+
+ "github.com/go-logr/logr"
+)
+
+// This file provides the implementation of
+// https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1602-structured-logging
+//
+// SetLogger and ClearLogger were originally added to klog.go and got moved
+// here. Contextual logging adds a way to retrieve a Logger for direct logging
+// without the logging calls in klog.go.
+//
+// The global variables are expected to be modified only during sequential
+// parts of a program (init, serial tests) and therefore are not protected by
+// mutex locking.
+
+var (
+ // klogLogger is used as fallback for logging through the normal klog code
+ // when no Logger is set.
+ klogLogger logr.Logger = logr.New(&klogger{})
+)
+
+// SetLogger sets a Logger implementation that will be used as backing
+// implementation of the traditional klog log calls. klog will do its own
+// verbosity checks before calling logger.V().Info. logger.Error is always
+// called, regardless of the klog verbosity settings.
+//
+// If set, all log lines will be suppressed from the regular output, and
+// redirected to the logr implementation.
+// Use as:
+//
+// ...
+// klog.SetLogger(zapr.NewLogger(zapLog))
+//
+// To remove a backing logr implementation, use ClearLogger. Setting an
+// empty logger with SetLogger(logr.Logger{}) does not work.
+//
+// Modifying the logger is not thread-safe and should be done while no other
+// goroutines invoke log calls, usually during program initialization.
+func SetLogger(logger logr.Logger) {
+ SetLoggerWithOptions(logger)
+}
+
+// SetLoggerWithOptions is a more flexible version of SetLogger. Without
+// additional options, it behaves exactly like SetLogger. By passing
+// ContextualLogger(true) as option, it can be used to set a logger that then
+// will also get called directly by applications which retrieve it via
+// FromContext, Background, or TODO.
+//
+// Supporting direct calls is recommended because it avoids the overhead of
+// routing log entries through klogr into klog and then into the actual Logger
+// backend.
+func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) {
+ logging.logger = &logger
+ logging.loggerOptions = loggerOptions{}
+ for _, opt := range opts {
+ opt(&logging.loggerOptions)
+ }
+}
+
+// ContextualLogger determines whether the logger passed to
+// SetLoggerWithOptions may also get called directly. Such a logger cannot rely
+// on verbosity checking in klog.
+func ContextualLogger(enabled bool) LoggerOption {
+ return func(o *loggerOptions) {
+ o.contextualLogger = enabled
+ }
+}
+
+// FlushLogger provides a callback for flushing data buffered by the logger.
+func FlushLogger(flush func()) LoggerOption {
+ return func(o *loggerOptions) {
+ o.flush = flush
+ }
+}
+
+// LoggerOption implements the functional parameter paradigm for
+// SetLoggerWithOptions.
+type LoggerOption func(o *loggerOptions)
+
+type loggerOptions struct {
+ contextualLogger bool
+ flush func()
+}
+
+// ClearLogger removes a backing Logger implementation if one was set earlier
+// with SetLogger.
+//
+// Modifying the logger is not thread-safe and should be done while no other
+// goroutines invoke log calls, usually during program initialization.
+func ClearLogger() {
+ logging.logger = nil
+ logging.loggerOptions = loggerOptions{}
+}
+
+// EnableContextualLogging controls whether contextual logging is enabled.
+// By default it is enabled. When disabled, FromContext avoids looking up
+// the logger in the context and always returns the global logger.
+// LoggerWithValues, LoggerWithName, and NewContext become no-ops
+// and return their input logger or context, respectively. This may be
+// useful to avoid the additional overhead of contextual logging.
+//
+// This must be called during initialization before goroutines are started.
+func EnableContextualLogging(enabled bool) {
+ logging.contextualLoggingEnabled = enabled
+}
+
+// FromContext retrieves a logger set by the caller or, if not set,
+// falls back to the program's global logger (a Logger instance or klog
+// itself).
+func FromContext(ctx context.Context) Logger {
+ if logging.contextualLoggingEnabled {
+ if logger, err := logr.FromContext(ctx); err == nil {
+ return logger
+ }
+ }
+
+ return Background()
+}
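+
+// Illustrative sketch (not part of klog itself): a function that prefers a
+// logger from its context and falls back to the global one. doWork is a
+// hypothetical helper:
+//
+//	func doWork(ctx context.Context) {
+//		logger := klog.FromContext(ctx)
+//		logger.Info("starting work")
+//	}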
+
+// TODO can be used as a last resort by code that has no means of
+// receiving a logger from its caller. FromContext or an explicit logger
+// parameter should be used instead.
+func TODO() Logger {
+ return Background()
+}
+
+// Background retrieves the fallback logger. It should not be called before
+// that logger has been initialized by the program, nor by code that could
+// instead receive a logger via its parameters. TODO can be used as a
+// temporary solution for such code.
+func Background() Logger {
+ if logging.loggerOptions.contextualLogger {
+ // Is non-nil because logging.loggerOptions.contextualLogger is
+ // only true if a logger was set.
+ return *logging.logger
+ }
+
+ return klogLogger
+}
+
+// LoggerWithValues returns logger.WithValues(...kv) when
+// contextual logging is enabled, otherwise the logger.
+func LoggerWithValues(logger Logger, kv ...interface{}) Logger {
+ if logging.contextualLoggingEnabled {
+ return logger.WithValues(kv...)
+ }
+ return logger
+}
+
+// LoggerWithName returns logger.WithName(name) when contextual logging is
+// enabled, otherwise the logger.
+func LoggerWithName(logger Logger, name string) Logger {
+ if logging.contextualLoggingEnabled {
+ return logger.WithName(name)
+ }
+ return logger
+}
+
+// NewContext returns logr.NewContext(ctx, logger) when
+// contextual logging is enabled, otherwise ctx.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ if logging.contextualLoggingEnabled {
+ return logr.NewContext(ctx, logger)
+ }
+ return ctx
+}
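+
+// Illustrative sketch (not part of klog itself): handing a decorated logger
+// down a call chain via the context, using the hypothetical doWork from
+// above:
+//
+//	logger := klog.Background()
+//	logger = klog.LoggerWithName(logger, "reconciler")
+//	logger = klog.LoggerWithValues(logger, "pod", "ns/name")
+//	ctx := klog.NewContext(context.Background(), logger)
+//	doWork(ctx) // doWork retrieves the logger with klog.FromContext(ctx)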
diff --git a/vendor/k8s.io/klog/v2/exit.go b/vendor/k8s.io/klog/v2/exit.go
new file mode 100644
index 0000000000..320a147728
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/exit.go
@@ -0,0 +1,69 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2022 The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package klog
+
+import (
+ "fmt"
+ "os"
+ "time"
+)
+
+var (
+
+ // ExitFlushTimeout is the timeout that klog has traditionally used during
+ // calls like Fatal or Exit when flushing log data right before exiting.
+ // Applications that replace those calls and do not have some specific
+ // requirements like "exit immediately" can use this value as parameter
+ // for FlushAndExit.
+ //
+ // Can be set for testing purpose or to change the application's
+ // default.
+ ExitFlushTimeout = 10 * time.Second
+
+ // OsExit is the function called by FlushAndExit to terminate the program.
+ //
+ // Can be set for testing purpose or to change the application's
+ // default behavior. Note that the function should not simply return
+ // because callers of functions like Fatal will not expect that.
+ OsExit = os.Exit
+)
+
+// FlushAndExit flushes log data for a certain amount of time and then calls
+// os.Exit. Combined with some logging call it provides a replacement for
+// traditional calls like Fatal or Exit.
+func FlushAndExit(flushTimeout time.Duration, exitCode int) {
+ timeoutFlush(flushTimeout)
+ OsExit(exitCode)
+}
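+
+// Illustrative sketch (not part of klog itself): replacing a traditional
+// Fatal call with structured logging plus FlushAndExit; err is assumed to
+// be an error from initialization:
+//
+//	logger := klog.Background()
+//	logger.Error(err, "initialization failed")
+//	klog.FlushAndExit(klog.ExitFlushTimeout, 1)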
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first. This is needed because the hooks invoked
+// by Flush may deadlock when klog.Fatal is called from a hook that holds
+// a lock. Flushing also might take too long.
+func timeoutFlush(timeout time.Duration) {
+ done := make(chan bool, 1)
+ go func() {
+ Flush() // calls logging.lockAndFlushAll()
+ done <- true
+ }()
+ select {
+ case <-done:
+ case <-time.After(timeout):
+ fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout)
+ }
+}
diff --git a/vendor/k8s.io/klog/v2/imports.go b/vendor/k8s.io/klog/v2/imports.go
new file mode 100644
index 0000000000..602c3ed9e6
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/imports.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+ "github.com/go-logr/logr"
+)
+
+// The reason for providing these aliases is to allow code to work with logr
+// without directly importing it.
+
+// Logger in this package is exactly the same as logr.Logger.
+type Logger = logr.Logger
+
+// LogSink in this package is exactly the same as logr.LogSink.
+type LogSink = logr.LogSink
+
+// RuntimeInfo in this package is exactly the same as logr.RuntimeInfo.
+type RuntimeInfo = logr.RuntimeInfo
+
+var (
+ // New is an alias for logr.New.
+ New = logr.New
+)
diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go
new file mode 100644
index 0000000000..ac88682a2c
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go
@@ -0,0 +1,159 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2022 The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package buffer provides a cache for bytes.Buffer instances that can be reused
+// to avoid frequent allocation and deallocation. It also has utility code
+// for log header formatting that uses these buffers.
+package buffer
+
+import (
+ "bytes"
+ "os"
+ "sync"
+ "time"
+
+ "k8s.io/klog/v2/internal/severity"
+)
+
+var (
+ // Pid is inserted into log headers. Can be overridden for tests.
+ Pid = os.Getpid()
+)
+
+// Buffer holds a single bytes.Buffer for reuse. The zero value is ready for
+// use. It also provides some helper methods for output formatting.
+type Buffer struct {
+ bytes.Buffer
+ Tmp [64]byte // temporary byte array for creating headers.
+ next *Buffer
+}
+
+// Buffers manages the reuse of individual buffer instances. It is thread-safe.
+type Buffers struct {
+ // mu protects the free list. It is separate from the main mutex
+ // so buffers can be grabbed and printed to without holding the main lock,
+ // for better parallelization.
+ mu sync.Mutex
+
+ // freeList is a list of byte buffers, maintained under mu.
+ freeList *Buffer
+}
+
+// GetBuffer returns a new, ready-to-use buffer.
+func (bl *Buffers) GetBuffer() *Buffer {
+ bl.mu.Lock()
+ b := bl.freeList
+ if b != nil {
+ bl.freeList = b.next
+ }
+ bl.mu.Unlock()
+ if b == nil {
+ b = new(Buffer)
+ } else {
+ b.next = nil
+ b.Reset()
+ }
+ return b
+}
+
+// PutBuffer returns a buffer to the free list.
+func (bl *Buffers) PutBuffer(b *Buffer) {
+ if b.Len() >= 256 {
+ // Let big buffers die a natural death.
+ return
+ }
+ bl.mu.Lock()
+ b.next = bl.freeList
+ bl.freeList = b
+ bl.mu.Unlock()
+}
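+
+// Illustrative sketch (not part of this package): the intended
+// checkout/return cycle for a buffer:
+//
+//	var pool Buffers
+//	buf := pool.GetBuffer()
+//	buf.WriteString("some log line")
+//	// ... write buf.Bytes() to the output ...
+//	pool.PutBuffer(buf) // small buffers go back on the free list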
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.Tmp[i].
+func (buf *Buffer) twoDigits(i, d int) {
+ buf.Tmp[i+1] = digits[d%10]
+ d /= 10
+ buf.Tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.Tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *Buffer) nDigits(n, i, d int, pad byte) {
+ j := n - 1
+ for ; j >= 0 && d > 0; j-- {
+ buf.Tmp[i+j] = digits[d%10]
+ d /= 10
+ }
+ for ; j >= 0; j-- {
+ buf.Tmp[i+j] = pad
+ }
+}
+
+// someDigits formats a zero-prefixed variable-width integer at buf.Tmp[i].
+func (buf *Buffer) someDigits(i, d int) int {
+ // Print into the top, then copy down. We know there's space for at least
+ // a 10-digit number.
+ j := len(buf.Tmp)
+ for {
+ j--
+ buf.Tmp[j] = digits[d%10]
+ d /= 10
+ if d == 0 {
+ break
+ }
+ }
+ return copy(buf.Tmp[i:], buf.Tmp[j:])
+}
+
+// FormatHeader formats a log header using the provided file name and line number.
+func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) {
+ if line < 0 {
+ line = 0 // not a real line number, but acceptable to someDigits
+ }
+ if s > severity.FatalLog {
+ s = severity.InfoLog // for safety.
+ }
+
+ // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+ // It's worth about 3X. Fprintf is hard.
+ _, month, day := now.Date()
+ hour, minute, second := now.Clock()
+ // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+ buf.Tmp[0] = severity.Char[s]
+ buf.twoDigits(1, int(month))
+ buf.twoDigits(3, day)
+ buf.Tmp[5] = ' '
+ buf.twoDigits(6, hour)
+ buf.Tmp[8] = ':'
+ buf.twoDigits(9, minute)
+ buf.Tmp[11] = ':'
+ buf.twoDigits(12, second)
+ buf.Tmp[14] = '.'
+ buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+ buf.Tmp[21] = ' '
+ buf.nDigits(7, 22, Pid, ' ') // TODO: should be TID
+ buf.Tmp[29] = ' '
+ buf.Write(buf.Tmp[:30])
+ buf.WriteString(file)
+ buf.Tmp[0] = ':'
+ n := buf.someDigits(1, line)
+ buf.Tmp[n+1] = ']'
+ buf.Tmp[n+2] = ' '
+ buf.Write(buf.Tmp[:n+3])
+}
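+
+// Illustrative sketch (not part of this package): formatting a header and
+// printing it; the output follows the "Lmmdd hh:mm:ss.uuuuuu threadid
+// file:line] " layout, e.g. "I0102 15:04:05.000000    1234 main.go:42] ":
+//
+//	var buf Buffer
+//	buf.FormatHeader(severity.InfoLog, "main.go", 42, time.Now())
+//	fmt.Print(buf.String())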
diff --git a/vendor/k8s.io/klog/v2/internal/clock/README.md b/vendor/k8s.io/klog/v2/internal/clock/README.md
new file mode 100644
index 0000000000..03d692c8f8
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/clock/README.md
@@ -0,0 +1,7 @@
+# Clock
+
+This package provides an interface for time-based operations. It allows
+mocking time for testing.
+
+This is a copy of k8s.io/utils/clock. We have to copy it to avoid a circular
+dependency (k8s.io/klog -> k8s.io/utils -> k8s.io/klog).
diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go
new file mode 100644
index 0000000000..b8b6af5c81
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clock
+
+import "time"
+
+// PassiveClock allows for injecting fake or real clocks into code
+// that needs to read the current time but does not support scheduling
+// activity in the future.
+type PassiveClock interface {
+ Now() time.Time
+ Since(time.Time) time.Duration
+}
+
+// Clock allows for injecting fake or real clocks into code that
+// needs to do arbitrary things based on time.
+type Clock interface {
+ PassiveClock
+ // After returns the channel of a new Timer.
+ // This method does not allow the backing timer to be freed/GC'ed before
+ // it fires. Use NewTimer instead.
+ After(d time.Duration) <-chan time.Time
+ // NewTimer returns a new Timer.
+ NewTimer(d time.Duration) Timer
+ // Sleep sleeps for the provided duration d.
+ // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel.
+ Sleep(d time.Duration)
+ // Tick returns the channel of a new Ticker.
+ // This method does not allow the backing ticker to be freed/GC'ed. Use
+ // NewTicker from WithTicker instead.
+ Tick(d time.Duration) <-chan time.Time
+}
+
+// WithTicker allows for injecting fake or real clocks into code that
+// needs to do arbitrary things based on time.
+type WithTicker interface {
+ Clock
+ // NewTicker returns a new Ticker.
+ NewTicker(time.Duration) Ticker
+}
+
+// WithDelayedExecution allows for injecting fake or real clocks into
+// code that needs to make use of AfterFunc functionality.
+type WithDelayedExecution interface {
+ Clock
+ // AfterFunc executes f in its own goroutine after waiting
+ // for d duration and returns a Timer whose channel can be
+ // closed by calling Stop() on the Timer.
+ AfterFunc(d time.Duration, f func()) Timer
+}
+
+// WithTickerAndDelayedExecution allows for injecting fake or real clocks
+// into code that needs Ticker and AfterFunc functionality.
+type WithTickerAndDelayedExecution interface {
+ WithTicker
+ // AfterFunc executes f in its own goroutine after waiting
+ // for d duration and returns a Timer whose channel can be
+ // closed by calling Stop() on the Timer.
+ AfterFunc(d time.Duration, f func()) Timer
+}
+
+// Ticker defines the Ticker interface.
+type Ticker interface {
+ C() <-chan time.Time
+ Stop()
+}
+
+var _ = WithTicker(RealClock{})
+
+// RealClock really calls time.Now()
+type RealClock struct{}
+
+// Now returns the current time.
+func (RealClock) Now() time.Time {
+ return time.Now()
+}
+
+// Since returns time since the specified timestamp.
+func (RealClock) Since(ts time.Time) time.Duration {
+ return time.Since(ts)
+}
+
+// After is the same as time.After(d).
+// This method does not allow the backing timer to be freed/GC'ed before it
+// fires. Use NewTimer instead.
+func (RealClock) After(d time.Duration) <-chan time.Time {
+ return time.After(d)
+}
+
+// NewTimer is the same as time.NewTimer(d)
+func (RealClock) NewTimer(d time.Duration) Timer {
+ return &realTimer{
+ timer: time.NewTimer(d),
+ }
+}
+
+// AfterFunc is the same as time.AfterFunc(d, f).
+func (RealClock) AfterFunc(d time.Duration, f func()) Timer {
+ return &realTimer{
+ timer: time.AfterFunc(d, f),
+ }
+}
+
+// Tick is the same as time.Tick(d)
+// This method does not allow the backing ticker to be freed/GC'ed. Use
+// NewTicker instead.
+func (RealClock) Tick(d time.Duration) <-chan time.Time {
+ return time.Tick(d)
+}
+
+// NewTicker returns a new Ticker.
+func (RealClock) NewTicker(d time.Duration) Ticker {
+ return &realTicker{
+ ticker: time.NewTicker(d),
+ }
+}
+
+// Sleep is the same as time.Sleep(d)
+// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel.
+func (RealClock) Sleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+// Timer allows for injecting fake or real timers into code that
+// needs to do arbitrary things based on time.
+type Timer interface {
+ C() <-chan time.Time
+ Stop() bool
+ Reset(d time.Duration) bool
+}
+
+var _ = Timer(&realTimer{})
+
+// realTimer is backed by an actual time.Timer.
+type realTimer struct {
+ timer *time.Timer
+}
+
+// C returns the underlying timer's channel.
+func (r *realTimer) C() <-chan time.Time {
+ return r.timer.C
+}
+
+// Stop calls Stop() on the underlying timer.
+func (r *realTimer) Stop() bool {
+ return r.timer.Stop()
+}
+
+// Reset calls Reset() on the underlying timer.
+func (r *realTimer) Reset(d time.Duration) bool {
+ return r.timer.Reset(d)
+}
+
+type realTicker struct {
+ ticker *time.Ticker
+}
+
+func (r *realTicker) C() <-chan time.Time {
+ return r.ticker.C
+}
+
+func (r *realTicker) Stop() {
+ r.ticker.Stop()
+}
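+
+// Illustrative sketch (not part of this package): code that takes a Clock so
+// that tests can substitute a fake implementation; waitBriefly is a
+// hypothetical helper:
+//
+//	func waitBriefly(c Clock) {
+//		t := c.NewTimer(time.Second)
+//		defer t.Stop()
+//		<-t.C()
+//	}
+//
+//	// Production callers pass RealClock{}; tests pass a fake Clock.
+//	waitBriefly(RealClock{})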
diff --git a/vendor/k8s.io/klog/v2/internal/dbg/dbg.go b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go
new file mode 100644
index 0000000000..f27bd14472
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go
@@ -0,0 +1,42 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dbg provides some helper code for call traces.
+package dbg
+
+import (
+ "runtime"
+)
+
+// Stacks is a wrapper for runtime.Stack that attempts to recover the data for
+// all goroutines or the calling one.
+func Stacks(all bool) []byte {
+ // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
+ n := 10000
+ if all {
+ n = 100000
+ }
+ var trace []byte
+ for i := 0; i < 5; i++ {
+ trace = make([]byte, n)
+ nbytes := runtime.Stack(trace, all)
+ if nbytes < len(trace) {
+ return trace[:nbytes]
+ }
+ n *= 2
+ }
+ return trace
+}
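+
+// Illustrative sketch (not part of this package): dumping the calling
+// goroutine's stack to stderr:
+//
+//	trace := Stacks(false)
+//	os.Stderr.Write(trace)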
diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
new file mode 100644
index 0000000000..ad6bf11165
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
@@ -0,0 +1,253 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serialize
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+
+ "github.com/go-logr/logr"
+)
+
+// WithValues implements LogSink.WithValues. The old key/value pairs are
+// assumed to be well-formed, the new ones are checked and padded if
+// necessary. It returns a new slice.
+func WithValues(oldKV, newKV []interface{}) []interface{} {
+ if len(newKV) == 0 {
+ return oldKV
+ }
+ newLen := len(oldKV) + len(newKV)
+ hasMissingValue := newLen%2 != 0
+ if hasMissingValue {
+ newLen++
+ }
+ // The new LogSink must have its own slice.
+ kv := make([]interface{}, 0, newLen)
+ kv = append(kv, oldKV...)
+ kv = append(kv, newKV...)
+ if hasMissingValue {
+ kv = append(kv, missingValue)
+ }
+ return kv
+}
+
+// MergeKVs deduplicates elements provided in two key/value slices.
+//
+// Keys in each slice are expected to be unique, so duplicates can only occur
+// when the first and second slice contain the same key. When that happens, the
+// key/value pair from the second slice is used. The first slice must be well-formed
+// (= even key/value pairs). The second one may have a missing value, in which
+// case the special "missing value" is added to the result.
+func MergeKVs(first, second []interface{}) []interface{} {
+ maxLength := len(first) + (len(second)+1)/2*2
+ if maxLength == 0 {
+ // Nothing to do at all.
+ return nil
+ }
+
+ if len(first) == 0 && len(second)%2 == 0 {
+ // Nothing to be overridden, second slice is well-formed
+ // and can be used directly.
+ return second
+ }
+
+ // Determine which keys are in the second slice so that we can skip
+ // them when iterating over the first one. The code intentionally
+ // favors performance over completeness: we assume that keys are string
+ // constants and thus compare equal when the string values are equal. A
+ // string constant being overridden by, for example, a fmt.Stringer is
+ // not handled.
+ overrides := map[interface{}]bool{}
+ for i := 0; i < len(second); i += 2 {
+ overrides[second[i]] = true
+ }
+ merged := make([]interface{}, 0, maxLength)
+ for i := 0; i+1 < len(first); i += 2 {
+ key := first[i]
+ if overrides[key] {
+ continue
+ }
+ merged = append(merged, key, first[i+1])
+ }
+ merged = append(merged, second...)
+ if len(merged)%2 != 0 {
+ merged = append(merged, missingValue)
+ }
+ return merged
+}
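+
+// Illustrative sketch (not part of this package): values from the second
+// slice win, and a trailing key without a value gets padded:
+//
+//	merged := MergeKVs(
+//		[]interface{}{"a", 1, "b", 2},
+//		[]interface{}{"b", 3, "c"},
+//	)
+//	// merged is []interface{}{"a", 1, "b", 3, "c", "(MISSING)"}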
+
+const missingValue = "(MISSING)"
+
+// KVListFormat serializes all key/value pairs into the provided buffer.
+// A space gets inserted before the first pair and between each pair.
+func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
+ for i := 0; i < len(keysAndValues); i += 2 {
+ var v interface{}
+ k := keysAndValues[i]
+ if i+1 < len(keysAndValues) {
+ v = keysAndValues[i+1]
+ } else {
+ v = missingValue
+ }
+ b.WriteByte(' ')
+ // Keys are assumed to be well-formed according to
+ // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
+ // for the sake of performance. Keys with spaces,
+ // special characters, etc. will break parsing.
+ if sK, ok := k.(string); ok {
+ // Avoid one allocation when the key is a string, which
+ // normally it should be.
+ b.WriteString(sK)
+ } else {
+ b.WriteString(fmt.Sprintf("%s", k))
+ }
+
+ // The type checks are sorted so that more frequently used ones
+ // come first because that is then faster in the common
+ // cases. In Kubernetes, ObjectRef (a Stringer) is more common
+ // than plain strings
+ // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
+ switch v := v.(type) {
+ case fmt.Stringer:
+ writeStringValue(b, true, StringerToString(v))
+ case string:
+ writeStringValue(b, true, v)
+ case error:
+ writeStringValue(b, true, ErrorToString(v))
+ case logr.Marshaler:
+ value := MarshalerToValue(v)
+ // A marshaler that returns a string is useful for
+ // delayed formatting of complex values. We treat this
+ // case like a normal string. This is useful for
+ // multi-line support.
+ //
+ // We could do this by recursively formatting a value,
+ // but that comes with the risk of infinite recursion
+ // if a marshaler returns itself. Instead we call it
+ // only once and rely on it returning the intended
+ // value directly.
+ switch value := value.(type) {
+ case string:
+ writeStringValue(b, true, value)
+ default:
+ writeStringValue(b, false, fmt.Sprintf("%+v", value))
+ }
+ case []byte:
+ // In https://github.com/kubernetes/klog/pull/237 it was decided
+ // to format byte slices with "%+q". The advantages of that are:
+ // - readable output if the bytes happen to be printable
+ // - non-printable bytes get represented as unicode escape
+ // sequences (\uxxxx)
+ //
+ // The downsides are that we cannot use the faster
+ // strconv.Quote here and that multi-line output is not
+ // supported. If developers know that a byte array is
+ // printable and they want multi-line output, they can
+ // convert the value to string before logging it.
+ b.WriteByte('=')
+ b.WriteString(fmt.Sprintf("%+q", v))
+ default:
+ writeStringValue(b, false, fmt.Sprintf("%+v", v))
+ }
+ }
+}
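+
+// Illustrative sketch (not part of this package): the textual form produced
+// for a few typical value types:
+//
+//	var b bytes.Buffer
+//	KVListFormat(&b, "count", 1, "msg", "hello", "err", errors.New("boom"))
+//	// b.String() == ` count=1 msg="hello" err="boom"`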
+
+// StringerToString converts a Stringer to a string,
+// handling panics if they occur.
+func StringerToString(s fmt.Stringer) (ret string) {
+ defer func() {
+ if err := recover(); err != nil {
+ ret = fmt.Sprintf("<panic: %s>", err)
+ }
+ }()
+ ret = s.String()
+ return
+}
+
+// MarshalerToValue invokes a marshaler and catches
+// panics.
+func MarshalerToValue(m logr.Marshaler) (ret interface{}) {
+ defer func() {
+ if err := recover(); err != nil {
+ ret = fmt.Sprintf("<panic: %s>", err)
+ }
+ }()
+ ret = m.MarshalLog()
+ return
+}
+
+// ErrorToString converts an error to a string,
+// handling panics if they occur.
+func ErrorToString(err error) (ret string) {
+ defer func() {
+ if err := recover(); err != nil {
+ ret = fmt.Sprintf("<panic: %s>", err)
+ }
+ }()
+ ret = err.Error()
+ return
+}
+
+func writeStringValue(b *bytes.Buffer, quote bool, v string) {
+ data := []byte(v)
+ index := bytes.IndexByte(data, '\n')
+ if index == -1 {
+ b.WriteByte('=')
+ if quote {
+ // Simple string, quote quotation marks and non-printable characters.
+ b.WriteString(strconv.Quote(v))
+ return
+ }
+ // Non-string with no line breaks.
+ b.WriteString(v)
+ return
+ }
+
+ // Complex multi-line string, shown as-is with indentation like this:
+ // I... "hello world" key=<
+ // <tab>line 1
+ // <tab>line 2
+ // >
+ //
+ // Tabs indent the lines of the value while the end of string delimiter
+ // is indented with a space. That has two purposes:
+ // - visual difference between the two for a human reader because the
+ // indentation will be different
+ // - no ambiguity when some value line starts with the end delimiter
+ //
+ // One downside is that the output cannot distinguish between strings that
+ // end with a line break and those that don't because the end delimiter
+ // will always be on the next line.
+ b.WriteString("=<\n")
+ for index != -1 {
+ b.WriteByte('\t')
+ b.Write(data[0 : index+1])
+ data = data[index+1:]
+ index = bytes.IndexByte(data, '\n')
+ }
+ if len(data) == 0 {
+ // String ended with line break, don't add another.
+ b.WriteString(" >")
+ } else {
+ // No line break at end of last line, write rest of string and
+ // add one.
+ b.WriteByte('\t')
+ b.Write(data)
+ b.WriteString("\n >")
+ }
+}
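+
+// Illustrative sketch (not part of this package): a value containing a line
+// break is rendered in the block form described above:
+//
+//	var b bytes.Buffer
+//	writeStringValue(&b, true, "line 1\nline 2")
+//	// b.String() == "=<\n\tline 1\n\tline 2\n >"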
diff --git a/vendor/k8s.io/klog/v2/internal/severity/severity.go b/vendor/k8s.io/klog/v2/internal/severity/severity.go
new file mode 100644
index 0000000000..30fa1834f0
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/severity/severity.go
@@ -0,0 +1,58 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2022 The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package severity provides definitions for klog severity (info, warning, ...)
+package severity
+
+import (
+ "strings"
+)
+
+// Severity identifies the sort of log: info, warning, etc. The binding to
+// flag.Value is handled in klog.go.
+type Severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+ InfoLog Severity = iota
+ WarningLog
+ ErrorLog
+ FatalLog
+ NumSeverity = 4
+)
+
+// Char contains one shortcut letter per severity level.
+const Char = "IWEF"
+
+// Name contains one name per severity level.
+var Name = []string{
+ InfoLog: "INFO",
+ WarningLog: "WARNING",
+ ErrorLog: "ERROR",
+ FatalLog: "FATAL",
+}
+
+// ByName looks up a severity level by name.
+func ByName(s string) (Severity, bool) {
+ s = strings.ToUpper(s)
+ for i, name := range Name {
+ if name == s {
+ return Severity(i), true
+ }
+ }
+ return 0, false
+}
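+
+// Illustrative sketch (not part of this package): lookups are
+// case-insensitive:
+//
+//	if s, ok := ByName("warning"); ok {
+//		fmt.Println(s == WarningLog) // prints "true"
+//	}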
diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go
new file mode 100644
index 0000000000..2c218f698c
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/k8s_references.go
@@ -0,0 +1,158 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/go-logr/logr"
+)
+
+// ObjectRef references a Kubernetes object.
+type ObjectRef struct {
+ Name string `json:"name"`
+ Namespace string `json:"namespace,omitempty"`
+}
+
+func (ref ObjectRef) String() string {
+ if ref.Namespace != "" {
+ return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name)
+ }
+ return ref.Name
+}
+
+// MarshalLog ensures that loggers with support for structured output will log
+// as a struct by removing the String method via a custom type.
+func (ref ObjectRef) MarshalLog() interface{} {
+ type or ObjectRef
+ return or(ref)
+}
+
+var _ logr.Marshaler = ObjectRef{}
+
+// KMetadata is a subset of the Kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object
+// interface. It may expand in the future, but will always remain a subset of
+// that interface.
+type KMetadata interface {
+ GetName() string
+ GetNamespace() string
+}
+
+// KObj returns an ObjectRef from ObjectMeta.
+func KObj(obj KMetadata) ObjectRef {
+ if obj == nil {
+ return ObjectRef{}
+ }
+ if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() {
+ return ObjectRef{}
+ }
+
+ return ObjectRef{
+ Name: obj.GetName(),
+ Namespace: obj.GetNamespace(),
+ }
+}
+
+// KRef returns an ObjectRef from a name and namespace.
+func KRef(namespace, name string) ObjectRef {
+ return ObjectRef{
+ Name: name,
+ Namespace: namespace,
+ }
+}
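+
+// Illustrative sketch (not part of klog itself): both helpers are typically
+// used as values in structured log calls; pod is assumed to implement
+// KMetadata:
+//
+//	klog.InfoS("syncing", "pod", klog.KObj(pod))
+//	klog.InfoS("deleting", "pod", klog.KRef("ns", "name"))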
+
+// KObjs returns a slice of ObjectRef from a slice of ObjectMeta
+//
+// DEPRECATED: Use KObjSlice instead, it has better performance.
+func KObjs(arg interface{}) []ObjectRef {
+ s := reflect.ValueOf(arg)
+ if s.Kind() != reflect.Slice {
+ return nil
+ }
+ objectRefs := make([]ObjectRef, 0, s.Len())
+ for i := 0; i < s.Len(); i++ {
+ if v, ok := s.Index(i).Interface().(KMetadata); ok {
+ objectRefs = append(objectRefs, KObj(v))
+ } else {
+ return nil
+ }
+ }
+ return objectRefs
+}
+
+// KObjSlice takes a slice of objects that implement the KMetadata interface
+// and returns an object that gets logged as a slice of ObjectRef values or a
+// string containing those values, depending on whether the logger prefers text
+// output or structured output.
+//
+// An error string is logged when KObjSlice is not passed a suitable slice.
+//
+// Processing of the argument is delayed until the value actually gets logged,
+// in contrast to KObjs where that overhead is incurred regardless of whether
+// the result is needed.
+func KObjSlice(arg interface{}) interface{} {
+ return kobjSlice{arg: arg}
+}
+
+type kobjSlice struct {
+ arg interface{}
+}
+
+var _ fmt.Stringer = kobjSlice{}
+var _ logr.Marshaler = kobjSlice{}
+
+func (ks kobjSlice) String() string {
+ objectRefs, err := ks.process()
+ if err != nil {
+ return err.Error()
+ }
+ return fmt.Sprintf("%v", objectRefs)
+}
+
+func (ks kobjSlice) MarshalLog() interface{} {
+ objectRefs, err := ks.process()
+ if err != nil {
+ return err.Error()
+ }
+ return objectRefs
+}
+
+func (ks kobjSlice) process() ([]interface{}, error) {
+ s := reflect.ValueOf(ks.arg)
+ switch s.Kind() {
+ case reflect.Invalid:
+ // nil parameter, print as nil.
+ return nil, nil
+ case reflect.Slice:
+ // Okay, handle below.
+ default:
+ return nil, fmt.Errorf("<KObjSlice needs a slice, got type %T>", ks.arg)
+ }
+ objectRefs := make([]interface{}, 0, s.Len())
+ for i := 0; i < s.Len(); i++ {
+ item := s.Index(i).Interface()
+ if item == nil {
+ objectRefs = append(objectRefs, nil)
+ } else if v, ok := item.(KMetadata); ok {
+ objectRefs = append(objectRefs, KObj(v))
+ } else {
+ return nil, fmt.Errorf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
+ }
+ }
+ return objectRefs, nil
+}
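+
+// Illustrative sketch (not part of klog itself): the slice is only inspected
+// when the entry actually gets emitted; pods is assumed to be a slice of
+// objects implementing KMetadata:
+//
+//	klog.V(5).InfoS("syncing", "pods", klog.KObjSlice(pods))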
diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
new file mode 100644
index 0000000000..1bd11b6754
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klog.go
@@ -0,0 +1,1689 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+// klog.Info("Prepare to repel boarders")
+//
+// klog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+// if klog.V(2).Enabled() {
+// klog.Info("Starting transaction...")
+// }
+//
+// klog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to standard error.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+// -logtostderr=true
+// Logs are written to standard error instead of to files.
+// This shortcuts most of the usual output routing:
+// -alsologtostderr, -stderrthreshold and -log_dir have no
+// effect and output redirection at runtime with SetOutput is
+// ignored.
+// -alsologtostderr=false
+// Logs are written to standard error as well as to files.
+// -stderrthreshold=ERROR
+// Log events at or above this severity are logged to standard
+// error as well as to files.
+// -log_dir=""
+// Log files will be written to this directory instead of the
+// default temporary directory.
+//
+// Other flags provide aids to debugging.
+//
+// -log_backtrace_at=""
+// When set to a file and line number holding a logging statement,
+// such as
+// -log_backtrace_at=gopherflakes.go:234
+// a stack trace will be written to the Info log whenever execution
+// hits that statement. (Unlike with -vmodule, the ".go" must be
+// present.)
+// -v=0
+// Enable V-leveled logging at the specified level.
+// -vmodule=""
+// The syntax of the argument is a comma-separated list of pattern=N,
+// where pattern is a literal file name (minus the ".go" suffix) or
+// "glob" pattern and N is a V level. For instance,
+// -vmodule=gopher*=3
+// sets the V level to 3 in all Go files whose names begin "gopher".
+package klog
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ stdLog "log"
+ "math"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-logr/logr"
+
+ "k8s.io/klog/v2/internal/buffer"
+ "k8s.io/klog/v2/internal/clock"
+ "k8s.io/klog/v2/internal/dbg"
+ "k8s.io/klog/v2/internal/serialize"
+ "k8s.io/klog/v2/internal/severity"
+)
+
+// severityValue identifies the sort of log: info, warning, etc. It also implements
+// the flag.Value interface. The -stderrthreshold flag is of type severityValue and
+// should be modified only through the flag.Value interface. The values match
+// the corresponding constants in C++.
+type severityValue struct {
+ severity.Severity
+}
+
+// get returns the value of the severity.
+func (s *severityValue) get() severity.Severity {
+ return severity.Severity(atomic.LoadInt32((*int32)(&s.Severity)))
+}
+
+// set sets the value of the severity.
+func (s *severityValue) set(val severity.Severity) {
+ atomic.StoreInt32((*int32)(&s.Severity), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (s *severityValue) String() string {
+ return strconv.FormatInt(int64(s.Severity), 10)
+}
+
+// Get is part of the flag.Getter interface.
+func (s *severityValue) Get() interface{} {
+ return s.Severity
+}
+
+// Set is part of the flag.Value interface.
+func (s *severityValue) Set(value string) error {
+ var threshold severity.Severity
+ // Is it a known name?
+ if v, ok := severity.ByName(value); ok {
+ threshold = v
+ } else {
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return err
+ }
+ threshold = severity.Severity(v)
+ }
+ logging.stderrThreshold.set(threshold)
+ return nil
+}
+
+// OutputStats tracks the number of output lines and bytes written.
+type OutputStats struct {
+ lines int64
+ bytes int64
+}
+
+// Lines returns the number of lines written.
+func (s *OutputStats) Lines() int64 {
+ return atomic.LoadInt64(&s.lines)
+}
+
+// Bytes returns the number of bytes written.
+func (s *OutputStats) Bytes() int64 {
+ return atomic.LoadInt64(&s.bytes)
+}
+
+// Stats tracks the number of lines of output and number of bytes
+// per severity level. Values must be read with atomic.LoadInt64.
+var Stats struct {
+ Info, Warning, Error OutputStats
+}
+
+var severityStats = [severity.NumSeverity]*OutputStats{
+ severity.InfoLog: &Stats.Info,
+ severity.WarningLog: &Stats.Warning,
+ severity.ErrorLog: &Stats.Error,
+}
+
+// Level is exported because it appears in the arguments to V and is
+// the type of the v flag, which can be set programmatically.
+// It's a distinct type because we want to discriminate it from logType.
+// Variables of type Level are only changed under logging.mu.
+// The -v flag is read only with atomic ops, so the state of the logging
+// module is consistent.
+
+// Level is treated as a sync/atomic int32.
+
+// Level specifies a level of verbosity for V logs. *Level implements
+// flag.Value; the -v flag is of type Level and should be modified
+// only through the flag.Value interface.
+type Level int32
+
+// get returns the value of the Level.
+func (l *Level) get() Level {
+ return Level(atomic.LoadInt32((*int32)(l)))
+}
+
+// set sets the value of the Level.
+func (l *Level) set(val Level) {
+ atomic.StoreInt32((*int32)(l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *Level) String() string {
+ return strconv.FormatInt(int64(*l), 10)
+}
+
+// Get is part of the flag.Getter interface.
+func (l *Level) Get() interface{} {
+ return *l
+}
+
+// Set is part of the flag.Value interface.
+func (l *Level) Set(value string) error {
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return err
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(Level(v), logging.vmodule.filter, false)
+ return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+ filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+ pattern string
+ literal bool // The pattern is a literal string
+ level Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+ if m.literal {
+ return file == m.pattern
+ }
+ match, _ := filepath.Match(m.pattern, file)
+ return match
+}
+
+func (m *moduleSpec) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ return m.serialize()
+}
+
+func (m *moduleSpec) serialize() string {
+ var b bytes.Buffer
+ for i, f := range m.filter {
+ if i > 0 {
+ b.WriteRune(',')
+ }
+ fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+ }
+ return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+ return nil
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Set sets the module value.
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+ filter, err := parseModuleSpec(value)
+ if err != nil {
+ return err
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(logging.verbosity, filter, true)
+ return nil
+}
+
+func parseModuleSpec(value string) ([]modulePat, error) {
+ var filter []modulePat
+ for _, pat := range strings.Split(value, ",") {
+ if len(pat) == 0 {
+ // Empty strings such as from a trailing comma can be ignored.
+ continue
+ }
+ patLev := strings.Split(pat, "=")
+ if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+ return nil, errVmoduleSyntax
+ }
+ pattern := patLev[0]
+ v, err := strconv.ParseInt(patLev[1], 10, 32)
+ if err != nil {
+ return nil, errors.New("syntax error: expect comma-separated list of filename=N")
+ }
+ if v < 0 {
+ return nil, errors.New("negative value for vmodule level")
+ }
+ if v == 0 {
+ continue // Ignore. It's harmless but no point in paying the overhead.
+ }
+ // TODO: check syntax of filter?
+ filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+ }
+ return filter, nil
+}
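+
+// Illustrative sketch (not part of klog itself): a valid spec and its
+// effect:
+//
+//	filter, err := parseModuleSpec("gopher*=3,file=1")
+//	// err == nil; filter has two entries which are matched against file
+//	// basenames (without the ".go" suffix) when deciding whether a V(n)
+//	// call fires.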
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+ return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+ file string
+ line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+ return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+ if t.line != line {
+ return false
+ }
+ if i := strings.LastIndex(file, "/"); i >= 0 {
+ file = file[i+1:]
+ }
+ return t.file == file
+}
+
+func (t *traceLocation) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (t *traceLocation) Get() interface{} {
+ return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Set sets the backtrace value.
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+ if value == "" {
+ // Unset.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ t.line = 0
+ t.file = ""
+ return nil
+ }
+ fields := strings.Split(value, ":")
+ if len(fields) != 2 {
+ return errTraceSyntax
+ }
+ file, line := fields[0], fields[1]
+ if !strings.Contains(file, ".") {
+ return errTraceSyntax
+ }
+ v, err := strconv.Atoi(line)
+ if err != nil {
+ return errTraceSyntax
+ }
+ if v <= 0 {
+ return errors.New("negative or zero value for level")
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ t.line = v
+ t.file = file
+ return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+ Flush() error
+ Sync() error
+ io.Writer
+}
+
+var logging loggingT
+var commandLine flag.FlagSet
+
+// init sets up the defaults and creates command line flags.
+func init() {
+ commandLine.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory (no effect when -logtostderr=true)")
+ commandLine.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file (no effect when -logtostderr=true)")
+ commandLine.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800,
+ "Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. "+
+ "If the value is 0, the maximum file size is unlimited.")
+ commandLine.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files")
+ commandLine.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files (no effect when -logtostderr=true)")
+ logging.setVState(0, nil, false)
+ commandLine.Var(&logging.verbosity, "v", "number for the log level verbosity")
+ commandLine.BoolVar(&logging.addDirHeader, "add_dir_header", false, "If true, adds the file directory to the header of the log messages")
+ commandLine.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages")
+ commandLine.BoolVar(&logging.oneOutput, "one_output", false, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)")
+ commandLine.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files (no effect when -logtostderr=true)")
+ logging.stderrThreshold = severityValue{
+ Severity: severity.ErrorLog, // Default stderrThreshold is ERROR.
+ }
+ commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)")
+ commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+ commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+ logging.settings.contextualLoggingEnabled = true
+ logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil)
+}
+
+// InitFlags is for explicitly initializing the flags.
+// It may get called repeatedly for different flagsets, but not
+// twice for the same one. It may get called concurrently
+// with other goroutines using klog. However, only some flags
+// may get set concurrently (see implementation).
+func InitFlags(flagset *flag.FlagSet) {
+ if flagset == nil {
+ flagset = flag.CommandLine
+ }
+
+ commandLine.VisitAll(func(f *flag.Flag) {
+ flagset.Var(f.Value, f.Name, f.Usage)
+ })
+}
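+
+// Illustrative sketch (not part of klog itself): registering klog's flags on
+// the default flag set during startup:
+//
+//	func main() {
+//		klog.InitFlags(nil) // nil means flag.CommandLine
+//		flag.Parse()
+//		defer klog.Flush()
+//		klog.Info("started")
+//	}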
+
+// Flush flushes all pending log I/O.
+func Flush() {
+ logging.lockAndFlushAll()
+}
+
+// settings collects global settings.
+type settings struct {
+ // contextualLoggingEnabled controls whether contextual logging is
+ // active. Disabling it may have some small performance benefit.
+ contextualLoggingEnabled bool
+
+ // logger is the global Logger chosen by users of klog, nil if
+ // none is available.
+ logger *Logger
+
+ // loggerOptions contains the options that were supplied for
+ // logger.
+ loggerOptions loggerOptions
+
+ // Boolean flags. Not handled atomically because the flag.Value interface
+ // does not let us avoid the =true, and that shorthand is necessary for
+ // compatibility. TODO: does this matter enough to fix? Seems unlikely.
+ toStderr bool // The -logtostderr flag.
+ alsoToStderr bool // The -alsologtostderr flag.
+
+ // Level flag. Handled atomically.
+ stderrThreshold severityValue // The -stderrthreshold flag.
+
+ // Access to all of the following fields must be protected via a mutex.
+
+ // file holds writer for each of the log types.
+ file [severity.NumSeverity]flushSyncWriter
+ // flushInterval is the interval for periodic flushing. If zero,
+ // the global default will be used.
+ flushInterval time.Duration
+
+ // filterLength stores the length of the vmodule filter chain. If greater
+ // than zero, it means vmodule is enabled. It may be read safely
+ // using atomic.LoadInt32, but is only modified under mu.
+ filterLength int32
+ // traceLocation is the state of the -log_backtrace_at flag.
+ traceLocation traceLocation
+ // These flags are modified only under lock, although verbosity may be fetched
+ // safely using atomic.LoadInt32.
+ vmodule moduleSpec // The state of the -vmodule flag.
+ verbosity Level // V logging level, the value of the -v flag.
+
+ // If non-empty, overrides the choice of directory in which to write logs.
+ // See createLogDirs for the full list of possible destinations.
+ logDir string
+
+ // If non-empty, specifies the path of the file to write logs. Mutually exclusive
+ // with the log_dir option.
+ logFile string
+
+ // When logFile is specified, this limiter makes sure the logFile won't exceed a certain size. When it does,
+ // the logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile.
+ logFileMaxSizeMB uint64
+
+ // If true, do not add the prefix headers, useful when used with SetOutput
+ skipHeaders bool
+
+ // If true, do not add the headers to log files
+ skipLogHeaders bool
+
+ // If true, add the file directory to the header
+ addDirHeader bool
+
+ // If true, messages will not be propagated to lower severity log levels
+ oneOutput bool
+
+ // If set, all output will be filtered through the filter.
+ filter LogFilter
+}
+
+// deepCopy creates a copy that doesn't share anything with the original
+// instance.
+func (s settings) deepCopy() settings {
+ // vmodule is a slice and would be shared, so we have to copy it.
+ filter := make([]modulePat, len(s.vmodule.filter))
+ for i := range s.vmodule.filter {
+ filter[i] = s.vmodule.filter[i]
+ }
+ s.vmodule.filter = filter
+
+ return s
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+ settings
+
+ // bufferCache maintains the free list. It uses its own mutex
+ // so buffers can be grabbed and printed to without holding the main lock,
+ // for better parallelization.
+ bufferCache buffer.Buffers
+
+ // flushD holds a flushDaemon that frequently flushes log file buffers.
+ // Uses its own mutex.
+ flushD *flushDaemon
+
+ // mu protects the remaining elements of this structure and the fields
+ // in settings which need a mutex lock.
+ mu sync.Mutex
+
+ // pcs is used in V to avoid an allocation when computing the caller's PC.
+ pcs [1]uintptr
+ // vmap is a cache of the V Level for each V() call site, identified by PC.
+ // It is wiped whenever the vmodule flag changes state.
+ vmap map[uintptr]Level
+}
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+ // Turn verbosity off so V will not fire while we are in transition.
+ l.verbosity.set(0)
+ // Ditto for filter length.
+ atomic.StoreInt32(&l.filterLength, 0)
+
+ // Set the new filters and wipe the pc->Level map if the filter has changed.
+ if setFilter {
+ l.vmodule.filter = filter
+ l.vmap = make(map[uintptr]Level)
+ }
+
+ // Things are consistent now, so enable filtering and verbosity.
+ // They are enabled in order opposite to that in V.
+ atomic.StoreInt32(&l.filterLength, int32(len(filter)))
+ l.verbosity.set(verbosity)
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+// CaptureState gathers information about all current klog settings.
+// The result can be used to restore those settings.
+func CaptureState() State {
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ return &state{
+ settings: logging.settings.deepCopy(),
+ flushDRunning: logging.flushD.isRunning(),
+ maxSize: MaxSize,
+ }
+}
+
+// State stores a snapshot of klog settings. It gets created with CaptureState
+// and can be used to restore the entire state. Modifying individual settings
+// is supported via the command line flags.
+type State interface {
+ // Restore restores the entire state. It may get called more than once.
+ Restore()
+}
+
+type state struct {
+ settings
+
+ flushDRunning bool
+ maxSize uint64
+}
+
+func (s *state) Restore() {
+ // This needs to be done before mutex locking.
+ if s.flushDRunning && !logging.flushD.isRunning() {
+ // This is not quite accurate: StartFlushDaemon might
+ // have been called with some different interval.
+ interval := s.flushInterval
+ if interval == 0 {
+ interval = flushInterval
+ }
+ logging.flushD.run(interval)
+ } else if !s.flushDRunning && logging.flushD.isRunning() {
+ logging.flushD.stop()
+ }
+
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+
+ logging.settings = s.settings
+ logging.setVState(s.verbosity, s.vmodule.filter, true)
+ MaxSize = s.maxSize
+}
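+
+// Illustrative sketch (not part of klog itself): saving and restoring the
+// settings around a test:
+//
+//	state := klog.CaptureState()
+//	defer state.Restore()
+//	// ... mutate flags and run the test ...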
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many stack frames above the caller the source line to be identified in the log message lies.
+
+Log lines have this form:
+
+ Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+
+where the fields are defined as follows:
+
+ L A single character, representing the log level (e.g. 'I' for INFO)
+ mm The month (zero padded; i.e., May is '05')
+ dd The day (zero padded)
+ hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
+ threadid The space-padded thread ID as returned by GetTID()
+ file The file name
+ line The line number
+ msg The user-supplied message
+*/
+func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, string, int) {
+ _, file, line, ok := runtime.Caller(3 + depth)
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ path := file
+ file = path[slash+1:]
+ if l.addDirHeader {
+ if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 {
+ file = path[dirsep+1:]
+ }
+ }
+ }
+ }
+ return l.formatHeader(s, file, line), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer {
+ buf := l.bufferCache.GetBuffer()
+ if l.skipHeaders {
+ return buf
+ }
+ now := timeNow()
+ buf.FormatHeader(s, file, line, now)
+ return buf
+}
+
+func (l *loggingT) println(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) {
+ l.printlnDepth(s, logger, filter, 1, args...)
+}
+
+func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) {
+ buf, file, line := l.header(s, depth)
+ // if logger is set, we clear the generated header as we rely on the backing
+ // logger implementation to print headers
+ if logger != nil {
+ l.bufferCache.PutBuffer(buf)
+ buf = l.bufferCache.GetBuffer()
+ }
+ if filter != nil {
+ args = filter.Filter(args)
+ }
+ fmt.Fprintln(buf, args...)
+ l.output(s, logger, buf, depth, file, line, false)
+}
+
+func (l *loggingT) print(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) {
+ l.printDepth(s, logger, filter, 1, args...)
+}
+
+func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) {
+ buf, file, line := l.header(s, depth)
+ // if logr is set, we clear the generated header as we rely on the backing
+ // logr implementation to print headers
+ if logger != nil {
+ l.bufferCache.PutBuffer(buf)
+ buf = l.bufferCache.GetBuffer()
+ }
+ if filter != nil {
+ args = filter.Filter(args)
+ }
+ fmt.Fprint(buf, args...)
+ if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, logger, buf, depth, file, line, false)
+}
+
+func (l *loggingT) printf(s severity.Severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) {
+ l.printfDepth(s, logger, filter, 1, format, args...)
+}
+
+func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, format string, args ...interface{}) {
+ buf, file, line := l.header(s, depth)
+ // if logr is set, we clear the generated header as we rely on the backing
+ // logr implementation to print headers
+ if logger != nil {
+ l.bufferCache.PutBuffer(buf)
+ buf = l.bufferCache.GetBuffer()
+ }
+ if filter != nil {
+ format, args = filter.FilterF(format, args)
+ }
+ fmt.Fprintf(buf, format, args...)
+ // Guard against an empty buffer (e.g. an empty format string) before indexing.
+ if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, logger, buf, depth, file, line, false)
+}
+
+// printWithFileLine behaves like print but uses the provided file and line number. If
+// alsoToStderr is true, the log message always appears on standard error; it
+// will also appear in the log file unless -logtostderr is set.
+func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) {
+ buf := l.formatHeader(s, file, line)
+ // if logr is set, we clear the generated header as we rely on the backing
+ // logr implementation to print headers
+ if logger != nil {
+ l.bufferCache.PutBuffer(buf)
+ buf = l.bufferCache.GetBuffer()
+ }
+ if filter != nil {
+ args = filter.Filter(args)
+ }
+ fmt.Fprint(buf, args...)
+ // Guard against an empty buffer before indexing, as in printDepth above.
+ if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr)
+}
+
+// errorS, if a logger is specified, calls logger.Error; otherwise it writes the output via the logging module.
+func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
+ if filter != nil {
+ msg, keysAndValues = filter.FilterS(msg, keysAndValues)
+ }
+ if logger != nil {
+ logger.WithCallDepth(depth+2).Error(err, msg, keysAndValues...)
+ return
+ }
+ l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...)
+}
+
+// infoS, if a logger is specified, calls logger.Info; otherwise it writes the output via the logging module.
+func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
+ if filter != nil {
+ msg, keysAndValues = filter.FilterS(msg, keysAndValues)
+ }
+ if logger != nil {
+ logger.WithCallDepth(depth+2).Info(msg, keysAndValues...)
+ return
+ }
+ l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...)
+}
+
+// printS is called from infoS and errorS if no logger is specified.
+// The log severity is set by s.
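+//
+// For example, a call like printS(nil, severity.InfoLog, 1, "Pod updated",
+// "pod", "kubedns") would emit (roughly) a line whose body is:
+//
+//	"Pod updated" pod="kubedns"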
+func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) {
+ // Only create a new buffer if we don't have one cached.
+ b := l.bufferCache.GetBuffer()
+ // The message is always quoted, even if it contains line breaks.
+ // If developers want multi-line output, they should use a small, fixed
+ // message and put the multi-line output into a value.
+ b.WriteString(strconv.Quote(msg))
+ if err != nil {
+ serialize.KVListFormat(&b.Buffer, "err", err)
+ }
+ serialize.KVListFormat(&b.Buffer, keysAndValues...)
+ l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer)
+ // Make the buffer available for reuse.
+ l.bufferCache.PutBuffer(b)
+}
+
+// redirectBuffer is used to set an alternate destination for the logs
+type redirectBuffer struct {
+ w io.Writer
+}
+
+func (rb *redirectBuffer) Sync() error {
+ return nil
+}
+
+func (rb *redirectBuffer) Flush() error {
+ return nil
+}
+
+func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
+ return rb.w.Write(bytes)
+}
+
+// SetOutput sets the output destination for all severities
+func SetOutput(w io.Writer) {
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ for s := severity.FatalLog; s >= severity.InfoLog; s-- {
+ rb := &redirectBuffer{
+ w: w,
+ }
+ logging.file[s] = rb
+ }
+}
+
+// SetOutputBySeverity sets the output destination for a specific severity
+func SetOutputBySeverity(name string, w io.Writer) {
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ sev, ok := severity.ByName(name)
+ if !ok {
+ panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
+ }
+ rb := &redirectBuffer{
+ w: w,
+ }
+ logging.file[sev] = rb
+}
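+
+// As an illustrative sketch (logFile here is a hypothetical io.Writer owned by
+// the caller), output can be split per severity:
+//
+//	klog.SetOutput(os.Stdout)
+//	klog.SetOutputBySeverity("ERROR", logFile)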
+
+// LogToStderr sets whether to log exclusively to stderr, bypassing outputs
+func LogToStderr(stderr bool) {
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+
+ logging.toStderr = stderr
+}
+
+// output writes the data to the log files and releases the buffer.
+func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) {
+ var isLocked = true
+ l.mu.Lock()
+ defer func() {
+ if isLocked {
+ // Unlock before returning in case it wasn't done already.
+ l.mu.Unlock()
+ }
+ }()
+
+ if l.traceLocation.isSet() {
+ if l.traceLocation.match(file, line) {
+ buf.Write(dbg.Stacks(false))
+ }
+ }
+ data := buf.Bytes()
+ if log != nil {
+ // TODO: set 'severity' and caller information as structured log info
+ // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line}
+ if s == severity.ErrorLog {
+ logging.logger.WithCallDepth(depth+3).Error(nil, string(data))
+ } else {
+ log.WithCallDepth(depth + 3).Info(string(data))
+ }
+ } else if l.toStderr {
+ os.Stderr.Write(data)
+ } else {
+ if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+ os.Stderr.Write(data)
+ }
+
+ if logging.logFile != "" {
+ // Since we are using a single log file, all of the items in the l.file array
+ // will point to the same file, so just use one of them to write data.
+ if l.file[severity.InfoLog] == nil {
+ if err := l.createFiles(severity.InfoLog); err != nil {
+ os.Stderr.Write(data) // Make sure the message appears somewhere.
+ l.exit(err)
+ }
+ }
+ l.file[severity.InfoLog].Write(data)
+ } else {
+ if l.file[s] == nil {
+ if err := l.createFiles(s); err != nil {
+ os.Stderr.Write(data) // Make sure the message appears somewhere.
+ l.exit(err)
+ }
+ }
+
+ if l.oneOutput {
+ l.file[s].Write(data)
+ } else {
+ switch s {
+ case severity.FatalLog:
+ l.file[severity.FatalLog].Write(data)
+ fallthrough
+ case severity.ErrorLog:
+ l.file[severity.ErrorLog].Write(data)
+ fallthrough
+ case severity.WarningLog:
+ l.file[severity.WarningLog].Write(data)
+ fallthrough
+ case severity.InfoLog:
+ l.file[severity.InfoLog].Write(data)
+ }
+ }
+ }
+ }
+ if s == severity.FatalLog {
+ // If we got here via Exit rather than Fatal, print no stacks.
+ if atomic.LoadUint32(&fatalNoStacks) > 0 {
+ l.mu.Unlock()
+ isLocked = false
+ timeoutFlush(ExitFlushTimeout)
+ OsExit(1)
+ }
+ // Dump all goroutine stacks before exiting.
+ // First, make sure we see the trace for the current goroutine on standard error.
+ // If -logtostderr has been specified, the loop below will do that anyway
+ // as the first stack in the full dump.
+ if !l.toStderr {
+ os.Stderr.Write(dbg.Stacks(false))
+ }
+
+ // Write the stack trace for all goroutines to the files.
+ trace := dbg.Stacks(true)
+ logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
+ for log := severity.FatalLog; log >= severity.InfoLog; log-- {
+ if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
+ f.Write(trace)
+ }
+ }
+ l.mu.Unlock()
+ isLocked = false
+ timeoutFlush(ExitFlushTimeout)
+ OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
+ }
+ l.bufferCache.PutBuffer(buf)
+
+ if stats := severityStats[s]; stats != nil {
+ atomic.AddInt64(&stats.lines, 1)
+ atomic.AddInt64(&stats.bytes, int64(len(data)))
+ }
+}
+
+// logExitFunc provides a simple mechanism to override the default behavior
+// of exiting on error. It is used in testing and to guarantee we reach a
+// required exit for fatal logs. exit could instead be a function rather than a
+// method, but that would make its use clumsier.
+var logExitFunc func(error)
+
+// exit is called if there is trouble creating or writing log files.
+// It flushes the logs and exits the program; there's no point in hanging around.
+// l.mu is held.
+func (l *loggingT) exit(err error) {
+ fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
+ // If logExitFunc is set, we do that instead of exiting.
+ if logExitFunc != nil {
+ logExitFunc(err)
+ return
+ }
+ l.flushAll()
+ OsExit(2)
+}
+
+// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
+// file's Sync method and providing a wrapper for the Write method that provides log
+// file rotation. There are conflicting methods, so the file cannot be embedded.
+// l.mu is held for all its methods.
+type syncBuffer struct {
+ logger *loggingT
+ *bufio.Writer
+ file *os.File
+ sev severity.Severity
+ nbytes uint64 // The number of bytes written to this file
+ maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
+}
+
+func (sb *syncBuffer) Sync() error {
+ return sb.file.Sync()
+}
+
+// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
+func CalculateMaxSize() uint64 {
+ if logging.logFile != "" {
+ if logging.logFileMaxSizeMB == 0 {
+ // If logFileMaxSizeMB is zero, we don't have limitations on the log size.
+ return math.MaxUint64
+ }
+ // Flag logFileMaxSizeMB is in MB for user convenience.
+ return logging.logFileMaxSizeMB * 1024 * 1024
+ }
+ // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size.
+ return MaxSize
+}
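+
+// For example, with the log_file flag set and log_file_max_size=100, the
+// effective limit is 100 * 1024 * 1024 = 104857600 bytes; a value of 0
+// disables the limit entirely (math.MaxUint64).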
+
+func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+ if sb.nbytes+uint64(len(p)) >= sb.maxbytes {
+ if err := sb.rotateFile(time.Now(), false); err != nil {
+ sb.logger.exit(err)
+ }
+ }
+ n, err = sb.Writer.Write(p)
+ sb.nbytes += uint64(n)
+ if err != nil {
+ sb.logger.exit(err)
+ }
+ return
+}
+
+// rotateFile closes the syncBuffer's file and starts a new one.
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error {
+ if sb.file != nil {
+ sb.Flush()
+ sb.file.Close()
+ }
+ var err error
+ sb.file, _, err = create(severity.Name[sb.sev], now, startup)
+ if err != nil {
+ return err
+ }
+ if startup {
+ fileInfo, err := sb.file.Stat()
+ if err != nil {
+ return fmt.Errorf("file stat could not get fileinfo: %v", err)
+ }
+ // Initialize nbytes with the size of the existing file.
+ sb.nbytes = uint64(fileInfo.Size())
+ } else {
+ sb.nbytes = 0
+ }
+ sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
+
+ if sb.logger.skipLogHeaders {
+ return nil
+ }
+
+ // Write header.
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
+ fmt.Fprintf(&buf, "Running on machine: %s\n", host)
+ fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+ fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+ n, err := sb.file.Write(buf.Bytes())
+ sb.nbytes += uint64(n)
+ return err
+}
+
+// bufferSize sizes the buffer associated with each log file. It's large
+// so that log records can accumulate without the logging thread blocking
+// on disk I/O. The flushDaemon will block instead.
+const bufferSize = 256 * 1024
+
+// createFiles creates all the log files for severity from sev down to infoLog.
+// l.mu is held.
+func (l *loggingT) createFiles(sev severity.Severity) error {
+ interval := l.flushInterval
+ if interval == 0 {
+ interval = flushInterval
+ }
+ l.flushD.run(interval)
+ now := time.Now()
+ // Files are created in decreasing severity order, so as soon as we find one
+ // has already been created, we can stop.
+ for s := sev; s >= severity.InfoLog && l.file[s] == nil; s-- {
+ sb := &syncBuffer{
+ logger: l,
+ sev: s,
+ maxbytes: CalculateMaxSize(),
+ }
+ if err := sb.rotateFile(now, true); err != nil {
+ return err
+ }
+ l.file[s] = sb
+ }
+ return nil
+}
+
+const flushInterval = 5 * time.Second
+
+// flushDaemon periodically flushes the log file buffers.
+type flushDaemon struct {
+ mu sync.Mutex
+ clock clock.WithTicker
+ flush func()
+ stopC chan struct{}
+ stopDone chan struct{}
+}
+
+// newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a
+// clock.RealClock is used.
+func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon {
+ if tickClock == nil {
+ tickClock = clock.RealClock{}
+ }
+ return &flushDaemon{
+ flush: flush,
+ clock: tickClock,
+ }
+}
+
+// run starts a goroutine that periodically calls the daemon's flush function.
+// Calling run on an already running daemon will have no effect.
+func (f *flushDaemon) run(interval time.Duration) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ if f.stopC != nil { // daemon already running
+ return
+ }
+
+ f.stopC = make(chan struct{}, 1)
+ f.stopDone = make(chan struct{}, 1)
+
+ ticker := f.clock.NewTicker(interval)
+ go func() {
+ defer ticker.Stop()
+ defer func() { f.stopDone <- struct{}{} }()
+ for {
+ select {
+ case <-ticker.C():
+ f.flush()
+ case <-f.stopC:
+ f.flush()
+ return
+ }
+ }
+ }()
+}
+
+// stop stops the running flushDaemon and waits until the daemon has shut down.
+// Calling stop on a daemon that isn't running will have no effect.
+func (f *flushDaemon) stop() {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ if f.stopC == nil { // daemon not running
+ return
+ }
+
+ f.stopC <- struct{}{}
+ <-f.stopDone
+
+ f.stopC = nil
+ f.stopDone = nil
+}
+
+// isRunning returns true if the flush daemon is running.
+func (f *flushDaemon) isRunning() bool {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return f.stopC != nil
+}
+
+// StopFlushDaemon stops the flush daemon, if running, and flushes once.
+// This prevents klog from leaking goroutines on shutdown. After stopping
+// the daemon, you can still manually flush buffers again by calling Flush().
+func StopFlushDaemon() {
+ logging.flushD.stop()
+}
+
+// StartFlushDaemon ensures that the flush daemon runs with the given delay
+// between flush calls. If it is already running, it gets restarted.
+func StartFlushDaemon(interval time.Duration) {
+ StopFlushDaemon()
+ logging.flushD.run(interval)
+}
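+
+// An illustrative shutdown sequence using these helpers:
+//
+//	klog.StartFlushDaemon(10 * time.Second)
+//	// ... run the program ...
+//	klog.StopFlushDaemon() // stops the goroutine and flushes once
+//	klog.Flush()           // optional final flush before exit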
+
+// lockAndFlushAll is like flushAll but locks l.mu first.
+func (l *loggingT) lockAndFlushAll() {
+ l.mu.Lock()
+ l.flushAll()
+ l.mu.Unlock()
+}
+
+// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// l.mu is held.
+func (l *loggingT) flushAll() {
+ // Flush from fatal down, in case there's trouble flushing.
+ for s := severity.FatalLog; s >= severity.InfoLog; s-- {
+ file := l.file[s]
+ if file != nil {
+ file.Flush() // ignore error
+ file.Sync() // ignore error
+ }
+ }
+ if logging.loggerOptions.flush != nil {
+ logging.loggerOptions.flush()
+ }
+}
+
+// CopyStandardLogTo arranges for messages written to the Go "log" package's
+// default logs to also appear in the Google logs for the named and lower
+// severities. Subsequent changes to the standard log's default output location
+// or format may break this behavior.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
+// recognized, CopyStandardLogTo panics.
+func CopyStandardLogTo(name string) {
+ sev, ok := severity.ByName(name)
+ if !ok {
+ panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+ }
+ // Set a log format that captures the user's file and line:
+ // d.go:23: message
+ stdLog.SetFlags(stdLog.Lshortfile)
+ stdLog.SetOutput(logBridge(sev))
+}
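+
+// Illustrative use from application code:
+//
+//	klog.CopyStandardLogTo("INFO")
+//	log.Print("now routed through klog") // standard library "log" package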
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity.Severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+ var (
+ file = "???"
+ line = 1
+ text string
+ )
+ // Split "d.go:23: message" into "d.go", "23", and "message".
+ if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+ text = fmt.Sprintf("bad log format: %s", b)
+ } else {
+ file = string(parts[0])
+ text = string(parts[2][1:]) // skip leading space
+ line, err = strconv.Atoi(string(parts[1]))
+ if err != nil {
+ text = fmt.Sprintf("bad line number: %s", b)
+ line = 1
+ }
+ }
+ // printWithFileLine with alsoToStderr=true, so standard log messages
+ // always appear on standard error.
+ logging.printWithFileLine(severity.Severity(lb), logging.logger, logging.filter, file, line, true, text)
+ return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+ fn := runtime.FuncForPC(pc)
+ file, _ := fn.FileLine(pc)
+ // The file is something like /a/b/c/d.go. We want just the d.
+ if strings.HasSuffix(file, ".go") {
+ file = file[:len(file)-3]
+ }
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ file = file[slash+1:]
+ }
+ for _, filter := range l.vmodule.filter {
+ if filter.match(file) {
+ l.vmap[pc] = filter.level
+ return filter.level
+ }
+ }
+ l.vmap[pc] = 0
+ return 0
+}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose struct {
+ enabled bool
+ logr *logr.Logger
+}
+
+func newVerbose(level Level, b bool) Verbose {
+ if logging.logger == nil {
+ return Verbose{b, nil}
+ }
+ v := logging.logger.V(int(level))
+ return Verbose{b, &v}
+}
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a struct of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+//
+// if klog.V(2).Enabled() { klog.Info("log this") }
+//
+// or
+//
+// klog.V(2).Info("log this")
+//
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and -vmodule flags; both are off by default. The V call will log if its level
+// is less than or equal to the value of the -v flag, or alternatively if its level is
+// less than or equal to the value of the -vmodule pattern matching the source file
+// containing the call.
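+//
+// For example (illustrative), with -v=1 -vmodule=gopher*=3, klog.V(3) is
+// enabled only in files matching "gopher*", while klog.V(1) is enabled
+// everywhere.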
+func V(level Level) Verbose {
+ // This function tries hard to be cheap unless there's work to do.
+ // The fast path is two atomic loads and compares.
+
+ // Here is a cheap but safe test to see if V logging is enabled globally.
+ if logging.verbosity.get() >= level {
+ return newVerbose(level, true)
+ }
+
+ // It's off globally but vmodule may still be set.
+ // Here is another cheap but safe test to see if vmodule is enabled.
+ if atomic.LoadInt32(&logging.filterLength) > 0 {
+ // Now we need a proper lock to use the logging structure. The pcs field
+ // is shared so we must lock before accessing it. This is fairly expensive,
+ // but if V logging is enabled we're slow anyway.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ if runtime.Callers(2, logging.pcs[:]) == 0 {
+ return newVerbose(level, false)
+ }
+ // runtime.Callers returns "return PCs", but we want
+ // to look up the symbolic information for the call,
+ // so subtract 1 from the PC. runtime.CallersFrames
+ // would be cleaner, but allocates.
+ pc := logging.pcs[0] - 1
+ v, ok := logging.vmap[pc]
+ if !ok {
+ v = logging.setV(pc)
+ }
+ return newVerbose(level, v >= level)
+ }
+ return newVerbose(level, false)
+}
+
+// Enabled will return true if this log level is enabled, guarded by the value
+// of v.
+// See the documentation of V for usage.
+func (v Verbose) Enabled() bool {
+ return v.enabled
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+ if v.enabled {
+ logging.print(severity.InfoLog, v.logr, logging.filter, args...)
+ }
+}
+
+// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) InfoDepth(depth int, args ...interface{}) {
+ if v.enabled {
+ logging.printDepth(severity.InfoLog, v.logr, logging.filter, depth, args...)
+ }
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+ if v.enabled {
+ logging.println(severity.InfoLog, v.logr, logging.filter, args...)
+ }
+}
+
+// InfolnDepth is equivalent to the global InfolnDepth function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) InfolnDepth(depth int, args ...interface{}) {
+ if v.enabled {
+ logging.printlnDepth(severity.InfoLog, v.logr, logging.filter, depth, args...)
+ }
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+ if v.enabled {
+ logging.printf(severity.InfoLog, v.logr, logging.filter, format, args...)
+ }
+}
+
+// InfofDepth is equivalent to the global InfofDepth function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) {
+ if v.enabled {
+ logging.printfDepth(severity.InfoLog, v.logr, logging.filter, depth, format, args...)
+ }
+}
+
+// InfoS is equivalent to the global InfoS function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) {
+ if v.enabled {
+ logging.infoS(v.logr, logging.filter, 0, msg, keysAndValues...)
+ }
+}
+
+// InfoSDepth acts as InfoS but uses depth to determine which call frame to log.
+// InfoSDepth(0, "msg") is the same as InfoS("msg").
+func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) {
+ logging.infoS(logging.logger, logging.filter, depth, msg, keysAndValues...)
+}
+
+// InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) {
+ if v.enabled {
+ logging.infoS(v.logr, logging.filter, depth, msg, keysAndValues...)
+ }
+}
+
+// Deprecated: Use ErrorS instead.
+func (v Verbose) Error(err error, msg string, args ...interface{}) {
+ if v.enabled {
+ logging.errorS(err, v.logr, logging.filter, 0, msg, args...)
+ }
+}
+
+// ErrorS is equivalent to the global Error function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) {
+ if v.enabled {
+ logging.errorS(err, v.logr, logging.filter, 0, msg, keysAndValues...)
+ }
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+ logging.print(severity.InfoLog, logging.logger, logging.filter, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+ logging.printDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Infoln(args ...interface{}) {
+ logging.println(severity.InfoLog, logging.logger, logging.filter, args...)
+}
+
+// InfolnDepth acts as Infoln but uses depth to determine which call frame to log.
+// InfolnDepth(0, "msg") is the same as Infoln("msg").
+func InfolnDepth(depth int, args ...interface{}) {
+ logging.printlnDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+ logging.printf(severity.InfoLog, logging.logger, logging.filter, format, args...)
+}
+
+// InfofDepth acts as Infof but uses depth to determine which call frame to log.
+// InfofDepth(0, "msg", args...) is the same as Infof("msg", args...).
+func InfofDepth(depth int, format string, args ...interface{}) {
+ logging.printfDepth(severity.InfoLog, logging.logger, logging.filter, depth, format, args...)
+}
+
+// InfoS writes a structured log entry to the INFO log.
+// The msg argument is used to add a constant description to the log line.
+// The key/value pairs are joined by "="; a newline is always appended.
+//
+// Basic examples:
+// >> klog.InfoS("Pod status updated", "pod", "kubedns", "status", "ready")
+// output:
+// >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready"
+func InfoS(msg string, keysAndValues ...interface{}) {
+ logging.infoS(logging.logger, logging.filter, 0, msg, keysAndValues...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+ logging.print(severity.WarningLog, logging.logger, logging.filter, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+ logging.printDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Warningln(args ...interface{}) {
+ logging.println(severity.WarningLog, logging.logger, logging.filter, args...)
+}
+
+// WarninglnDepth acts as Warningln but uses depth to determine which call frame to log.
+// WarninglnDepth(0, "msg") is the same as Warningln("msg").
+func WarninglnDepth(depth int, args ...interface{}) {
+ logging.printlnDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+ logging.printf(severity.WarningLog, logging.logger, logging.filter, format, args...)
+}
+
+// WarningfDepth acts as Warningf but uses depth to determine which call frame to log.
+// WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...).
+func WarningfDepth(depth int, format string, args ...interface{}) {
+ logging.printfDepth(severity.WarningLog, logging.logger, logging.filter, depth, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+ logging.print(severity.ErrorLog, logging.logger, logging.filter, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) {
+ logging.printDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Errorln logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Errorln(args ...interface{}) {
+ logging.println(severity.ErrorLog, logging.logger, logging.filter, args...)
+}
+
+// ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log.
+// ErrorlnDepth(0, "msg") is the same as Errorln("msg").
+func ErrorlnDepth(depth int, args ...interface{}) {
+ logging.printlnDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Errorf logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Errorf(format string, args ...interface{}) {
+ logging.printf(severity.ErrorLog, logging.logger, logging.filter, format, args...)
+}
+
+// ErrorfDepth acts as Errorf but uses depth to determine which call frame to log.
+// ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...).
+func ErrorfDepth(depth int, format string, args ...interface{}) {
+ logging.printfDepth(severity.ErrorLog, logging.logger, logging.filter, depth, format, args...)
+}
+
+// ErrorS writes a structured log entry to the ERROR, WARNING, and INFO logs.
+// The err argument is used as the "err" field of the log line.
+// The msg argument is used to add a constant description to the log line.
+// The key/value pairs are joined by "="; a newline is always appended.
+//
+// Basic examples:
+// >> klog.ErrorS(err, "Failed to update pod status")
+// output:
+// >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout"
+func ErrorS(err error, msg string, keysAndValues ...interface{}) {
+ logging.errorS(err, logging.logger, logging.filter, 0, msg, keysAndValues...)
+}
+
+// ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log.
+// ErrorSDepth(0, err, "msg") is the same as ErrorS(err, "msg").
+func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) {
+ logging.errorS(err, logging.logger, logging.filter, depth, msg, keysAndValues...)
+}
+
+// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
+// prints stack trace(s), then calls OsExit(255).
+//
+// Stderr only receives a dump of the current goroutine's stack trace. Log files,
+// if there are any, receive a dump of the stack traces in all goroutines.
+//
+// Callers who want more control over handling of fatal events may instead use a
+// combination of different functions:
+// - some info or error logging function, optionally with a stack trace
+// value generated by github.com/go-logr/lib/dbg.Backtrace
+// - Flush to flush pending log data
+// - panic, os.Exit or returning to the caller with an error
+//
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Fatal(args ...interface{}) {
+ logging.print(severity.FatalLog, logging.logger, logging.filter, args...)
+}
+
+// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
+// FatalDepth(0, "msg") is the same as Fatal("msg").
+func FatalDepth(depth int, args ...interface{}) {
+ logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls OsExit(255).
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Fatalln(args ...interface{}) {
+ logging.println(severity.FatalLog, logging.logger, logging.filter, args...)
+}
+
+// FatallnDepth acts as Fatalln but uses depth to determine which call frame to log.
+// FatallnDepth(0, "msg") is the same as Fatalln("msg").
+func FatallnDepth(depth int, args ...interface{}) {
+ logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls OsExit(255).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Fatalf(format string, args ...interface{}) {
+ logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...)
+}
+
+// FatalfDepth acts as Fatalf but uses depth to determine which call frame to log.
+// FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...).
+func FatalfDepth(depth int, format string, args ...interface{}) {
+ logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...)
+}
+
+// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
+// It allows Exit and relatives to use the Fatal logs.
+var fatalNoStacks uint32
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.print(severity.FatalLog, logging.logger, logging.filter, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1).
+func Exitln(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.println(severity.FatalLog, logging.logger, logging.filter, args...)
+}
+
+// ExitlnDepth acts as Exitln but uses depth to determine which call frame to log.
+// ExitlnDepth(0, "msg") is the same as Exitln("msg").
+func ExitlnDepth(depth int, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...)
+}
+
+// ExitfDepth acts as Exitf but uses depth to determine which call frame to log.
+// ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...).
+func ExitfDepth(depth int, format string, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...)
+}
+
+// LogFilter is a collection of functions that can filter all logging calls,
+// e.g. to sanitize arguments and prevent accidental leaking of secrets.
+type LogFilter interface {
+ Filter(args []interface{}) []interface{}
+ FilterF(format string, args []interface{}) (string, []interface{})
+ FilterS(msg string, keysAndValues []interface{}) (string, []interface{})
+}
+
+// SetLogFilter installs a filter that is used for all log calls.
+//
+// Modifying the filter is not thread-safe and should be done while no other
+// goroutines invoke log calls, usually during program initialization.
+func SetLogFilter(filter LogFilter) {
+ logging.filter = filter
+}
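+
+// A minimal, hypothetical LogFilter that redacts one known secret value might
+// look like the sketch below (klog does not ship such a filter):
+//
+//	type redactFilter struct{ secret string }
+//
+//	func (f redactFilter) Filter(args []interface{}) []interface{} {
+//		for i, a := range args {
+//			if s, ok := a.(string); ok && s == f.secret {
+//				args[i] = "[REDACTED]"
+//			}
+//		}
+//		return args
+//	}
+//
+//	func (f redactFilter) FilterF(format string, args []interface{}) (string, []interface{}) {
+//		return format, f.Filter(args)
+//	}
+//
+//	func (f redactFilter) FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) {
+//		return msg, f.Filter(keysAndValues)
+//	}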
diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go
new file mode 100644
index 0000000000..1025d644f3
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klog_file.go
@@ -0,0 +1,130 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package klog
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+)
+
+// MaxSize is the maximum size of a log file in bytes.
+var MaxSize uint64 = 1024 * 1024 * 1800
+
+// logDirs lists the candidate directories for new log files.
+var logDirs []string
+
+func createLogDirs() {
+ if logging.logDir != "" {
+ logDirs = append(logDirs, logging.logDir)
+ }
+ logDirs = append(logDirs, os.TempDir())
+}
+
+var (
+ pid = os.Getpid()
+ program = filepath.Base(os.Args[0])
+ host = "unknownhost"
+ userName = "unknownuser"
+ userNameOnce sync.Once
+)
+
+func init() {
+ if h, err := os.Hostname(); err == nil {
+ host = shortHostname(h)
+ }
+}
+
+// shortHostname returns its argument, truncating at the first period.
+// For instance, given "www.google.com" it returns "www".
+func shortHostname(hostname string) string {
+ if i := strings.Index(hostname, "."); i >= 0 {
+ return hostname[:i]
+ }
+ return hostname
+}
+
+// logName returns a new log file name containing tag, with start time t, and
+// the name for the symlink for tag.
+func logName(tag string, t time.Time) (name, link string) {
+ name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
+ program,
+ host,
+ getUserName(),
+ tag,
+ t.Year(),
+ t.Month(),
+ t.Day(),
+ t.Hour(),
+ t.Minute(),
+ t.Second(),
+ pid)
+ return name, program + "." + tag
+}
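+
+// For example (illustrative values), logName("INFO", t) in a binary named
+// "prog" running as user "alice" on host "www" with pid 1234 would return
+// something like:
+//
+//	"prog.www.alice.log.INFO.20221123-150405.1234", "prog.INFO"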
+
+var onceLogDirs sync.Once
+
+// create creates a new log file and returns the file and its filename, which
+// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
+// successfully, create also attempts to update the symlink for that tag, ignoring
+// errors.
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) {
+ if logging.logFile != "" {
+ f, err := openOrCreate(logging.logFile, startup)
+ if err == nil {
+ return f, logging.logFile, nil
+ }
+ return nil, "", fmt.Errorf("log: unable to create log: %v", err)
+ }
+ onceLogDirs.Do(createLogDirs)
+ if len(logDirs) == 0 {
+ return nil, "", errors.New("log: no log dirs")
+ }
+ name, link := logName(tag, t)
+ var lastErr error
+ for _, dir := range logDirs {
+ fname := filepath.Join(dir, name)
+ f, err := openOrCreate(fname, startup)
+ if err == nil {
+ symlink := filepath.Join(dir, link)
+ os.Remove(symlink) // ignore err
+ os.Symlink(name, symlink) // ignore err
+ return f, fname, nil
+ }
+ lastErr = err
+ }
+ return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}
+
+// openOrCreate opens the named file, creating it if necessary.
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func openOrCreate(name string, startup bool) (*os.File, error) {
+ if startup {
+ f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+ return f, err
+ }
+ f, err := os.Create(name)
+ return f, err
+}
diff --git a/vendor/k8s.io/klog/v2/klog_file_others.go b/vendor/k8s.io/klog/v2/klog_file_others.go
new file mode 100644
index 0000000000..aa46726851
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klog_file_others.go
@@ -0,0 +1,19 @@
+//go:build !windows
+// +build !windows
+
+package klog
+
+import (
+ "os/user"
+)
+
+func getUserName() string {
+ userNameOnce.Do(func() {
+ current, err := user.Current()
+ if err == nil {
+ userName = current.Username
+ }
+ })
+
+ return userName
+}
diff --git a/vendor/k8s.io/klog/v2/klog_file_windows.go b/vendor/k8s.io/klog/v2/klog_file_windows.go
new file mode 100644
index 0000000000..2517f9c538
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klog_file_windows.go
@@ -0,0 +1,34 @@
+//go:build windows
+// +build windows
+
+package klog
+
+import (
+ "os"
+ "strings"
+)
+
+func getUserName() string {
+ userNameOnce.Do(func() {
+ // On Windows, the Go 'user' package requires netapi32.dll.
+ // This affects Windows Nano Server:
+ // https://github.com/golang/go/issues/21867
+ // Fallback to using environment variables.
+ u := os.Getenv("USERNAME")
+ if len(u) == 0 {
+ return
+ }
+ // Sanitize the USERNAME since it may contain filepath separators.
+ u = strings.Replace(u, `\`, "_", -1)
+
+ // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME'
+ d := os.Getenv("USERDOMAIN")
+ if len(d) != 0 {
+ userName = d + "_" + u
+ } else {
+ userName = u
+ }
+ })
+
+ return userName
+}
diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go
new file mode 100644
index 0000000000..027a4014af
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klogr.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+ "github.com/go-logr/logr"
+
+ "k8s.io/klog/v2/internal/serialize"
+)
+
+// NewKlogr returns a logger that is functionally identical to
+// klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The
+// difference is that it uses a simpler implementation.
+func NewKlogr() Logger {
+ return New(&klogger{})
+}
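+
+// Illustrative use:
+//
+//	logger := klog.NewKlogr()
+//	logger.WithName("setup").WithValues("node", "n1").Info("ready")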
+
+// klogger is a subset of klogr/klogr.go. It had to be copied to break an
+// import cycle (klogr wants to use klog, and klog wants to use klogr).
+type klogger struct {
+ level int
+ callDepth int
+ prefix string
+ values []interface{}
+}
+
+func (l *klogger) Init(info logr.RuntimeInfo) {
+ l.callDepth += info.CallDepth
+}
+
+func (l klogger) Info(level int, msg string, kvList ...interface{}) {
+ merged := serialize.MergeKVs(l.values, kvList)
+ if l.prefix != "" {
+ msg = l.prefix + ": " + msg
+ }
+ V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
+}
+
+func (l klogger) Enabled(level int) bool {
+ return V(Level(level)).Enabled()
+}
+
+func (l klogger) Error(err error, msg string, kvList ...interface{}) {
+ merged := serialize.MergeKVs(l.values, kvList)
+ if l.prefix != "" {
+ msg = l.prefix + ": " + msg
+ }
+ ErrorSDepth(l.callDepth+1, err, msg, merged...)
+}
+
+// WithName returns a new logr.Logger with the specified name appended. klogr
+// uses '/' characters to separate name elements. Callers should not pass '/'
+// in the provided name string, but this library does not actually enforce that.
+func (l klogger) WithName(name string) logr.LogSink {
+ if len(l.prefix) > 0 {
+ l.prefix = l.prefix + "/"
+ }
+ l.prefix += name
+ return &l
+}
+
+func (l klogger) WithValues(kvList ...interface{}) logr.LogSink {
+ l.values = serialize.WithValues(l.values, kvList)
+ return &l
+}
+
+func (l klogger) WithCallDepth(depth int) logr.LogSink {
+ l.callDepth += depth
+ return &l
+}
+
+var _ logr.LogSink = &klogger{}
+var _ logr.CallDepthLogSink = &klogger{}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 822e225348..ca1f551856 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,8 +1,10 @@
-# cloud.google.com/go v0.93.3
-## explicit; go 1.11
+# cloud.google.com/go v0.102.1
+## explicit; go 1.15
cloud.google.com/go
-cloud.google.com/go/compute/metadata
cloud.google.com/go/internal/version
+# cloud.google.com/go/compute v1.7.0
+## explicit; go 1.15
+cloud.google.com/go/compute/metadata
# cloud.google.com/go/logging v1.4.2
## explicit; go 1.11
cloud.google.com/go/logging
@@ -69,7 +71,7 @@ github.com/armon/go-metrics
# github.com/armon/go-radix v1.0.1-0.20221118154546-54df44f2176c
## explicit
github.com/armon/go-radix
-# github.com/aws/aws-sdk-go v1.31.6
+# github.com/aws/aws-sdk-go v1.37.0
## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/awserr
@@ -81,6 +83,7 @@ github.com/aws/aws-sdk-go/aws/credentials
github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds
github.com/aws/aws-sdk-go/aws/credentials/endpointcreds
github.com/aws/aws-sdk-go/aws/credentials/processcreds
+github.com/aws/aws-sdk-go/aws/credentials/ssocreds
github.com/aws/aws-sdk-go/aws/credentials/stscreds
github.com/aws/aws-sdk-go/aws/csm
github.com/aws/aws-sdk-go/aws/defaults
@@ -104,15 +107,18 @@ github.com/aws/aws-sdk-go/private/protocol/jsonrpc
github.com/aws/aws-sdk-go/private/protocol/query
github.com/aws/aws-sdk-go/private/protocol/query/queryutil
github.com/aws/aws-sdk-go/private/protocol/rest
+github.com/aws/aws-sdk-go/private/protocol/restjson
github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
github.com/aws/aws-sdk-go/service/cloudwatchlogs
+github.com/aws/aws-sdk-go/service/sso
+github.com/aws/aws-sdk-go/service/sso/ssoiface
github.com/aws/aws-sdk-go/service/sts
github.com/aws/aws-sdk-go/service/sts/stsiface
# github.com/beorn7/perks v1.0.1
## explicit; go 1.11
github.com/beorn7/perks/quantile
-# github.com/bsphere/le_go v0.0.0-20170215134836-7a984a84b549
-## explicit
+# github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8
+## explicit; go 1.12
github.com/bsphere/le_go
# github.com/cespare/xxhash/v2 v2.1.2
## explicit; go 1.11
@@ -320,7 +326,7 @@ github.com/dustin/go-humanize
# github.com/felixge/httpsnoop v1.0.2
## explicit; go 1.13
github.com/felixge/httpsnoop
-# github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e
+# github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee
## explicit
github.com/fernet/fernet-go
# github.com/fluent/fluent-logger-golang v1.9.0
@@ -374,8 +380,8 @@ github.com/golang/protobuf/ptypes/wrappers
# github.com/google/btree v1.1.2
## explicit; go 1.18
github.com/google/btree
-# github.com/google/certificate-transparency-go v1.1.2 => github.com/google/certificate-transparency-go v1.0.20
-## explicit
+# github.com/google/certificate-transparency-go v1.1.4
+## explicit; go 1.17
github.com/google/certificate-transparency-go
github.com/google/certificate-transparency-go/asn1
github.com/google/certificate-transparency-go/client
@@ -398,9 +404,16 @@ github.com/google/shlex
# github.com/google/uuid v1.3.0
## explicit
github.com/google/uuid
-# github.com/googleapis/gax-go/v2 v2.0.5
-## explicit
+# github.com/googleapis/enterprise-certificate-proxy v0.1.0
+## explicit; go 1.18
+github.com/googleapis/enterprise-certificate-proxy/client
+github.com/googleapis/enterprise-certificate-proxy/client/util
+# github.com/googleapis/gax-go/v2 v2.4.0
+## explicit; go 1.15
github.com/googleapis/gax-go/v2
+github.com/googleapis/gax-go/v2/apierror
+github.com/googleapis/gax-go/v2/apierror/internal/proto
+github.com/googleapis/gax-go/v2/internal
# github.com/gorilla/mux v1.8.0
## explicit; go 1.12
github.com/gorilla/mux
@@ -446,13 +459,13 @@ github.com/hashicorp/serf/serf
# github.com/imdario/mergo v0.3.12
## explicit; go 1.13
github.com/imdario/mergo
-# github.com/inconshreveable/mousetrap v1.0.0
-## explicit
+# github.com/inconshreveable/mousetrap v1.0.1
+## explicit; go 1.18
github.com/inconshreveable/mousetrap
# github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062
## explicit; go 1.12
github.com/ishidawataru/sctp
-# github.com/jmespath/go-jmespath v0.3.0
+# github.com/jmespath/go-jmespath v0.4.0
## explicit; go 1.14
github.com/jmespath/go-jmespath
# github.com/klauspost/compress v1.15.9
@@ -601,8 +614,8 @@ github.com/moby/patternmatcher
# github.com/moby/pubsub v1.0.0
## explicit; go 1.19
github.com/moby/pubsub
-# github.com/moby/swarmkit/v2 v2.0.0-20221102165002-6341884e5fc9
-## explicit; go 1.17
+# github.com/moby/swarmkit/v2 v2.0.0-20221123162438-b17f02f0a054
+## explicit; go 1.18
github.com/moby/swarmkit/v2/agent
github.com/moby/swarmkit/v2/agent/configs
github.com/moby/swarmkit/v2/agent/csi
@@ -684,8 +697,8 @@ github.com/moby/sys/signal
# github.com/moby/sys/symlink v0.2.0
## explicit; go 1.16
github.com/moby/sys/symlink
-# github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
-## explicit; go 1.13
+# github.com/moby/term v0.0.0-20221120202655-abb19827d345
+## explicit; go 1.18
github.com/moby/term
github.com/moby/term/windows
# github.com/morikuni/aec v1.0.0
@@ -727,21 +740,21 @@ github.com/philhofer/fwd
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
-# github.com/prometheus/client_golang v1.12.1
-## explicit; go 1.13
+# github.com/prometheus/client_golang v1.13.0
+## explicit; go 1.17
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
# github.com/prometheus/client_model v0.2.0
## explicit; go 1.9
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.32.1
-## explicit; go 1.13
+# github.com/prometheus/common v0.37.0
+## explicit; go 1.16
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.7.3
-## explicit; go 1.13
+# github.com/prometheus/procfs v0.8.0
+## explicit; go 1.17
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
@@ -758,8 +771,8 @@ github.com/sean-/seed
# github.com/sirupsen/logrus v1.9.0
## explicit; go 1.13
github.com/sirupsen/logrus
-# github.com/spf13/cobra v1.1.3
-## explicit; go 1.12
+# github.com/spf13/cobra v1.6.1
+## explicit; go 1.15
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
## explicit; go 1.12
@@ -796,23 +809,23 @@ github.com/vishvananda/netns
# go.etcd.io/bbolt v1.3.6
## explicit; go 1.12
go.etcd.io/bbolt
-# go.etcd.io/etcd/client/pkg/v3 v3.5.2
+# go.etcd.io/etcd/client/pkg/v3 v3.5.6
## explicit; go 1.16
go.etcd.io/etcd/client/pkg/v3/fileutil
-# go.etcd.io/etcd/pkg/v3 v3.5.2
+# go.etcd.io/etcd/pkg/v3 v3.5.6
## explicit; go 1.16
go.etcd.io/etcd/pkg/v3/crc
go.etcd.io/etcd/pkg/v3/idutil
go.etcd.io/etcd/pkg/v3/ioutil
go.etcd.io/etcd/pkg/v3/pbutil
-# go.etcd.io/etcd/raft/v3 v3.5.2
+# go.etcd.io/etcd/raft/v3 v3.5.6
## explicit; go 1.16
go.etcd.io/etcd/raft/v3
go.etcd.io/etcd/raft/v3/confchange
go.etcd.io/etcd/raft/v3/quorum
go.etcd.io/etcd/raft/v3/raftpb
go.etcd.io/etcd/raft/v3/tracker
-# go.etcd.io/etcd/server/v3 v3.5.2
+# go.etcd.io/etcd/server/v3 v3.5.6
## explicit; go 1.16
go.etcd.io/etcd/server/v3/etcdserver/api/snap
go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb
@@ -887,13 +900,13 @@ go.opentelemetry.io/proto/otlp/collector/trace/v1
go.opentelemetry.io/proto/otlp/common/v1
go.opentelemetry.io/proto/otlp/resource/v1
go.opentelemetry.io/proto/otlp/trace/v1
-# go.uber.org/atomic v1.7.0
+# go.uber.org/atomic v1.9.0
## explicit; go 1.13
go.uber.org/atomic
-# go.uber.org/multierr v1.6.0
-## explicit; go 1.12
+# go.uber.org/multierr v1.8.0
+## explicit; go 1.14
go.uber.org/multierr
-# go.uber.org/zap v1.17.0
+# go.uber.org/zap v1.21.0
## explicit; go 1.13
go.uber.org/zap
go.uber.org/zap/buffer
@@ -975,10 +988,12 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.1.0
## explicit
golang.org/x/time/rate
-# google.golang.org/api v0.54.0
-## explicit; go 1.11
+# google.golang.org/api v0.93.0
+## explicit; go 1.15
+google.golang.org/api/googleapi
google.golang.org/api/internal
google.golang.org/api/internal/impersonate
+google.golang.org/api/internal/third_party/uritemplates
google.golang.org/api/iterator
google.golang.org/api/option
google.golang.org/api/option/internaloption
@@ -1000,7 +1015,7 @@ google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/socket
google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21
+# google.golang.org/genproto v0.0.0-20220706185917-7780775163c4
## explicit; go 1.15
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
@@ -1012,9 +1027,11 @@ google.golang.org/genproto/googleapis/api/monitoredres
google.golang.org/genproto/googleapis/logging/type
google.golang.org/genproto/googleapis/logging/v2
google.golang.org/genproto/googleapis/longrunning
+google.golang.org/genproto/googleapis/rpc/code
+google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/grpc v1.47.0
+# google.golang.org/grpc v1.48.0
## explicit; go 1.14
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -1078,7 +1095,7 @@ google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.28.0
+# google.golang.org/protobuf v1.28.1
## explicit; go 1.11
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
@@ -1130,4 +1147,11 @@ gotest.tools/v3/internal/format
gotest.tools/v3/internal/source
gotest.tools/v3/poll
gotest.tools/v3/skip
-# github.com/google/certificate-transparency-go => github.com/google/certificate-transparency-go v1.0.20
+# k8s.io/klog/v2 v2.80.1
+## explicit; go 1.13
+k8s.io/klog/v2
+k8s.io/klog/v2/internal/buffer
+k8s.io/klog/v2/internal/clock
+k8s.io/klog/v2/internal/dbg
+k8s.io/klog/v2/internal/serialize
+k8s.io/klog/v2/internal/severity