Diffstat:
-rwxr-xr-x  Dockerfile.windows | 4
-rw-r--r--  Makefile | 4
-rw-r--r--  api/client/events.go | 1
-rw-r--r--  cli/common.go | 2
-rwxr-xr-x  contrib/check-config.sh | 2
-rw-r--r--  contrib/completion/bash/docker | 7
-rw-r--r--  contrib/completion/zsh/_docker | 2
-rw-r--r--  daemon/container_operations_unix.go | 17
-rw-r--r--  daemon/execdriver/native/driver.go | 5
-rw-r--r--  daemon/logdrivers_linux.go | 1
-rw-r--r--  daemon/logger/gcplogs/gcplogging.go | 181
-rw-r--r--  daemon/logger/journald/read.go | 1
-rw-r--r--  daemon/logger/journald/read_native.go | 6
-rw-r--r--  daemon/logger/journald/read_native_compat.go | 6
-rw-r--r--  daemon/logger/jsonfilelog/jsonfilelog.go | 1
-rw-r--r--  docker/daemon_test.go | 121
-rw-r--r--  docker/daemon_unix_test.go | 122
-rw-r--r--  docs/admin/logging/gcplogs.md | 70
-rw-r--r--  docs/admin/logging/overview.md | 11
-rwxr-xr-x  hack/make.sh | 4
-rwxr-xr-x  hack/vendor.sh | 5
-rw-r--r--  integration-cli/docker_cli_run_unix_test.go | 37
-rw-r--r--  integration-cli/docker_cli_stats_test.go | 5
-rw-r--r--  man/docker-create.1.md | 2
-rw-r--r--  man/docker-daemon.8.md | 2
-rw-r--r--  man/docker-logout.1.md | 2
-rw-r--r--  man/docker-run.1.md | 2
-rw-r--r--  man/docker.1.md | 2
-rw-r--r--  migrate/v1/migratev1_test.go | 9
-rw-r--r--  vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go | 18
-rw-r--r--  vendor/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go | 23
-rw-r--r--  vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go | 79
-rw-r--r--  vendor/src/golang.org/x/net/http2/.gitignore | 2
-rw-r--r--  vendor/src/golang.org/x/net/http2/Dockerfile | 44
-rw-r--r--  vendor/src/golang.org/x/net/http2/Makefile | 3
-rw-r--r--  vendor/src/golang.org/x/net/http2/README | 20
-rw-r--r--  vendor/src/golang.org/x/net/http2/buffer.go | 76
-rw-r--r--  vendor/src/golang.org/x/net/http2/errors.go | 78
-rw-r--r--  vendor/src/golang.org/x/net/http2/flow.go | 51
-rw-r--r--  vendor/src/golang.org/x/net/http2/frame.go | 1113
-rw-r--r--  vendor/src/golang.org/x/net/http2/gotrack.go | 173
-rw-r--r--  vendor/src/golang.org/x/net/http2/headermap.go | 80
-rw-r--r--  vendor/src/golang.org/x/net/http2/hpack/encode.go | 252
-rw-r--r--  vendor/src/golang.org/x/net/http2/hpack/hpack.go | 445
-rw-r--r--  vendor/src/golang.org/x/net/http2/hpack/huffman.go | 159
-rw-r--r--  vendor/src/golang.org/x/net/http2/hpack/tables.go | 353
-rw-r--r--  vendor/src/golang.org/x/net/http2/http2.go | 249
-rw-r--r--  vendor/src/golang.org/x/net/http2/pipe.go | 43
-rw-r--r--  vendor/src/golang.org/x/net/http2/server.go | 1780
-rw-r--r--  vendor/src/golang.org/x/net/http2/transport.go | 553
-rw-r--r--  vendor/src/golang.org/x/net/http2/write.go | 204
-rw-r--r--  vendor/src/golang.org/x/net/http2/writesched.go | 286
-rw-r--r--  vendor/src/golang.org/x/net/internal/timeseries/timeseries.go | 525
-rw-r--r--  vendor/src/golang.org/x/net/trace/events.go | 524
-rw-r--r--  vendor/src/golang.org/x/net/trace/histogram.go | 356
-rw-r--r--  vendor/src/golang.org/x/net/trace/trace.go | 1057
-rw-r--r--  vendor/src/golang.org/x/oauth2/.travis.yml | 14
-rw-r--r--  vendor/src/golang.org/x/oauth2/AUTHORS | 3
-rw-r--r--  vendor/src/golang.org/x/oauth2/CONTRIBUTING.md | 31
-rw-r--r--  vendor/src/golang.org/x/oauth2/CONTRIBUTORS | 3
-rw-r--r--  vendor/src/golang.org/x/oauth2/LICENSE | 27
-rw-r--r--  vendor/src/golang.org/x/oauth2/README.md | 64
-rw-r--r--  vendor/src/golang.org/x/oauth2/client_appengine.go | 25
-rw-r--r--  vendor/src/golang.org/x/oauth2/google/appengine.go | 86
-rw-r--r--  vendor/src/golang.org/x/oauth2/google/appengine_hook.go | 13
-rw-r--r--  vendor/src/golang.org/x/oauth2/google/appenginevm_hook.go | 14
-rw-r--r--  vendor/src/golang.org/x/oauth2/google/default.go | 155
-rw-r--r--  vendor/src/golang.org/x/oauth2/google/google.go | 145
-rw-r--r--  vendor/src/golang.org/x/oauth2/google/jwt.go | 71
-rw-r--r--  vendor/src/golang.org/x/oauth2/google/sdk.go | 168
-rw-r--r--  vendor/src/golang.org/x/oauth2/internal/oauth2.go | 76
-rw-r--r--  vendor/src/golang.org/x/oauth2/internal/token.go | 221
-rw-r--r--  vendor/src/golang.org/x/oauth2/internal/transport.go | 69
-rw-r--r--  vendor/src/golang.org/x/oauth2/jws/jws.go | 172
-rw-r--r--  vendor/src/golang.org/x/oauth2/jwt/jwt.go | 153
-rw-r--r--  vendor/src/golang.org/x/oauth2/oauth2.go | 337
-rw-r--r--  vendor/src/golang.org/x/oauth2/token.go | 158
-rw-r--r--  vendor/src/golang.org/x/oauth2/transport.go | 132
-rw-r--r--  vendor/src/google.golang.org/api/LICENSE | 27
-rw-r--r--  vendor/src/google.golang.org/api/gensupport/json.go | 177
-rw-r--r--  vendor/src/google.golang.org/api/gensupport/params.go | 31
-rw-r--r--  vendor/src/google.golang.org/api/googleapi/googleapi.go | 588
-rw-r--r--  vendor/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE | 18
-rw-r--r--  vendor/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go | 359
-rw-r--r--  vendor/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go | 13
-rw-r--r--  vendor/src/google.golang.org/api/googleapi/types.go | 182
-rw-r--r--  vendor/src/google.golang.org/api/logging/v1beta3/logging-api.json | 1692
-rw-r--r--  vendor/src/google.golang.org/api/logging/v1beta3/logging-gen.go | 4787
-rw-r--r--  vendor/src/google.golang.org/cloud/.travis.yml | 11
-rw-r--r--  vendor/src/google.golang.org/cloud/AUTHORS | 12
-rw-r--r--  vendor/src/google.golang.org/cloud/CONTRIBUTING.md | 114
-rw-r--r--  vendor/src/google.golang.org/cloud/CONTRIBUTORS | 24
-rw-r--r--  vendor/src/google.golang.org/cloud/LICENSE | 202
-rw-r--r--  vendor/src/google.golang.org/cloud/README.md | 135
-rw-r--r--  vendor/src/google.golang.org/cloud/cloud.go | 49
-rw-r--r--  vendor/src/google.golang.org/cloud/compute/metadata/metadata.go | 327
-rw-r--r--  vendor/src/google.golang.org/cloud/internal/cloud.go | 128
-rw-r--r--  vendor/src/google.golang.org/cloud/internal/opts/option.go | 24
-rw-r--r--  vendor/src/google.golang.org/cloud/internal/transport/cancelreq.go | 29
-rw-r--r--  vendor/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go | 31
-rw-r--r--  vendor/src/google.golang.org/cloud/internal/transport/dial.go | 134
-rw-r--r--  vendor/src/google.golang.org/cloud/internal/transport/proto.go | 80
-rw-r--r--  vendor/src/google.golang.org/cloud/key.json.enc | bin 0 -> 1248 bytes
-rw-r--r--  vendor/src/google.golang.org/cloud/logging/logging.go | 468
-rw-r--r--  vendor/src/google.golang.org/cloud/option.go | 102
-rw-r--r--  vendor/src/google.golang.org/grpc/.travis.yml | 14
-rw-r--r--  vendor/src/google.golang.org/grpc/CONTRIBUTING.md | 23
-rw-r--r--  vendor/src/google.golang.org/grpc/Makefile | 50
-rw-r--r--  vendor/src/google.golang.org/grpc/PATENTS | 22
-rw-r--r--  vendor/src/google.golang.org/grpc/README.md | 32
-rw-r--r--  vendor/src/google.golang.org/grpc/call.go | 192
-rw-r--r--  vendor/src/google.golang.org/grpc/clientconn.go | 525
-rwxr-xr-x  vendor/src/google.golang.org/grpc/codegen.sh | 17
-rw-r--r--  vendor/src/google.golang.org/grpc/codes/code_string.go | 16
-rw-r--r--  vendor/src/google.golang.org/grpc/codes/codes.go | 159
-rw-r--r--  vendor/src/google.golang.org/grpc/credentials/credentials.go | 239
-rw-r--r--  vendor/src/google.golang.org/grpc/credentials/oauth/oauth.go | 177
-rw-r--r--  vendor/src/google.golang.org/grpc/doc.go | 6
-rw-r--r--  vendor/src/google.golang.org/grpc/grpclog/logger.go | 90
-rw-r--r--  vendor/src/google.golang.org/grpc/metadata/metadata.go | 146
-rw-r--r--  vendor/src/google.golang.org/grpc/picker.go | 93
-rw-r--r--  vendor/src/google.golang.org/grpc/rpc_util.go | 337
-rw-r--r--  vendor/src/google.golang.org/grpc/server.go | 542
-rw-r--r--  vendor/src/google.golang.org/grpc/stream.go | 368
-rw-r--r--  vendor/src/google.golang.org/grpc/trace.go | 120
-rw-r--r--  vendor/src/google.golang.org/grpc/transport/control.go | 259
-rw-r--r--  vendor/src/google.golang.org/grpc/transport/http2_client.go | 860
-rw-r--r--  vendor/src/google.golang.org/grpc/transport/http2_server.go | 695
-rw-r--r--  vendor/src/google.golang.org/grpc/transport/http_util.go | 451
-rw-r--r--  vendor/src/google.golang.org/grpc/transport/transport.go | 465
130 files changed, 26814 insertions(+), 145 deletions(-)
diff --git a/Dockerfile.windows b/Dockerfile.windows
index a3bd310b5d..78db5a3b34 100755
--- a/Dockerfile.windows
+++ b/Dockerfile.windows
@@ -40,8 +40,8 @@ FROM windowsservercore
# Environment variable notes:
# - GOLANG_VERSION must be consistent with the 'Dockerfile' used by Linux.
# - FROM_DOCKERFILE is used for detection of building within a container.
-ENV GOLANG_VERSION=1.5.3 \
- GIT_LOCATION=https://github.com/git-for-windows/git/releases/download/v2.7.1.windows.2/Git-2.7.1.2-64-bit.exe \
+ENV GOLANG_VERSION=1.6 \
+ GIT_LOCATION=https://github.com/git-for-windows/git/releases/download/v2.7.2.windows.1/Git-2.7.2-64-bit.exe \
RSRC_COMMIT=ba14da1f827188454a4591717fff29999010887f \
GOPATH=C:/go;C:/go/src/github.com/docker/docker/vendor \
FROM_DOCKERFILE=1
diff --git a/Makefile b/Makefile
index a56373e914..b455c12f43 100644
--- a/Makefile
+++ b/Makefile
@@ -54,6 +54,10 @@ DOCKER_ENVS := \
BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
+# This allows the test suite to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container uses a supported fs.
+# The volume will be cleaned up when the container is removed due to `--rm`.
+# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see the BIND_DIR line above), in which case this line does nothing since `DOCKER_MOUNT` will already be set.
+DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v "/go/src/github.com/docker/docker/bundles")
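+# As a sketch of how this resolves (given the defaults above): with neither
+# BINDDIR nor DOCKER_HOST set, DOCKER_MOUNT bind-mounts the local bundles
+# directory, i.e. -v "$(CURDIR)/bundles:/go/src/github.com/docker/docker/bundles";
+# with DOCKER_HOST set, it falls through to the anonymous-volume form added
+# here, i.e. -v "/go/src/github.com/docker/docker/bundles".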
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
diff --git a/api/client/events.go b/api/client/events.go
index ad38204368..d2408c192e 100644
--- a/api/client/events.go
+++ b/api/client/events.go
@@ -121,7 +121,6 @@ func printOutput(event eventtypes.Message, output io.Writer) {
type eventHandler struct {
handlers map[string]func(eventtypes.Message)
mu sync.Mutex
- closed bool
}
func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) {
diff --git a/cli/common.go b/cli/common.go
index d2fa93d882..df6a6ec115 100644
--- a/cli/common.go
+++ b/cli/common.go
@@ -42,7 +42,7 @@ var dockerCommands = []Command{
{"inspect", "Return low-level information on a container or image"},
{"kill", "Kill a running container"},
{"load", "Load an image from a tar archive or STDIN"},
- {"login", "Register or log in to a Docker registry"},
+ {"login", "Log in to a Docker registry"},
{"logout", "Log out from a Docker registry"},
{"logs", "Fetch the logs of a container"},
{"network", "Manage Docker networks"},
diff --git a/contrib/check-config.sh b/contrib/check-config.sh
index 825a00e505..d87c684fea 100755
--- a/contrib/check-config.sh
+++ b/contrib/check-config.sh
@@ -182,7 +182,7 @@ flags=(
NAMESPACES {NET,PID,IPC,UTS}_NS
DEVPTS_MULTIPLE_INSTANCES
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG
- CONFIG_KEYS
+ KEYS
MACVLAN VETH BRIDGE BRIDGE_NETFILTER
NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE
NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker
index 23e1dfe268..cbb51c5d27 100644
--- a/contrib/completion/bash/docker
+++ b/contrib/completion/bash/docker
@@ -397,6 +397,7 @@ __docker_complete_log_drivers() {
awslogs
etwlogs
fluentd
+ gcplogs
gelf
journald
json-file
@@ -410,13 +411,14 @@ __docker_complete_log_options() {
# see docs/reference/logging/index.md
local awslogs_options="awslogs-region awslogs-group awslogs-stream"
local fluentd_options="env fluentd-address labels tag"
+ local gcplogs_options="env gcp-log-cmd gcp-project labels"
local gelf_options="env gelf-address labels tag"
local journald_options="env labels tag"
local json_file_options="env labels max-file max-size"
local syslog_options="syslog-address syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify syslog-facility tag"
local splunk_options="env labels splunk-caname splunk-capath splunk-index splunk-insecureskipverify splunk-source splunk-sourcetype splunk-token splunk-url tag"
- local all_options="$fluentd_options $gelf_options $journald_options $json_file_options $syslog_options $splunk_options"
+ local all_options="$fluentd_options $gcplogs_options $gelf_options $journald_options $json_file_options $syslog_options $splunk_options"
case $(__docker_value_of_option --log-driver) in
'')
@@ -428,6 +430,9 @@ __docker_complete_log_options() {
fluentd)
COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) )
;;
+ gcplogs)
+ COMPREPLY=( $( compgen -W "$gcplogs_options" -S = -- "$cur" ) )
+ ;;
gelf)
COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) )
;;
diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker
index b17f5fd712..3a1f399f51 100644
--- a/contrib/completion/zsh/_docker
+++ b/contrib/completion/zsh/_docker
@@ -201,6 +201,7 @@ __docker_get_log_options() {
awslogs_options=("awslogs-region" "awslogs-group" "awslogs-stream")
fluentd_options=("env" "fluentd-address" "labels" "tag")
+ gcplogs_options=("env" "gcp-log-cmd" "gcp-project" "labels")
gelf_options=("env" "gelf-address" "labels" "tag")
journald_options=("env" "labels")
json_file_options=("env" "labels" "max-file" "max-size")
@@ -209,6 +210,7 @@ __docker_get_log_options() {
[[ $log_driver = (awslogs|all) ]] && _describe -t awslogs-options "awslogs options" awslogs_options "$@" && ret=0
[[ $log_driver = (fluentd|all) ]] && _describe -t fluentd-options "fluentd options" fluentd_options "$@" && ret=0
+ [[ $log_driver = (gcplogs|all) ]] && _describe -t gcplogs-options "gcplogs options" gcplogs_options "$@" && ret=0
[[ $log_driver = (gelf|all) ]] && _describe -t gelf-options "gelf options" gelf_options "$@" && ret=0
[[ $log_driver = (journald|all) ]] && _describe -t journald-options "journald options" journald_options "$@" && ret=0
[[ $log_driver = (json-file|all) ]] && _describe -t json-file-options "json-file options" json_file_options "$@" && ret=0
diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go
index ea4ab2fd11..54a086d495 100644
--- a/daemon/container_operations_unix.go
+++ b/daemon/container_operations_unix.go
@@ -1107,7 +1107,16 @@ func killProcessDirectly(container *container.Container) error {
}
func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []*configs.Device, err error) {
- device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
+ resolvedPathOnHost := deviceMapping.PathOnHost
+
+ // check if it is a symbolic link
+ if src, e := os.Lstat(deviceMapping.PathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink {
+ if linkedPathOnHost, e := os.Readlink(deviceMapping.PathOnHost); e == nil {
+ resolvedPathOnHost = linkedPathOnHost
+ }
+ }
+
+ device, err := devices.DeviceFromPath(resolvedPathOnHost, deviceMapping.CgroupPermissions)
// if there was no error, return the device
if err == nil {
device.Path = deviceMapping.PathInContainer
@@ -1119,10 +1128,10 @@ func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []*con
if err == devices.ErrNotADevice {
// check if it is a directory
- if src, e := os.Stat(deviceMapping.PathOnHost); e == nil && src.IsDir() {
+ if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() {
// mount the internal devices recursively
- filepath.Walk(deviceMapping.PathOnHost, func(dpath string, f os.FileInfo, e error) error {
+ filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error {
childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions)
if e != nil {
// ignore the device
@@ -1130,7 +1139,7 @@ func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []*con
}
// add the device to userSpecified devices
- childDevice.Path = strings.Replace(dpath, deviceMapping.PathOnHost, deviceMapping.PathInContainer, 1)
+ childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, deviceMapping.PathInContainer, 1)
devs = append(devs, childDevice)
return nil
diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go
index 6e74124b19..fb7ef26271 100644
--- a/daemon/execdriver/native/driver.go
+++ b/daemon/execdriver/native/driver.go
@@ -123,11 +123,6 @@ func NewDriver(root string, options []string) (*Driver, error) {
}, nil
}
-type execOutput struct {
- exitCode int
- err error
-}
-
// Run implements the exec driver Driver interface,
// it calls libcontainer APIs to run a container.
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
diff --git a/daemon/logdrivers_linux.go b/daemon/logdrivers_linux.go
index 0abc6269de..89fe49a858 100644
--- a/daemon/logdrivers_linux.go
+++ b/daemon/logdrivers_linux.go
@@ -5,6 +5,7 @@ import (
// therefore they register themselves to the logdriver factory.
_ "github.com/docker/docker/daemon/logger/awslogs"
_ "github.com/docker/docker/daemon/logger/fluentd"
+ _ "github.com/docker/docker/daemon/logger/gcplogs"
_ "github.com/docker/docker/daemon/logger/gelf"
_ "github.com/docker/docker/daemon/logger/journald"
_ "github.com/docker/docker/daemon/logger/jsonfilelog"
diff --git a/daemon/logger/gcplogs/gcplogging.go b/daemon/logger/gcplogs/gcplogging.go
new file mode 100644
index 0000000000..b9b8af5871
--- /dev/null
+++ b/daemon/logger/gcplogs/gcplogging.go
@@ -0,0 +1,181 @@
+package gcplogs
+
+import (
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/docker/docker/daemon/logger"
+
+ "github.com/Sirupsen/logrus"
+ "golang.org/x/net/context"
+ "google.golang.org/cloud/compute/metadata"
+ "google.golang.org/cloud/logging"
+)
+
+const (
+ name = "gcplogs"
+
+ projectOptKey = "gcp-project"
+ logLabelsKey = "labels"
+ logEnvKey = "env"
+ logCmdKey = "gcp-log-cmd"
+)
+
+var (
+ // The number of logs the gcplogs driver has dropped.
+ droppedLogs uint64
+
+ onGCE = metadata.OnGCE()
+
+ // instance metadata populated from the metadata server if available
+ projectID string
+ zone string
+ instanceName string
+ instanceID string
+)
+
+func init() {
+ if onGCE {
+ // These will fail on instances if the metadata service is
+ // down or the client is compiled with an API version that
+ // has been removed. Since these are not vital, let's ignore
+	// them and make their fields in the dockerLogEntry ,omitempty.
+ projectID, _ = metadata.ProjectID()
+ zone, _ = metadata.Zone()
+ instanceName, _ = metadata.InstanceName()
+ instanceID, _ = metadata.InstanceID()
+ }
+
+ if err := logger.RegisterLogDriver(name, New); err != nil {
+ logrus.Fatal(err)
+ }
+
+ if err := logger.RegisterLogOptValidator(name, ValidateLogOpts); err != nil {
+ logrus.Fatal(err)
+ }
+}
+
+type gcplogs struct {
+ client *logging.Client
+ instance *instanceInfo
+ container *containerInfo
+}
+
+type dockerLogEntry struct {
+ Instance *instanceInfo `json:"instance,omitempty"`
+ Container *containerInfo `json:"container,omitempty"`
+ Data string `json:"data,omitempty"`
+}
+
+type instanceInfo struct {
+ Zone string `json:"zone,omitempty"`
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+}
+
+type containerInfo struct {
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+ ImageName string `json:"imageName,omitempty"`
+ ImageID string `json:"imageId,omitempty"`
+ Created time.Time `json:"created,omitempty"`
+ Command string `json:"command,omitempty"`
+ Metadata map[string]string `json:"metadata,omitempty"`
+}
+
+// New creates a new logger that logs to Google Cloud Logging using the application
+// default credentials.
+//
+// See https://developers.google.com/identity/protocols/application-default-credentials
+func New(ctx logger.Context) (logger.Logger, error) {
+
+ var project string
+ if projectID != "" {
+ project = projectID
+ }
+ if projectID, found := ctx.Config[projectOptKey]; found {
+ project = projectID
+ }
+ if project == "" {
+		return nil, fmt.Errorf("No project was specified and the project couldn't be read from the metadata server. Please specify a project")
+ }
+
+ c, err := logging.NewClient(context.Background(), project, "gcplogs-docker-driver")
+ if err != nil {
+ return nil, err
+ }
+
+ if err := c.Ping(); err != nil {
+ return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err)
+ }
+
+ l := &gcplogs{
+ client: c,
+ container: &containerInfo{
+ Name: ctx.ContainerName,
+ ID: ctx.ContainerID,
+ ImageName: ctx.ContainerImageName,
+ ImageID: ctx.ContainerImageID,
+ Created: ctx.ContainerCreated,
+ Metadata: ctx.ExtraAttributes(nil),
+ },
+ }
+
+ if ctx.Config[logCmdKey] == "true" {
+ l.container.Command = ctx.Command()
+ }
+
+ if onGCE {
+ l.instance = &instanceInfo{
+ Zone: zone,
+ Name: instanceName,
+ ID: instanceID,
+ }
+ }
+
+	// The logger "overflows" when it receives more logs than it can send to
+	// Cloud Logging (above roughly 10,000 logs per second), and this overflow
+	// func is called for each dropped entry. We want to surface the error to
+	// the user without overly spamming /var/log/docker.log, so we log the
+	// first dropped entry and every 1000th one after that.
+ c.Overflow = func(_ *logging.Client, _ logging.Entry) error {
+ if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 {
+ logrus.Errorf("gcplogs driver has dropped %v logs", i)
+ }
+ return nil
+ }
+
+ return l, nil
+}
+
+// ValidateLogOpts validates the opts passed to the gcplogs driver. The
+// driver accepts only the gcp-project, gcp-log-cmd, labels and env options.
+func ValidateLogOpts(cfg map[string]string) error {
+ for k := range cfg {
+ switch k {
+ case projectOptKey, logLabelsKey, logEnvKey, logCmdKey:
+ default:
+ return fmt.Errorf("%q is not a valid option for the gcplogs driver", k)
+ }
+ }
+ return nil
+}
+
+func (l *gcplogs) Log(m *logger.Message) error {
+ return l.client.Log(logging.Entry{
+ Time: m.Timestamp,
+ Payload: &dockerLogEntry{
+ Instance: l.instance,
+ Container: l.container,
+ Data: string(m.Line),
+ },
+ })
+}
+
+func (l *gcplogs) Close() error {
+ return l.client.Flush()
+}
+
+func (l *gcplogs) Name() string {
+ return name
+}
diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go
index 80c1fbda7a..8d94c302fb 100644
--- a/daemon/logger/journald/read.go
+++ b/daemon/logger/journald/read.go
@@ -2,7 +2,6 @@
package journald
-// #cgo pkg-config: libsystemd-journal
// #include <sys/types.h>
// #include <sys/poll.h>
// #include <systemd/sd-journal.h>
diff --git a/daemon/logger/journald/read_native.go b/daemon/logger/journald/read_native.go
new file mode 100644
index 0000000000..bba6de55be
--- /dev/null
+++ b/daemon/logger/journald/read_native.go
@@ -0,0 +1,6 @@
+// +build linux,cgo,!static_build,journald,!journald_compat
+
+package journald
+
+// #cgo pkg-config: libsystemd
+import "C"
diff --git a/daemon/logger/journald/read_native_compat.go b/daemon/logger/journald/read_native_compat.go
new file mode 100644
index 0000000000..3f7a43c59e
--- /dev/null
+++ b/daemon/logger/journald/read_native_compat.go
@@ -0,0 +1,6 @@
+// +build linux,cgo,!static_build,journald,journald_compat
+
+package journald
+
+// #cgo pkg-config: libsystemd-journal
+import "C"
diff --git a/daemon/logger/jsonfilelog/jsonfilelog.go b/daemon/logger/jsonfilelog/jsonfilelog.go
index 60b6088630..9faa4e02db 100644
--- a/daemon/logger/jsonfilelog/jsonfilelog.go
+++ b/daemon/logger/jsonfilelog/jsonfilelog.go
@@ -25,7 +25,6 @@ type JSONFileLogger struct {
buf *bytes.Buffer
writer *loggerutils.RotateFileWriter
mu sync.Mutex
- ctx logger.Context
readers map[*logger.LogWatcher]struct{} // stores the active log followers
extra []byte // json-encoded extra attributes
}
diff --git a/docker/daemon_test.go b/docker/daemon_test.go
index 1be2ab8164..322e0b7604 100644
--- a/docker/daemon_test.go
+++ b/docker/daemon_test.go
@@ -247,124 +247,3 @@ func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) {
t.Fatalf("expected LogConfig type syslog, got %v", loadedConfig.LogConfig.Type)
}
}
-
-func TestLoadDaemonConfigWithMapOptions(t *testing.T) {
- c := &daemon.Config{}
- common := &cli.CommonFlags{}
- flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
-
- flags.Var(opts.NewNamedMapOpts("cluster-store-opts", c.ClusterOpts, nil), []string{"-cluster-store-opt"}, "")
- flags.Var(opts.NewNamedMapOpts("log-opts", c.LogConfig.Config, nil), []string{"-log-opt"}, "")
-
- f, err := ioutil.TempFile("", "docker-config-")
- if err != nil {
- t.Fatal(err)
- }
-
- configFile := f.Name()
- f.Write([]byte(`{
- "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"},
- "log-opts": {"tag": "test"}
-}`))
- f.Close()
-
- loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile)
- if err != nil {
- t.Fatal(err)
- }
- if loadedConfig == nil {
- t.Fatal("expected configuration, got nil")
- }
- if loadedConfig.ClusterOpts == nil {
- t.Fatal("expected cluster options, got nil")
- }
-
- expectedPath := "/var/lib/docker/discovery_certs/ca.pem"
- if caPath := loadedConfig.ClusterOpts["kv.cacertfile"]; caPath != expectedPath {
- t.Fatalf("expected %s, got %s", expectedPath, caPath)
- }
-
- if loadedConfig.LogConfig.Config == nil {
- t.Fatal("expected log config options, got nil")
- }
- if tag := loadedConfig.LogConfig.Config["tag"]; tag != "test" {
- t.Fatalf("expected log tag `test`, got %s", tag)
- }
-}
-
-func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) {
- c := &daemon.Config{}
- common := &cli.CommonFlags{}
- flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
- flags.BoolVar(&c.EnableUserlandProxy, []string{"-userland-proxy"}, true, "")
-
- f, err := ioutil.TempFile("", "docker-config-")
- if err != nil {
- t.Fatal(err)
- }
-
- if err := flags.ParseFlags([]string{}, false); err != nil {
- t.Fatal(err)
- }
-
- configFile := f.Name()
- f.Write([]byte(`{
- "userland-proxy": false
-}`))
- f.Close()
-
- loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile)
- if err != nil {
- t.Fatal(err)
- }
- if loadedConfig == nil {
- t.Fatal("expected configuration, got nil")
- }
-
- if loadedConfig.EnableUserlandProxy {
- t.Fatal("expected userland proxy to be disabled, got enabled")
- }
-
- // make sure reloading doesn't generate configuration
- // conflicts after normalizing boolean values.
- err = daemon.ReloadConfiguration(configFile, flags, func(reloadedConfig *daemon.Config) {
- if reloadedConfig.EnableUserlandProxy {
- t.Fatal("expected userland proxy to be disabled, got enabled")
- }
- })
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) {
- c := &daemon.Config{}
- common := &cli.CommonFlags{}
- flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
- flags.BoolVar(&c.EnableUserlandProxy, []string{"-userland-proxy"}, true, "")
-
- f, err := ioutil.TempFile("", "docker-config-")
- if err != nil {
- t.Fatal(err)
- }
-
- if err := flags.ParseFlags([]string{}, false); err != nil {
- t.Fatal(err)
- }
-
- configFile := f.Name()
- f.Write([]byte(`{}`))
- f.Close()
-
- loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile)
- if err != nil {
- t.Fatal(err)
- }
- if loadedConfig == nil {
- t.Fatal("expected configuration, got nil")
- }
-
- if !loadedConfig.EnableUserlandProxy {
- t.Fatal("expected userland proxy to be enabled, got disabled")
- }
-}
diff --git a/docker/daemon_unix_test.go b/docker/daemon_unix_test.go
index 889482b007..58b692b532 100644
--- a/docker/daemon_unix_test.go
+++ b/docker/daemon_unix_test.go
@@ -8,6 +8,7 @@ import (
"github.com/docker/docker/cli"
"github.com/docker/docker/daemon"
+ "github.com/docker/docker/opts"
"github.com/docker/docker/pkg/mflag"
)
@@ -41,3 +42,124 @@ func TestLoadDaemonConfigWithNetwork(t *testing.T) {
t.Fatalf("expected DefaultIP 127.0.0.1, got %s", loadedConfig.DefaultIP)
}
}
+
+func TestLoadDaemonConfigWithMapOptions(t *testing.T) {
+ c := &daemon.Config{}
+ common := &cli.CommonFlags{}
+ flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+
+ flags.Var(opts.NewNamedMapOpts("cluster-store-opts", c.ClusterOpts, nil), []string{"-cluster-store-opt"}, "")
+ flags.Var(opts.NewNamedMapOpts("log-opts", c.LogConfig.Config, nil), []string{"-log-opt"}, "")
+
+ f, err := ioutil.TempFile("", "docker-config-")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ configFile := f.Name()
+ f.Write([]byte(`{
+ "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"},
+ "log-opts": {"tag": "test"}
+}`))
+ f.Close()
+
+ loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if loadedConfig == nil {
+ t.Fatal("expected configuration, got nil")
+ }
+ if loadedConfig.ClusterOpts == nil {
+ t.Fatal("expected cluster options, got nil")
+ }
+
+ expectedPath := "/var/lib/docker/discovery_certs/ca.pem"
+ if caPath := loadedConfig.ClusterOpts["kv.cacertfile"]; caPath != expectedPath {
+ t.Fatalf("expected %s, got %s", expectedPath, caPath)
+ }
+
+ if loadedConfig.LogConfig.Config == nil {
+ t.Fatal("expected log config options, got nil")
+ }
+ if tag := loadedConfig.LogConfig.Config["tag"]; tag != "test" {
+ t.Fatalf("expected log tag `test`, got %s", tag)
+ }
+}
+
+func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) {
+ c := &daemon.Config{}
+ common := &cli.CommonFlags{}
+ flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+ flags.BoolVar(&c.EnableUserlandProxy, []string{"-userland-proxy"}, true, "")
+
+ f, err := ioutil.TempFile("", "docker-config-")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := flags.ParseFlags([]string{}, false); err != nil {
+ t.Fatal(err)
+ }
+
+ configFile := f.Name()
+ f.Write([]byte(`{
+ "userland-proxy": false
+}`))
+ f.Close()
+
+ loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if loadedConfig == nil {
+ t.Fatal("expected configuration, got nil")
+ }
+
+ if loadedConfig.EnableUserlandProxy {
+ t.Fatal("expected userland proxy to be disabled, got enabled")
+ }
+
+ // make sure reloading doesn't generate configuration
+ // conflicts after normalizing boolean values.
+ err = daemon.ReloadConfiguration(configFile, flags, func(reloadedConfig *daemon.Config) {
+ if reloadedConfig.EnableUserlandProxy {
+ t.Fatal("expected userland proxy to be disabled, got enabled")
+ }
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) {
+ c := &daemon.Config{}
+ common := &cli.CommonFlags{}
+ flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+ flags.BoolVar(&c.EnableUserlandProxy, []string{"-userland-proxy"}, true, "")
+
+ f, err := ioutil.TempFile("", "docker-config-")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := flags.ParseFlags([]string{}, false); err != nil {
+ t.Fatal(err)
+ }
+
+ configFile := f.Name()
+ f.Write([]byte(`{}`))
+ f.Close()
+
+ loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if loadedConfig == nil {
+ t.Fatal("expected configuration, got nil")
+ }
+
+ if !loadedConfig.EnableUserlandProxy {
+ t.Fatal("expected userland proxy to be enabled, got disabled")
+ }
+}
diff --git a/docs/admin/logging/gcplogs.md b/docs/admin/logging/gcplogs.md
new file mode 100644
index 0000000000..08fd858da0
--- /dev/null
+++ b/docs/admin/logging/gcplogs.md
@@ -0,0 +1,70 @@
+<!--[metadata]>
++++
+title = "Google Cloud Logging driver"
+description = "Describes how to use the Google Cloud Logging driver."
+keywords = ["gcplogs, google, docker, logging, driver"]
+[menu.main]
+parent = "smn_logging"
+weight = 2
++++
+<![end-metadata]-->
+
+# Google Cloud Logging driver
+
+The Google Cloud Logging driver sends container logs to <a href="https://cloud.google.com/logging/docs/" target="_blank">Google Cloud
+Logging</a>.
+
+## Usage
+
+You can configure the default logging driver by passing the `--log-driver`
+option to the Docker daemon:
+
+ docker daemon --log-driver=gcplogs
+
+You can set the logging driver for a specific container by using the
+`--log-driver` option to `docker run`:
+
+ docker run --log-driver=gcplogs ...
+
+This log driver does not implement a reader, so it is incompatible with
+`docker logs`.
+
+If Docker detects that it is running in a Google Cloud Project, it will discover configuration
+from the <a href="https://cloud.google.com/compute/docs/metadata" target="_blank">instance metadata service</a>.
+Otherwise, the user must specify which project to log to using the `gcp-project`
+log option and Docker will attempt to obtain credentials from the
+<a href="https://developers.google.com/identity/protocols/application-default-credentials" target="_blank">Google Application Default Credentials</a>.
+The `gcp-project` option takes precedence over information discovered from the metadata server,
+so a Docker daemon running in a Google Cloud Project can be overridden to log to a different
+Google Cloud Project using `gcp-project`.
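+
+For example, a daemon running outside Google Cloud could log to a hypothetical
+project as follows (the project ID `my-gcp-project` is illustrative):
+
+    docker run --log-driver=gcplogs \
+      --log-opt gcp-project=my-gcp-project \
+      your/application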
+
+## gcplogs options
+
+You can use the `--log-opt NAME=VALUE` flag to specify these additional Google
+Cloud Logging driver options:
+
+| Option | Required | Description |
+|-----------------------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------|
+| `gcp-project` | optional | Which GCP project to log to. Defaults to discovering this value from the GCE metadata service. |
+| `gcp-log-cmd` | optional | Whether to log the command that the container was started with. Defaults to false. |
+| `labels`                    | optional | Comma-separated list of label keys. If the container has these labels, their values are included in the message.                             |
+| `env`                       | optional | Comma-separated list of environment variable keys. If the container has these variables set, their values are included in the message.       |
+
+If there is a collision between a `label` and an `env` key, the value of the
+environment variable takes precedence. Both options add additional fields to
+the attributes of a logging message.
+
+Below is an example of the logging options required to log to the default
+logging destination, which is discovered by querying the GCE metadata server.
+
+ docker run --log-driver=gcplogs \
+      --log-opt labels=location \
+      --log-opt env=TEST \
+      --log-opt gcp-log-cmd=true \
+      --env "TEST=false" \
+      --label location=west \
+      your/application
+
+This configuration also directs the driver to include in the payload the label
+`location`, the environment variable `TEST`, and the command used to start the
+container.
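+
+For reference, each message is sent as a structured payload shaped like the
+driver's `dockerLogEntry` type. The values below are purely illustrative:
+
+    {
+      "instance": {"zone": "us-central1-a", "name": "my-instance", "id": "1234567890"},
+      "container": {"name": "/my-app", "id": "c0ffee12d34", "imageName": "your/application"},
+      "data": "the log line"
+    }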
diff --git a/docs/admin/logging/overview.md b/docs/admin/logging/overview.md
index 825e3ecac0..e3d3d11256 100644
--- a/docs/admin/logging/overview.md
+++ b/docs/admin/logging/overview.md
@@ -27,6 +27,7 @@ container's logging driver. The following options are supported:
| `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs. |
| `splunk` | Splunk logging driver for Docker. Writes log messages to `splunk` using HTTP Event Collector. |
| `etwlogs` | ETW logging driver for Docker on Windows. Writes log messages as ETW events. |
+| `gcplogs` | Google Cloud Logging driver for Docker. Writes log messages to Google Cloud Logging. |
The `docker logs` command is available only for the `json-file` and `journald`
logging drivers.
@@ -213,4 +214,14 @@ as an ETW event. An ETW listener can then be created to listen for these events.
For detailed information on working with this logging driver, see [the ETW logging driver](etwlogs.md) reference documentation.
+## Google Cloud Logging
+
+The Google Cloud Logging driver supports the following options:
+
+    --log-opt gcp-project=<gcp_project>
+    --log-opt labels=<label1>,<label2>
+    --log-opt env=<envvar1>,<envvar2>
+    --log-opt gcp-log-cmd=true
+
+For detailed information about working with this logging driver, see the
+[Google Cloud Logging driver](gcplogs.md) reference documentation.
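+
+For example (the project ID is illustrative):
+
+    docker run --log-driver=gcplogs --log-opt gcp-project=my-gcp-project your/application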
diff --git a/hack/make.sh b/hack/make.sh
index 8958ada0b7..c0c8914b64 100755
--- a/hack/make.sh
+++ b/hack/make.sh
@@ -118,8 +118,10 @@ fi
if [ -z "$DOCKER_CLIENTONLY" ]; then
DOCKER_BUILDTAGS+=" daemon"
- if pkg-config libsystemd-journal 2> /dev/null ; then
+ if pkg-config 'libsystemd >= 209' 2> /dev/null ; then
DOCKER_BUILDTAGS+=" journald"
+ elif pkg-config 'libsystemd-journal' 2> /dev/null ; then
+ DOCKER_BUILDTAGS+=" journald journald_compat"
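+		# journald_compat selects daemon/logger/journald/read_native_compat.go,
+		# which links against the older standalone libsystemd-journal library,
+		# while plain journald uses read_native.go and the merged libsystemd.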
fi
fi
diff --git a/hack/vendor.sh b/hack/vendor.sh
index d025766f17..8e0831b4aa 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -82,4 +82,9 @@ clone git gopkg.in/fsnotify.v1 v1.2.0
clone git github.com/aws/aws-sdk-go v0.9.9
clone git github.com/vaughan0/go-ini a98ad7ee00ec53921f08832bc06ecf7fd600e6a1
+# gcplogs deps
+clone git golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be https://github.com/golang/oauth2.git
+clone git google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 https://code.googlesource.com/google-api-go-client
+clone git google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 https://code.googlesource.com/gocloud
+
clean
diff --git a/integration-cli/docker_cli_run_unix_test.go b/integration-cli/docker_cli_run_unix_test.go
index 974249e504..173f6b5bbd 100644
--- a/integration-cli/docker_cli_run_unix_test.go
+++ b/integration-cli/docker_cli_run_unix_test.go
@@ -919,3 +919,40 @@ func (s *DockerSuite) TestRunSeccompWithDefaultProfile(c *check.C) {
c.Assert(err, checker.NotNil, check.Commentf(out))
c.Assert(strings.TrimSpace(out), checker.Equals, "unshare: unshare failed: Operation not permitted")
}
+
+// TestRunDeviceSymlink checks running a container with a device that follows a symlink (#13840)
+func (s *DockerSuite) TestRunDeviceSymlink(c *check.C) {
+ testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm, SameHostDaemon)
+ if _, err := os.Stat("/dev/zero"); err != nil {
+ c.Skip("Host does not have /dev/zero")
+ }
+
+ // Create a temporary directory to create symlink
+ tmpDir, err := ioutil.TempDir("", "docker_device_follow_symlink_tests")
+ c.Assert(err, checker.IsNil)
+
+ defer os.RemoveAll(tmpDir)
+
+ // Create a symbolic link to /dev/zero
+ symZero := filepath.Join(tmpDir, "zero")
+ err = os.Symlink("/dev/zero", symZero)
+ c.Assert(err, checker.IsNil)
+
+ // Create a temporary file "temp" inside tmpDir, write some data to "tmpDir/temp",
+ // then create a symlink "tmpDir/file" to the temporary file "tmpDir/temp".
+ tmpFile := filepath.Join(tmpDir, "temp")
+ err = ioutil.WriteFile(tmpFile, []byte("temp"), 0666)
+ c.Assert(err, checker.IsNil)
+ symFile := filepath.Join(tmpDir, "file")
+ err = os.Symlink(tmpFile, symFile)
+ c.Assert(err, checker.IsNil)
+
+ // md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23
+ out, _ := dockerCmd(c, "run", "--device", symZero+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum")
+ c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23"))
+
+	// Using the symlink "tmpDir/file", which points to the regular file "tmpDir/temp", results in an error because it is not a device node.
+ out, _, err = dockerCmdWithError("run", "--device", symFile+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum")
+ c.Assert(err, check.NotNil)
+ c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "not a device node", check.Commentf("expected output 'not a device node'"))
+}
diff --git a/integration-cli/docker_cli_stats_test.go b/integration-cli/docker_cli_stats_test.go
index 4a3682bb9e..42c76ba30d 100644
--- a/integration-cli/docker_cli_stats_test.go
+++ b/integration-cli/docker_cli_stats_test.go
@@ -97,7 +97,10 @@ func (s *DockerSuite) TestStatsAllNoStream(c *check.C) {
func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) {
// Windows does not support stats
- testRequires(c, DaemonIsLinux)
+ // TODO: remove SameHostDaemon
+	// It was added because there seems to be a race that makes this test fail
+ // for remote daemons (namely in the win2lin CI). We highly welcome contributions to fix this.
+ testRequires(c, DaemonIsLinux, SameHostDaemon)
id := make(chan string)
addedChan := make(chan struct{})
diff --git a/man/docker-create.1.md b/man/docker-create.1.md
index 36f0d94ef3..6a2640d205 100644
--- a/man/docker-create.1.md
+++ b/man/docker-create.1.md
@@ -214,7 +214,7 @@ millions of trillions.
Add link to another container in the form of <name or id>:alias or just
<name or id> in which case the alias will match the name.
-**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*none*"
+**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*"
Logging driver for container. Default is defined by daemon `--log-driver` flag.
**Warning**: the `docker logs` command works only for the `json-file` and
`journald` logging drivers.
diff --git a/man/docker-daemon.8.md b/man/docker-daemon.8.md
index c7ab68628b..9f699c7124 100644
--- a/man/docker-daemon.8.md
+++ b/man/docker-daemon.8.md
@@ -185,7 +185,7 @@ unix://[/path/to/socket] to use.
**--label**="[]"
Set key=value labels to the daemon (displayed in `docker info`)
-**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*none*"
+**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*"
Default driver for container logs. Default is `json-file`.
**Warning**: `docker logs` command works only for `json-file` logging driver.
diff --git a/man/docker-logout.1.md b/man/docker-logout.1.md
index d116986798..a8a4b7c3c0 100644
--- a/man/docker-logout.1.md
+++ b/man/docker-logout.1.md
@@ -24,7 +24,7 @@ There are no available options.
# docker logout localhost:8080
# See also
-**docker-login(1)** to register or log in to a Docker registry server.
+**docker-login(1)** to log in to a Docker registry server.
# HISTORY
June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io)
diff --git a/man/docker-run.1.md b/man/docker-run.1.md
index 90e3ebdf44..bf75fb68ef 100644
--- a/man/docker-run.1.md
+++ b/man/docker-run.1.md
@@ -320,7 +320,7 @@ container can access the exposed port via a private networking interface. Docker
will set some environment variables in the client container to help indicate
which interface and port to use.
-**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*none*"
+**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*"
Logging driver for container. Default is defined by daemon `--log-driver` flag.
**Warning**: the `docker logs` command works only for the `json-file` and
`journald` logging drivers.
diff --git a/man/docker.1.md b/man/docker.1.md
index f2bb68f2ce..f59f98abdf 100644
--- a/man/docker.1.md
+++ b/man/docker.1.md
@@ -132,7 +132,7 @@ inside it)
See **docker-load(1)** for full documentation on the **load** command.
**login**
- Register or login to a Docker Registry
+ Log in to a Docker Registry
See **docker-login(1)** for full documentation on the **login** command.
**logout**
diff --git a/migrate/v1/migratev1_test.go b/migrate/v1/migratev1_test.go
index 6e8af7fdc8..73878f11e9 100644
--- a/migrate/v1/migratev1_test.go
+++ b/migrate/v1/migratev1_test.go
@@ -10,6 +10,7 @@ import (
"os"
"path/filepath"
"reflect"
+ "runtime"
"testing"
"github.com/docker/distribution/digest"
@@ -62,6 +63,10 @@ func TestMigrateRefs(t *testing.T) {
}
func TestMigrateContainers(t *testing.T) {
+ // TODO Windows: Figure out why this is failing
+ if runtime.GOOS == "windows" {
+ t.Skip("Failing on Windows")
+ }
tmpdir, err := ioutil.TempDir("", "migrate-containers")
if err != nil {
t.Fatal(err)
@@ -133,6 +138,10 @@ func TestMigrateContainers(t *testing.T) {
}
func TestMigrateImages(t *testing.T) {
+ // TODO Windows: Figure out why this is failing
+ if runtime.GOOS == "windows" {
+ t.Skip("Failing on Windows")
+ }
tmpdir, err := ioutil.TempDir("", "migrate-images")
if err != nil {
t.Fatal(err)
diff --git a/vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go b/vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go
new file mode 100644
index 0000000000..48610e3627
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.5
+
+package ctxhttp
+
+import "net/http"
+
+func canceler(client *http.Client, req *http.Request) func() {
+ ch := make(chan struct{})
+ req.Cancel = ch
+
+ return func() {
+ close(ch)
+ }
+}
diff --git a/vendor/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/vendor/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
new file mode 100644
index 0000000000..56bcbadb85
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.5
+
+package ctxhttp
+
+import "net/http"
+
+type requestCanceler interface {
+ CancelRequest(*http.Request)
+}
+
+func canceler(client *http.Client, req *http.Request) func() {
+ rc, ok := client.Transport.(requestCanceler)
+ if !ok {
+ return func() {}
+ }
+ return func() {
+ rc.CancelRequest(req)
+ }
+}
diff --git a/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 0000000000..504dd63ed9
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "golang.org/x/net/context"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
+// If the client is nil, http.DefaultClient is used.
+// If the context is canceled or times out, ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+
+ // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
+ cancel := canceler(client, req)
+
+ type responseAndError struct {
+ resp *http.Response
+ err error
+ }
+ result := make(chan responseAndError, 1)
+
+ go func() {
+ resp, err := client.Do(req)
+ result <- responseAndError{resp, err}
+ }()
+
+ select {
+ case <-ctx.Done():
+ cancel()
+ return nil, ctx.Err()
+ case r := <-result:
+ return r.resp, r.err
+ }
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+ return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
diff --git a/vendor/src/golang.org/x/net/http2/.gitignore b/vendor/src/golang.org/x/net/http2/.gitignore
new file mode 100644
index 0000000000..190f12234a
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/.gitignore
@@ -0,0 +1,2 @@
+*~
+h2i/h2i
diff --git a/vendor/src/golang.org/x/net/http2/Dockerfile b/vendor/src/golang.org/x/net/http2/Dockerfile
new file mode 100644
index 0000000000..b4e14d55a5
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/Dockerfile
@@ -0,0 +1,44 @@
+#
+# This Dockerfile builds a recent curl with HTTP/2 client support, using
+# a recent nghttp2 build.
+#
+# See the Makefile for how to tag it. If Docker and that image is found, the
+# Go tests use this curl binary for integration tests.
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+ apt-get upgrade -y && \
+ apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+ autotools-dev libtool pkg-config zlib1g-dev \
+ libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+ automake autoconf
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER af24f8394e43f4
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget http://curl.haxx.se/download/curl-7.40.0.tar.gz
+RUN tar -zxvf curl-7.40.0.tar.gz
+WORKDIR /root/curl-7.40.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+
diff --git a/vendor/src/golang.org/x/net/http2/Makefile b/vendor/src/golang.org/x/net/http2/Makefile
new file mode 100644
index 0000000000..55fd826f77
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/Makefile
@@ -0,0 +1,3 @@
+curlimage:
+ docker build -t gohttp2/curl .
+
diff --git a/vendor/src/golang.org/x/net/http2/README b/vendor/src/golang.org/x/net/http2/README
new file mode 100644
index 0000000000..360d5aa379
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/README
@@ -0,0 +1,20 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use. It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+ but are being worked on.
+* The client work has just started but, since it shares a lot of code
+  with the server, it is coming along much quicker.
+
+Docs are at https://godoc.org/golang.org/x/net/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome!
+
+Contributing: https://golang.org/doc/contribute.html
+Bugs: https://golang.org/issue/new?title=x/net/http2:+
diff --git a/vendor/src/golang.org/x/net/http2/buffer.go b/vendor/src/golang.org/x/net/http2/buffer.go
new file mode 100644
index 0000000000..c43954cf04
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/buffer.go
@@ -0,0 +1,76 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+ "errors"
+)
+
+// buffer is an io.ReadWriteCloser backed by a fixed size buffer.
+// It never allocates, but moves old data as new data is written.
+type buffer struct {
+ buf []byte
+ r, w int
+ closed bool
+ err error // err to return to reader
+}
+
+var (
+ errReadEmpty = errors.New("read from empty buffer")
+ errWriteClosed = errors.New("write on closed buffer")
+ errWriteFull = errors.New("write on full buffer")
+)
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *buffer) Read(p []byte) (n int, err error) {
+ n = copy(p, b.buf[b.r:b.w])
+ b.r += n
+ if b.closed && b.r == b.w {
+ err = b.err
+ } else if b.r == b.w && n == 0 {
+ err = errReadEmpty
+ }
+ return n, err
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *buffer) Len() int {
+ return b.w - b.r
+}
+
+// Write copies bytes from p into the buffer.
+// It is an error to write more data than the buffer can hold.
+func (b *buffer) Write(p []byte) (n int, err error) {
+ if b.closed {
+ return 0, errWriteClosed
+ }
+
+ // Slide existing data to beginning.
+ if b.r > 0 && len(p) > len(b.buf)-b.w {
+ copy(b.buf, b.buf[b.r:b.w])
+ b.w -= b.r
+ b.r = 0
+ }
+
+ // Write new data.
+ n = copy(b.buf[b.w:], p)
+ b.w += n
+ if n < len(p) {
+ err = errWriteFull
+ }
+ return n, err
+}
+
+// Close marks the buffer as closed. Future calls to Write will
+// return an error. Future calls to Read, once the buffer is
+// empty, will return err.
+func (b *buffer) Close(err error) {
+ if !b.closed {
+ b.closed = true
+ b.err = err
+ }
+}
diff --git a/vendor/src/golang.org/x/net/http2/errors.go b/vendor/src/golang.org/x/net/http2/errors.go
new file mode 100644
index 0000000000..c885328a82
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/errors.go
@@ -0,0 +1,78 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import "fmt"
+
+// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
+type ErrCode uint32
+
+const (
+ ErrCodeNo ErrCode = 0x0
+ ErrCodeProtocol ErrCode = 0x1
+ ErrCodeInternal ErrCode = 0x2
+ ErrCodeFlowControl ErrCode = 0x3
+ ErrCodeSettingsTimeout ErrCode = 0x4
+ ErrCodeStreamClosed ErrCode = 0x5
+ ErrCodeFrameSize ErrCode = 0x6
+ ErrCodeRefusedStream ErrCode = 0x7
+ ErrCodeCancel ErrCode = 0x8
+ ErrCodeCompression ErrCode = 0x9
+ ErrCodeConnect ErrCode = 0xa
+ ErrCodeEnhanceYourCalm ErrCode = 0xb
+ ErrCodeInadequateSecurity ErrCode = 0xc
+ ErrCodeHTTP11Required ErrCode = 0xd
+)
+
+var errCodeName = map[ErrCode]string{
+ ErrCodeNo: "NO_ERROR",
+ ErrCodeProtocol: "PROTOCOL_ERROR",
+ ErrCodeInternal: "INTERNAL_ERROR",
+ ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
+ ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
+ ErrCodeStreamClosed: "STREAM_CLOSED",
+ ErrCodeFrameSize: "FRAME_SIZE_ERROR",
+ ErrCodeRefusedStream: "REFUSED_STREAM",
+ ErrCodeCancel: "CANCEL",
+ ErrCodeCompression: "COMPRESSION_ERROR",
+ ErrCodeConnect: "CONNECT_ERROR",
+ ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
+ ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
+ ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
+}
+
+func (e ErrCode) String() string {
+ if s, ok := errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("unknown error code 0x%x", uint32(e))
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection.
+type ConnectionError ErrCode
+
+func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
+
+// StreamError is an error that only affects one stream within an
+// HTTP/2 connection.
+type StreamError struct {
+ StreamID uint32
+ Code ErrCode
+}
+
+func (e StreamError) Error() string {
+ return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
+}
+
+// 6.9.1 The Flow Control Window
+// "If a sender receives a WINDOW_UPDATE that causes a flow control
+// window to exceed this maximum it MUST terminate either the stream
+// or the connection, as appropriate. For streams, [...]; for the
+// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
+type goAwayFlowError struct{}
+
+func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
diff --git a/vendor/src/golang.org/x/net/http2/flow.go b/vendor/src/golang.org/x/net/http2/flow.go
new file mode 100644
index 0000000000..540fc4283e
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/flow.go
@@ -0,0 +1,51 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// Flow control
+
+package http2
+
+// flow is the flow control window's size.
+type flow struct {
+ // n is the number of DATA bytes we're allowed to send.
+ // A flow is kept both on a conn and a per-stream.
+ n int32
+
+ // conn points to the shared connection-level flow that is
+ // shared by all streams on that conn. It is nil for the flow
+ // that's on the conn directly.
+ conn *flow
+}
+
+func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
+
+func (f *flow) available() int32 {
+ n := f.n
+ if f.conn != nil && f.conn.n < n {
+ n = f.conn.n
+ }
+ return n
+}
+
+func (f *flow) take(n int32) {
+ if n > f.available() {
+ panic("internal error: took too much")
+ }
+ f.n -= n
+ if f.conn != nil {
+ f.conn.n -= n
+ }
+}
+
+// add adds n bytes (positive or negative) to the flow control window.
+// It returns false if the sum would exceed 2^31-1.
+func (f *flow) add(n int32) bool {
+ remain := (1<<31 - 1) - f.n
+ if n > remain {
+ return false
+ }
+ f.n += n
+ return true
+}
diff --git a/vendor/src/golang.org/x/net/http2/frame.go b/vendor/src/golang.org/x/net/http2/frame.go
new file mode 100644
index 0000000000..e8b872a19b
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/frame.go
@@ -0,0 +1,1113 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+)
+
+const frameHeaderLen = 9
+
+var padZeros = make([]byte, 255) // zeros for padding
+
+// A FrameType is a registered frame type as defined in
+// http://http2.github.io/http2-spec/#rfc.section.11.2
+type FrameType uint8
+
+const (
+ FrameData FrameType = 0x0
+ FrameHeaders FrameType = 0x1
+ FramePriority FrameType = 0x2
+ FrameRSTStream FrameType = 0x3
+ FrameSettings FrameType = 0x4
+ FramePushPromise FrameType = 0x5
+ FramePing FrameType = 0x6
+ FrameGoAway FrameType = 0x7
+ FrameWindowUpdate FrameType = 0x8
+ FrameContinuation FrameType = 0x9
+)
+
+var frameName = map[FrameType]string{
+ FrameData: "DATA",
+ FrameHeaders: "HEADERS",
+ FramePriority: "PRIORITY",
+ FrameRSTStream: "RST_STREAM",
+ FrameSettings: "SETTINGS",
+ FramePushPromise: "PUSH_PROMISE",
+ FramePing: "PING",
+ FrameGoAway: "GOAWAY",
+ FrameWindowUpdate: "WINDOW_UPDATE",
+ FrameContinuation: "CONTINUATION",
+}
+
+func (t FrameType) String() string {
+ if s, ok := frameName[t]; ok {
+ return s
+ }
+ return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+}
+
+// Flags is a bitmask of HTTP/2 flags.
+// The meaning of flags varies depending on the frame type.
+type Flags uint8
+
+// Has reports whether f contains all (0 or more) flags in v.
+func (f Flags) Has(v Flags) bool {
+ return (f & v) == v
+}
+
+// Frame-specific FrameHeader flag bits.
+const (
+ // Data Frame
+ FlagDataEndStream Flags = 0x1
+ FlagDataPadded Flags = 0x8
+
+ // Headers Frame
+ FlagHeadersEndStream Flags = 0x1
+ FlagHeadersEndHeaders Flags = 0x4
+ FlagHeadersPadded Flags = 0x8
+ FlagHeadersPriority Flags = 0x20
+
+ // Settings Frame
+ FlagSettingsAck Flags = 0x1
+
+ // Ping Frame
+ FlagPingAck Flags = 0x1
+
+ // Continuation Frame
+ FlagContinuationEndHeaders Flags = 0x4
+
+ FlagPushPromiseEndHeaders Flags = 0x4
+ FlagPushPromisePadded Flags = 0x8
+)
+
+var flagName = map[FrameType]map[Flags]string{
+ FrameData: {
+ FlagDataEndStream: "END_STREAM",
+ FlagDataPadded: "PADDED",
+ },
+ FrameHeaders: {
+ FlagHeadersEndStream: "END_STREAM",
+ FlagHeadersEndHeaders: "END_HEADERS",
+ FlagHeadersPadded: "PADDED",
+ FlagHeadersPriority: "PRIORITY",
+ },
+ FrameSettings: {
+ FlagSettingsAck: "ACK",
+ },
+ FramePing: {
+ FlagPingAck: "ACK",
+ },
+ FrameContinuation: {
+ FlagContinuationEndHeaders: "END_HEADERS",
+ },
+ FramePushPromise: {
+ FlagPushPromiseEndHeaders: "END_HEADERS",
+ FlagPushPromisePadded: "PADDED",
+ },
+}
+
+// a frameParser parses a frame given its FrameHeader and payload
+// bytes. The length of payload will always equal fh.Length (which
+// might be 0).
+type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
+
+var frameParsers = map[FrameType]frameParser{
+ FrameData: parseDataFrame,
+ FrameHeaders: parseHeadersFrame,
+ FramePriority: parsePriorityFrame,
+ FrameRSTStream: parseRSTStreamFrame,
+ FrameSettings: parseSettingsFrame,
+ FramePushPromise: parsePushPromise,
+ FramePing: parsePingFrame,
+ FrameGoAway: parseGoAwayFrame,
+ FrameWindowUpdate: parseWindowUpdateFrame,
+ FrameContinuation: parseContinuationFrame,
+}
+
+func typeFrameParser(t FrameType) frameParser {
+ if f := frameParsers[t]; f != nil {
+ return f
+ }
+ return parseUnknownFrame
+}
+
+// A FrameHeader is the 9 byte header of all HTTP/2 frames.
+//
+// See http://http2.github.io/http2-spec/#FrameHeader
+type FrameHeader struct {
+ valid bool // caller can access []byte fields in the Frame
+
+ // Type is the 1 byte frame type. There are ten standard frame
+ // types, but extension frame types may be written by WriteRawFrame
+ // and will be returned by ReadFrame (as UnknownFrame).
+ Type FrameType
+
+ // Flags are the 1 byte of 8 potential bit flags per frame.
+ // They are specific to the frame type.
+ Flags Flags
+
+ // Length is the length of the frame, not including the 9 byte header.
+ // The maximum size is one byte less than 16MB (uint24), but only
+ // frames up to 16KB are allowed without peer agreement.
+ Length uint32
+
+ // StreamID is which stream this frame is for. Certain frames
+ // are not stream-specific, in which case this field is 0.
+ StreamID uint32
+}
+
+// Header returns h. It exists so FrameHeaders can be embedded in other
+// specific frame types and implement the Frame interface.
+func (h FrameHeader) Header() FrameHeader { return h }
+
+func (h FrameHeader) String() string {
+ var buf bytes.Buffer
+ buf.WriteString("[FrameHeader ")
+ buf.WriteString(h.Type.String())
+ if h.Flags != 0 {
+ buf.WriteString(" flags=")
+ set := 0
+ for i := uint8(0); i < 8; i++ {
+ if h.Flags&(1<<i) == 0 {
+ continue
+ }
+ set++
+ if set > 1 {
+ buf.WriteByte('|')
+ }
+ name := flagName[h.Type][Flags(1<<i)]
+ if name != "" {
+ buf.WriteString(name)
+ } else {
+ fmt.Fprintf(&buf, "0x%x", 1<<i)
+ }
+ }
+ }
+ if h.StreamID != 0 {
+ fmt.Fprintf(&buf, " stream=%d", h.StreamID)
+ }
+ fmt.Fprintf(&buf, " len=%d]", h.Length)
+ return buf.String()
+}
+
+func (h *FrameHeader) checkValid() {
+ if !h.valid {
+ panic("Frame accessor called on non-owned Frame")
+ }
+}
+
+func (h *FrameHeader) invalidate() { h.valid = false }
+
+// fhBytes is a pool of scratch buffers for frame header bytes.
+// Used only by ReadFrameHeader.
+var fhBytes = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, frameHeaderLen)
+ return &buf
+ },
+}
+
+// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
+// Most users should use Framer.ReadFrame instead.
+func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
+ bufp := fhBytes.Get().(*[]byte)
+ defer fhBytes.Put(bufp)
+ return readFrameHeader(*bufp, r)
+}
+
+func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {
+ _, err := io.ReadFull(r, buf[:frameHeaderLen])
+ if err != nil {
+ return FrameHeader{}, err
+ }
+ return FrameHeader{
+ Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
+ Type: FrameType(buf[3]),
+ Flags: Flags(buf[4]),
+ StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
+ valid: true,
+ }, nil
+}
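
ReadFrameHeader accepts any reader positioned at a frame boundary; hand-assembling the 9 bytes of a PING ACK header shows the field layout. A sketch, assuming imports of bytes and fmt plus the canonical golang.org/x/net/http2 path:

    hdr := []byte{
        0x00, 0x00, 0x08, // Length: 8 (uint24, big-endian)
        0x06,             // Type: PING
        0x01,             // Flags: ACK
        0x00, 0x00, 0x00, 0x00, // StreamID: 0 (connection-level)
    }
    fh, err := http2.ReadFrameHeader(bytes.NewReader(hdr))
    if err != nil {
        // handle err
    }
    fmt.Println(fh) // [FrameHeader PING flags=ACK len=8]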
+
+// A Frame is the base interface implemented by all frame types.
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type Frame interface {
+ Header() FrameHeader
+
+ // invalidate is called by Framer.ReadFrame to mark this
+ // frame's buffers as invalid, since the subsequent
+ // frame will reuse them.
+ invalidate()
+}
+
+// A Framer reads and writes Frames.
+type Framer struct {
+ r io.Reader
+ lastFrame Frame
+
+ maxReadSize uint32
+ headerBuf [frameHeaderLen]byte
+
+ // TODO: let getReadBuf be configurable, and use a less memory-pinning
+ // allocator in server.go to minimize memory pinned for many idle conns.
+ // Will probably also need to make frame invalidation have a hook too.
+ getReadBuf func(size uint32) []byte
+ readBuf []byte // cache for default getReadBuf
+
+ maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+ w io.Writer
+ wbuf []byte
+
+ // AllowIllegalWrites permits the Framer's Write methods to
+ // write frames that do not conform to the HTTP/2 spec. This
+ // permits using the Framer to test other HTTP/2
+ // implementations' conformance to the spec.
+ // If false, the Write methods will return an error rather
+ // than write a non-conforming frame.
+ AllowIllegalWrites bool
+
+ // TODO: track which type of frame & with which flags was sent
+ // last. Then return an error (unless AllowIllegalWrites) if
+ // we're in the middle of a header block and a
+ // non-Continuation or Continuation on a different stream is
+ // attempted to be written.
+}
+
+func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
+ // Write the FrameHeader.
+ f.wbuf = append(f.wbuf[:0],
+ 0, // 3 bytes of length, filled in in endWrite
+ 0,
+ 0,
+ byte(ftype),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+}
+
+func (f *Framer) endWrite() error {
+ // Now that we know the final size, fill in the FrameHeader in
+ // the space previously reserved for it. Abuse append.
+ length := len(f.wbuf) - frameHeaderLen
+ if length >= (1 << 24) {
+ return ErrFrameTooLarge
+ }
+ _ = append(f.wbuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length))
+ n, err := f.w.Write(f.wbuf)
+ if err == nil && n != len(f.wbuf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
+
+func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
+func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
+func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+func (f *Framer) writeUint32(v uint32) {
+ f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+ minMaxFrameSize = 1 << 14
+ maxFrameSize = 1<<24 - 1
+)
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func NewFramer(w io.Writer, r io.Reader) *Framer {
+ fr := &Framer{
+ w: w,
+ r: r,
+ }
+ fr.getReadBuf = func(size uint32) []byte {
+ if cap(fr.readBuf) >= int(size) {
+ return fr.readBuf[:size]
+ }
+ fr.readBuf = make([]byte, size)
+ return fr.readBuf
+ }
+ fr.SetMaxReadFrameSize(maxFrameSize)
+ return fr
+}
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *Framer) SetMaxReadFrameSize(v uint32) {
+ if v > maxFrameSize {
+ v = maxFrameSize
+ }
+ fr.maxReadSize = v
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+// If the frame is larger than previously set with SetMaxReadFrameSize,
+// the returned error is ErrFrameTooLarge.
+func (fr *Framer) ReadFrame() (Frame, error) {
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
+ }
+ fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
+ if err != nil {
+ return nil, err
+ }
+ if fh.Length > fr.maxReadSize {
+ return nil, ErrFrameTooLarge
+ }
+ payload := fr.getReadBuf(fh.Length)
+ if _, err := io.ReadFull(fr.r, payload); err != nil {
+ return nil, err
+ }
+ f, err := typeFrameParser(fh.Type)(fh, payload)
+ if err != nil {
+ return nil, err
+ }
+ fr.lastFrame = f
+ return f, nil
+}
+
+// A DataFrame conveys arbitrary, variable-length sequences of octets
+// associated with a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.1
+type DataFrame struct {
+ FrameHeader
+ data []byte
+}
+
+func (f *DataFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+// Data returns the frame's data octets, not including any padding
+// size byte or padding suffix bytes.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *DataFrame) Data() []byte {
+ f.checkValid()
+ return f.data
+}
+
+func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ f := &DataFrame{
+ FrameHeader: fh,
+ }
+ var padSize byte
+ if fh.Flags.Has(FlagDataPadded) {
+ var err error
+ payload, padSize, err = readByte(payload)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if int(padSize) > len(payload) {
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ f.data = payload[:len(payload)-int(padSize)]
+ return f, nil
+}
+
+var errStreamID = errors.New("invalid streamid")
+
+func validStreamID(streamID uint32) bool {
+ return streamID != 0 && streamID&(1<<31) == 0
+}
+
+// WriteData writes a DATA frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
+ // TODO: ignoring padding for now. will add when somebody cares.
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if endStream {
+ flags |= FlagDataEndStream
+ }
+ f.startWrite(FrameData, flags, streamID)
+ f.wbuf = append(f.wbuf, data...)
+ return f.endWrite()
+}
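
Wiring a Framer's writer and reader to the same bytes.Buffer round-trips a frame entirely in memory, a convenient way to watch the write and parse paths together (sketch; imports assumed):

    var buf bytes.Buffer
    fr := http2.NewFramer(&buf, &buf) // writes land where reads come from
    if err := fr.WriteData(1, true, []byte("hello")); err != nil {
        // handle err
    }
    f, err := fr.ReadFrame()
    if err != nil {
        // handle err
    }
    df := f.(*http2.DataFrame)
    fmt.Printf("stream=%d end=%v data=%q\n", df.StreamID, df.StreamEnded(), df.Data())
    // stream=1 end=true data="hello"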
+
+// A SettingsFrame conveys configuration parameters that affect how
+// endpoints communicate, such as preferences and constraints on peer
+// behavior.
+//
+// See http://http2.github.io/http2-spec/#SETTINGS
+type SettingsFrame struct {
+ FrameHeader
+ p []byte
+}
+
+func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
+ // When this (ACK 0x1) bit is set, the payload of the
+ // SETTINGS frame MUST be empty. Receipt of a
+ // SETTINGS frame with the ACK flag set and a length
+ // field value other than 0 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FRAME_SIZE_ERROR.
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ // SETTINGS frames always apply to a connection,
+ // never a single stream. The stream identifier for a
+ // SETTINGS frame MUST be zero (0x0). If an endpoint
+ // receives a SETTINGS frame whose stream identifier
+ // field is anything other than 0x0, the endpoint MUST
+ // respond with a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(p)%6 != 0 {
+ // Expecting a whole number of 6 byte settings.
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ f := &SettingsFrame{FrameHeader: fh, p: p}
+ if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+ // Values above the maximum flow control window size of 2^31 - 1 MUST
+ // be treated as a connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ return nil, ConnectionError(ErrCodeFlowControl)
+ }
+ return f, nil
+}
+
+func (f *SettingsFrame) IsAck() bool {
+ return f.FrameHeader.Flags.Has(FlagSettingsAck)
+}
+
+func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
+ f.checkValid()
+ buf := f.p
+ for len(buf) > 0 {
+ settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
+ if settingID == s {
+ return binary.BigEndian.Uint32(buf[2:6]), true
+ }
+ buf = buf[6:]
+ }
+ return 0, false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
+func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
+ f.checkValid()
+ buf := f.p
+ for len(buf) > 0 {
+ if err := fn(Setting{
+ SettingID(binary.BigEndian.Uint16(buf[:2])),
+ binary.BigEndian.Uint32(buf[2:6]),
+ }); err != nil {
+ return err
+ }
+ buf = buf[6:]
+ }
+ return nil
+}
+
+// WriteSettings writes a SETTINGS frame with zero or more settings
+// specified and the ACK bit not set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettings(settings ...Setting) error {
+ f.startWrite(FrameSettings, 0, 0)
+ for _, s := range settings {
+ f.writeUint16(uint16(s.ID))
+ f.writeUint32(s.Val)
+ }
+ return f.endWrite()
+}
+
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettingsAck() error {
+ f.startWrite(FrameSettings, FlagSettingsAck, 0)
+ return f.endWrite()
+}
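
The same in-memory round trip works for SETTINGS, with ForeachSetting walking the decoded 6-byte entries. A sketch (Setting and SettingInitialWindowSize come from this package's http2.go; imports assumed):

    var buf bytes.Buffer
    fr := http2.NewFramer(&buf, &buf)
    if err := fr.WriteSettings(http2.Setting{ID: http2.SettingInitialWindowSize, Val: 1 << 20}); err != nil {
        // handle err
    }
    f, _ := fr.ReadFrame()
    sf := f.(*http2.SettingsFrame)
    fmt.Println(sf.IsAck()) // false
    sf.ForeachSetting(func(s http2.Setting) error {
        fmt.Println(s.ID, "=", s.Val) // one line per 6-byte setting entry
        return nil
    })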
+
+// A PingFrame is a mechanism for measuring a minimal round trip time
+// from the sender, as well as determining whether an idle connection
+// is still functional.
+// See http://http2.github.io/http2-spec/#rfc.section.6.7
+type PingFrame struct {
+ FrameHeader
+ Data [8]byte
+}
+
+func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if len(payload) != 8 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ f := &PingFrame{FrameHeader: fh}
+ copy(f.Data[:], payload)
+ return f, nil
+}
+
+func (f *Framer) WritePing(ack bool, data [8]byte) error {
+ var flags Flags
+ if ack {
+ flags = FlagPingAck
+ }
+ f.startWrite(FramePing, flags, 0)
+ f.writeBytes(data[:])
+ return f.endWrite()
+}
+
+// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
+// See http://http2.github.io/http2-spec/#rfc.section.6.8
+type GoAwayFrame struct {
+ FrameHeader
+ LastStreamID uint32
+ ErrCode ErrCode
+ debugData []byte
+}
+
+// DebugData returns any debug data in the GOAWAY frame. Its contents
+// are not defined.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *GoAwayFrame) DebugData() []byte {
+ f.checkValid()
+ return f.debugData
+}
+
+func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.StreamID != 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(p) < 8 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ return &GoAwayFrame{
+ FrameHeader: fh,
+ LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
+ ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])),
+ debugData: p[8:],
+ }, nil
+}
+
+func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
+ f.startWrite(FrameGoAway, 0, 0)
+ f.writeUint32(maxStreamID & (1<<31 - 1))
+ f.writeUint32(uint32(code))
+ f.writeBytes(debugData)
+ return f.endWrite()
+}
+
+// An UnknownFrame is the frame type returned when the frame type is unknown
+// or no specific frame type parser exists.
+type UnknownFrame struct {
+ FrameHeader
+ p []byte
+}
+
+// Payload returns the frame's payload (after the header). It is not
+// valid to call this method after a subsequent call to
+// Framer.ReadFrame, nor is it valid to retain the returned slice.
+// The memory is owned by the Framer and is invalidated when the next
+// frame is read.
+func (f *UnknownFrame) Payload() []byte {
+ f.checkValid()
+ return f.p
+}
+
+func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
+ return &UnknownFrame{fh, p}, nil
+}
+
+// A WindowUpdateFrame is used to implement flow control.
+// See http://http2.github.io/http2-spec/#rfc.section.6.9
+type WindowUpdateFrame struct {
+ FrameHeader
+ Increment uint32
+}
+
+func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if len(p) != 4 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+ if inc == 0 {
+ // A receiver MUST treat the receipt of a
+ // WINDOW_UPDATE frame with a flow control window
+ // increment of 0 as a stream error (Section 5.4.2) of
+ // type PROTOCOL_ERROR; errors on the connection flow
+ // control window MUST be treated as a connection
+ // error (Section 5.4.1).
+ if fh.StreamID == 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+ }
+ return &WindowUpdateFrame{
+ FrameHeader: fh,
+ Increment: inc,
+ }, nil
+}
+
+// WriteWindowUpdate writes a WINDOW_UPDATE frame.
+// The increment value must be between 1 and 2,147,483,647, inclusive.
+// If the Stream ID is zero, the window update applies to the
+// connection as a whole.
+func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
+ // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
+ if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
+ return errors.New("illegal window increment value")
+ }
+ f.startWrite(FrameWindowUpdate, 0, streamID)
+ f.writeUint32(incr)
+ return f.endWrite()
+}
+
+// A HeadersFrame is used to open a stream and additionally carries a
+// header block fragment.
+type HeadersFrame struct {
+ FrameHeader
+
+ // Priority is set if FlagHeadersPriority is set in the FrameHeader.
+ Priority PriorityParam
+
+ headerFragBuf []byte // not owned
+}
+
+func (f *HeadersFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *HeadersFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)
+}
+
+func (f *HeadersFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersEndStream)
+}
+
+func (f *HeadersFrame) HasPriority() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersPriority)
+}
+
+func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
+ hf := &HeadersFrame{
+ FrameHeader: fh,
+ }
+ if fh.StreamID == 0 {
+ // HEADERS frames MUST be associated with a stream. If a HEADERS frame
+ // is received whose stream identifier field is 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ var padLength uint8
+ if fh.Flags.Has(FlagHeadersPadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ return
+ }
+ }
+ if fh.Flags.Has(FlagHeadersPriority) {
+ var v uint32
+ p, v, err = readUint32(p)
+ if err != nil {
+ return nil, err
+ }
+ hf.Priority.StreamDep = v & 0x7fffffff
+ hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
+ p, hf.Priority.Weight, err = readByte(p)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(p)-int(padLength) <= 0 {
+ return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+ }
+ hf.headerFragBuf = p[:len(p)-int(padLength)]
+ return hf, nil
+}
+
+// HeadersFrameParam are the parameters for writing a HEADERS frame.
+type HeadersFrameParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndStream indicates that the header block is the last that
+ // the endpoint will send for the identified stream. Setting
+ // this flag causes the stream to enter one of "half closed"
+ // states.
+ EndStream bool
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+
+ // Priority, if non-zero, includes stream priority information
+ // in the HEADERS frame.
+ Priority PriorityParam
+}
+
+// WriteHeaders writes a single HEADERS frame.
+//
+// This is a low-level header writing method. Encoding headers and
+// splitting them into any necessary CONTINUATION frames is handled
+// elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagHeadersPadded
+ }
+ if p.EndStream {
+ flags |= FlagHeadersEndStream
+ }
+ if p.EndHeaders {
+ flags |= FlagHeadersEndHeaders
+ }
+ if !p.Priority.IsZero() {
+ flags |= FlagHeadersPriority
+ }
+ f.startWrite(FrameHeaders, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !p.Priority.IsZero() {
+ v := p.Priority.StreamDep
+ if !validStreamID(v) && !f.AllowIllegalWrites {
+ return errors.New("invalid dependent stream id")
+ }
+ if p.Priority.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Priority.Weight)
+ }
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
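
WriteHeaders expects BlockFragment to be HPACK-encoded already, so it pairs naturally with the hpack package vendored alongside. A sketch with error handling elided (imports assumed):

    var block bytes.Buffer
    henc := hpack.NewEncoder(&block)
    henc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
    henc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})

    var conn bytes.Buffer
    fr := http2.NewFramer(&conn, &conn)
    fr.WriteHeaders(http2.HeadersFrameParam{
        StreamID:      1,
        BlockFragment: block.Bytes(),
        EndStream:     true,
        EndHeaders:    true, // small block; no CONTINUATION frames follow
    })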
+
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.3
+type PriorityFrame struct {
+ FrameHeader
+ PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type PriorityParam struct {
+ // StreamDep is a 31-bit stream identifier for the
+ // stream that this stream depends on. Zero means no
+ // dependency.
+ StreamDep uint32
+
+ // Exclusive is whether the dependency is exclusive.
+ Exclusive bool
+
+ // Weight is the stream's zero-indexed weight. It should be
+ // set together with StreamDep, or neither should be set. Per
+ // the spec, "Add one to the value to obtain a weight between
+ // 1 and 256."
+ Weight uint8
+}
+
+func (p PriorityParam) IsZero() bool {
+ return p == PriorityParam{}
+}
+
+func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(payload) != 5 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ v := binary.BigEndian.Uint32(payload[:4])
+ streamID := v & 0x7fffffff // mask off high bit
+ return &PriorityFrame{
+ FrameHeader: fh,
+ PriorityParam: PriorityParam{
+ Weight: payload[4],
+ StreamDep: streamID,
+ Exclusive: streamID != v, // was high bit set?
+ },
+ }, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.startWrite(FramePriority, 0, streamID)
+ v := p.StreamDep
+ if p.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Weight)
+ return f.endWrite()
+}
+
+// A RSTStreamFrame allows for abnormal termination of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.4
+type RSTStreamFrame struct {
+ FrameHeader
+ ErrCode ErrCode
+}
+
+func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if len(p) != 4 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID == 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
+}
+
+// WriteRSTStream writes a RST_STREAM frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.startWrite(FrameRSTStream, 0, streamID)
+ f.writeUint32(uint32(code))
+ return f.endWrite()
+}
+
+// A ContinuationFrame is used to continue a sequence of header block fragments.
+// See http://http2.github.io/http2-spec/#rfc.section.6.10
+type ContinuationFrame struct {
+ FrameHeader
+ headerFragBuf []byte
+}
+
+func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+ return &ContinuationFrame{fh, p}, nil
+}
+
+func (f *ContinuationFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+func (f *ContinuationFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *ContinuationFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)
+}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if endHeaders {
+ flags |= FlagContinuationEndHeaders
+ }
+ f.startWrite(FrameContinuation, flags, streamID)
+ f.wbuf = append(f.wbuf, headerBlockFragment...)
+ return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.6
+type PushPromiseFrame struct {
+ FrameHeader
+ PromiseID uint32
+ headerFragBuf []byte // not owned
+}
+
+func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *PushPromiseFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
+}
+
+func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
+ pp := &PushPromiseFrame{
+ FrameHeader: fh,
+ }
+ if pp.StreamID == 0 {
+ // PUSH_PROMISE frames MUST be associated with an existing,
+ // peer-initiated stream. The stream identifier of a
+ // PUSH_PROMISE frame indicates the stream it is associated
+ // with. If the stream identifier field specifies the value
+ // 0x0, a recipient MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ // The PUSH_PROMISE frame includes optional padding.
+ // Padding fields and flags are identical to those defined for DATA frames.
+ var padLength uint8
+ if fh.Flags.Has(FlagPushPromisePadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ return
+ }
+ }
+
+ p, pp.PromiseID, err = readUint32(p)
+ if err != nil {
+ return
+ }
+ pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+ if int(padLength) > len(p) {
+ // like the DATA frame, error out if padding is longer than the body.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ pp.headerFragBuf = p[:len(p)-int(padLength)]
+ return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type PushPromiseParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+
+ // PromiseID is the required Stream ID that this
+ // PUSH_PROMISE frame reserves for the promised stream.
+ PromiseID uint32
+
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+}
+
+// WritePushPromise writes a single PUSH_PROMISE frame.
+//
+// As with HEADERS frames, this is a low-level call for writing
+// individual frames. CONTINUATION frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePushPromise(p PushPromiseParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagPushPromisePadded
+ }
+ if p.EndHeaders {
+ flags |= FlagPushPromiseEndHeaders
+ }
+ f.startWrite(FramePushPromise, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.writeUint32(p.PromiseID)
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
+
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
+func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {
+ f.startWrite(t, flags, streamID)
+ f.writeBytes(payload)
+ return f.endWrite()
+}
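
Because ReadFrame falls back to parseUnknownFrame for unregistered types, a frame written with WriteRawFrame comes back as an *UnknownFrame. A sketch (0xfa is an arbitrary made-up extension type; imports assumed):

    var buf bytes.Buffer
    fr := http2.NewFramer(&buf, &buf)
    fr.WriteRawFrame(http2.FrameType(0xfa), 0, 1, []byte{0xde, 0xad})
    f, _ := fr.ReadFrame()
    uf := f.(*http2.UnknownFrame)
    fmt.Printf("%v payload=%x\n", uf.Header().Type, uf.Payload())
    // UNKNOWN_FRAME_TYPE_250 payload=dead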
+
+func readByte(p []byte) (remain []byte, b byte, err error) {
+ if len(p) == 0 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[1:], p[0], nil
+}
+
+func readUint32(p []byte) (remain []byte, v uint32, err error) {
+ if len(p) < 4 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[4:], binary.BigEndian.Uint32(p[:4]), nil
+}
+
+type streamEnder interface {
+ StreamEnded() bool
+}
+
+type headersEnder interface {
+ HeadersEnded() bool
+}
diff --git a/vendor/src/golang.org/x/net/http2/gotrack.go b/vendor/src/golang.org/x/net/http2/gotrack.go
new file mode 100644
index 0000000000..7dc2ef90db
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/gotrack.go
@@ -0,0 +1,173 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// Defensive debug-only utility to track that functions run on the
+// goroutine that they're supposed to.
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+)
+
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+ if !DebugGoroutines {
+ return 0
+ }
+ return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() != uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+func (g goroutineLock) checkNotOn() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() == uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
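
The intended pattern is to capture the lock when a goroutine takes ownership of some state and to assert it in every method that must run on that goroutine. A hypothetical in-package sketch (the serveLoop type and its field are invented; the checks are no-ops unless DEBUG_HTTP2_GOROUTINES=1):

    // Sketch, as if inside package http2.
    type serveLoop struct {
        serveG goroutineLock // hypothetical field
    }

    func (s *serveLoop) serve() {
        s.serveG = newGoroutineLock() // this goroutine now owns serve state
        // ... event loop ...
    }

    func (s *serveLoop) onFrame() {
        s.serveG.check() // panics when called from the wrong goroutine (debug mode only)
    }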
+
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+ bp := littleBuf.Get().(*[]byte)
+ defer littleBuf.Put(bp)
+ b := *bp
+ b = b[:runtime.Stack(b, false)]
+ // Parse the 4707 out of "goroutine 4707 ["
+ b = bytes.TrimPrefix(b, goroutineSpace)
+ i := bytes.IndexByte(b, ' ')
+ if i < 0 {
+ panic(fmt.Sprintf("No space found in %q", b))
+ }
+ b = b[:i]
+ n, err := parseUintBytes(b, 10, 64)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+ }
+ return n
+}
+
+var littleBuf = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, 64)
+ return &buf
+ },
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+ var cutoff, maxVal uint64
+
+ if bitSize == 0 {
+ bitSize = int(strconv.IntSize)
+ }
+
+ s0 := s
+ switch {
+ case len(s) < 1:
+ err = strconv.ErrSyntax
+ goto Error
+
+ case 2 <= base && base <= 36:
+ // valid base; nothing to do
+
+ case base == 0:
+ // Look for octal, hex prefix.
+ switch {
+ case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+ base = 16
+ s = s[2:]
+ if len(s) < 1 {
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ case s[0] == '0':
+ base = 8
+ default:
+ base = 10
+ }
+
+ default:
+ err = errors.New("invalid base " + strconv.Itoa(base))
+ goto Error
+ }
+
+ n = 0
+ cutoff = cutoff64(base)
+ maxVal = 1<<uint(bitSize) - 1
+
+ for i := 0; i < len(s); i++ {
+ var v byte
+ d := s[i]
+ switch {
+ case '0' <= d && d <= '9':
+ v = d - '0'
+ case 'a' <= d && d <= 'z':
+ v = d - 'a' + 10
+ case 'A' <= d && d <= 'Z':
+ v = d - 'A' + 10
+ default:
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ if int(v) >= base {
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+
+ if n >= cutoff {
+ // n*base overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n *= uint64(base)
+
+ n1 := n + uint64(v)
+ if n1 < n || n1 > maxVal {
+ // n+v overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n = n1
+ }
+
+ return n, nil
+
+Error:
+ return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+ if base < 2 {
+ return 0
+ }
+ return (1<<64-1)/uint64(base) + 1
+}
diff --git a/vendor/src/golang.org/x/net/http2/headermap.go b/vendor/src/golang.org/x/net/http2/headermap.go
new file mode 100644
index 0000000000..67c7c48357
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/headermap.go
@@ -0,0 +1,80 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+ "net/http"
+ "strings"
+)
+
+var (
+ commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
+ commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
+)
+
+func init() {
+ for _, v := range []string{
+ "accept",
+ "accept-charset",
+ "accept-encoding",
+ "accept-language",
+ "accept-ranges",
+ "age",
+ "access-control-allow-origin",
+ "allow",
+ "authorization",
+ "cache-control",
+ "content-disposition",
+ "content-encoding",
+ "content-language",
+ "content-length",
+ "content-location",
+ "content-range",
+ "content-type",
+ "cookie",
+ "date",
+ "etag",
+ "expect",
+ "expires",
+ "from",
+ "host",
+ "if-match",
+ "if-modified-since",
+ "if-none-match",
+ "if-unmodified-since",
+ "last-modified",
+ "link",
+ "location",
+ "max-forwards",
+ "proxy-authenticate",
+ "proxy-authorization",
+ "range",
+ "referer",
+ "refresh",
+ "retry-after",
+ "server",
+ "set-cookie",
+ "strict-transport-security",
+ "transfer-encoding",
+ "user-agent",
+ "vary",
+ "via",
+ "www-authenticate",
+ } {
+ chk := http.CanonicalHeaderKey(v)
+ commonLowerHeader[chk] = v
+ commonCanonHeader[v] = chk
+ }
+}
+
+func lowerHeader(v string) string {
+ if s, ok := commonLowerHeader[v]; ok {
+ return s
+ }
+ return strings.ToLower(v)
+}
diff --git a/vendor/src/golang.org/x/net/http2/hpack/encode.go b/vendor/src/golang.org/x/net/http2/hpack/encode.go
new file mode 100644
index 0000000000..19bd9f4fcb
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/hpack/encode.go
@@ -0,0 +1,252 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package hpack
+
+import (
+ "io"
+)
+
+const (
+ uint32Max = ^uint32(0)
+ initialHeaderTableSize = 4096
+)
+
+type Encoder struct {
+ dynTab dynamicTable
+ // minSize is the minimum table size set by
+ // SetMaxDynamicTableSize after the previous Header Table Size
+ // Update.
+ minSize uint32
+ // maxSizeLimit is the maximum table size this encoder
+ // supports. This will protect the encoder from too large
+ // size.
+ maxSizeLimit uint32
+ // tableSizeUpdate indicates whether "Header Table Size
+ // Update" is required.
+ tableSizeUpdate bool
+ w io.Writer
+ buf []byte
+}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding.
+// Encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+ e := &Encoder{
+ minSize: uint32Max,
+ maxSizeLimit: initialHeaderTableSize,
+ tableSizeUpdate: false,
+ w: w,
+ }
+ e.dynTab.setMaxSize(initialHeaderTableSize)
+ return e
+}
+
+// WriteField encodes f into a single Write to e's underlying Writer.
+// This function may also produce bytes for "Header Table Size Update"
+// if necessary. If produced, it is done before encoding f.
+func (e *Encoder) WriteField(f HeaderField) error {
+ e.buf = e.buf[:0]
+
+ if e.tableSizeUpdate {
+ e.tableSizeUpdate = false
+ if e.minSize < e.dynTab.maxSize {
+ e.buf = appendTableSize(e.buf, e.minSize)
+ }
+ e.minSize = uint32Max
+ e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+ }
+
+ idx, nameValueMatch := e.searchTable(f)
+ if nameValueMatch {
+ e.buf = appendIndexed(e.buf, idx)
+ } else {
+ indexing := e.shouldIndex(f)
+ if indexing {
+ e.dynTab.add(f)
+ }
+
+ if idx == 0 {
+ e.buf = appendNewName(e.buf, f, indexing)
+ } else {
+ e.buf = appendIndexedName(e.buf, f, idx, indexing)
+ }
+ }
+ n, err := e.w.Write(e.buf)
+ if err == nil && n != len(e.buf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
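
In use, each WriteField call emits one header field representation into the Encoder's writer; marking a field Sensitive forces the never-indexed literal form so intermediaries must not index it. A sketch (imports assumed):

    var buf bytes.Buffer
    enc := hpack.NewEncoder(&buf)
    enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
    enc.WriteField(hpack.HeaderField{Name: "authorization", Value: "secret", Sensitive: true})
    fmt.Printf("% x\n", buf.Bytes()) // compact HPACK bytes, not readable header text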
+
+// searchTable searches f in both the static and dynamic header tables.
+// The static header table is searched first. Only when there is no
+// exact match for both name and value, the dynamic header table is
+// then searched. If there is no match, i is 0. If both name and value
+// match, i is the matched index and nameValueMatch becomes true. If
+// only name matches, i points to that index and nameValueMatch
+// becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+ for idx, hf := range staticTable {
+ if !constantTimeStringCompare(hf.Name, f.Name) {
+ continue
+ }
+ if i == 0 {
+ i = uint64(idx + 1)
+ }
+ if f.Sensitive {
+ continue
+ }
+ if !constantTimeStringCompare(hf.Value, f.Value) {
+ continue
+ }
+ i = uint64(idx + 1)
+ nameValueMatch = true
+ return
+ }
+
+ j, nameValueMatch := e.dynTab.search(f)
+ if nameValueMatch || (i == 0 && j != 0) {
+ i = j + uint64(len(staticTable))
+ }
+ return
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+ if v > e.maxSizeLimit {
+ v = e.maxSizeLimit
+ }
+ if v < e.minSize {
+ e.minSize = v
+ }
+ e.tableSizeUpdate = true
+ e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, which matches the default dynamic header table size
+// described in the HPACK specification. If the current maximum
+// dynamic header table size is strictly greater than v, "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+ e.maxSizeLimit = v
+ if e.dynTab.maxSize > v {
+ e.tableSizeUpdate = true
+ e.dynTab.setMaxSize(v)
+ }
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+ return !f.Sensitive && f.size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, i)
+ dst[first] |= 0x80
+ return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+ dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+ dst = appendHpackString(dst, f.Name)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring indexed name
+// entry, as encoded in one of "Literal Header field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+ first := len(dst)
+ var n byte
+ if indexing {
+ n = 6
+ } else {
+ n = 4
+ }
+ dst = appendVarInt(dst, n, i)
+ dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 5, uint64(v))
+ dst[first] |= 0x20
+ return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+ k := uint64((1 << n) - 1)
+ if i < k {
+ return append(dst, byte(i))
+ }
+ dst = append(dst, byte(k))
+ i -= k
+ for ; i >= 128; i >>= 7 {
+ dst = append(dst, byte(0x80|(i&0x7f)))
+ }
+ return append(dst, byte(i))
+}
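
appendVarInt is unexported, but its arithmetic can be checked against the worked example in RFC 7541: 1337 with a 5-bit prefix encodes to 1f 9a 0a. A standalone re-implementation of the same rule (a hypothetical copy, not the package's own function):

    package main

    import "fmt"

    // encodePrefixed mirrors appendVarInt's logic for an n-bit prefix.
    func encodePrefixed(dst []byte, n byte, i uint64) []byte {
        k := uint64((1 << n) - 1)
        if i < k {
            return append(dst, byte(i)) // fits in the prefix
        }
        dst = append(dst, byte(k)) // saturate the prefix
        i -= k
        for ; i >= 128; i >>= 7 {
            dst = append(dst, byte(0x80|(i&0x7f))) // 7 bits per byte; high bit = more to come
        }
        return append(dst, byte(i))
    }

    func main() {
        fmt.Printf("% x\n", encodePrefixed(nil, 5, 1337)) // 1f 9a 0a (RFC 7541, C.1.2)
    }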
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s will be encoded in Huffman codes only when that produces a
+// strictly shorter byte string.
+func appendHpackString(dst []byte, s string) []byte {
+ huffmanLength := HuffmanEncodeLength(s)
+ if huffmanLength < uint64(len(s)) {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, huffmanLength)
+ dst = AppendHuffmanString(dst, s)
+ dst[first] |= 0x80
+ } else {
+ dst = appendVarInt(dst, 7, uint64(len(s)))
+ dst = append(dst, s...)
+ }
+ return dst
+}
+
+// encodeTypeByte returns type byte. If sensitive is true, type byte
+// for "Never Indexed" representation is returned. If sensitive is
+// false and indexing is true, type byte for "Incremental Indexing"
+// representation is returned. Otherwise, type byte for "Without
+// Indexing" is returned.
+func encodeTypeByte(indexing, sensitive bool) byte {
+ if sensitive {
+ return 0x10
+ }
+ if indexing {
+ return 0x40
+ }
+ return 0
+}
diff --git a/vendor/src/golang.org/x/net/http2/hpack/hpack.go b/vendor/src/golang.org/x/net/http2/hpack/hpack.go
new file mode 100644
index 0000000000..c9e36f7427
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/hpack/hpack.go
@@ -0,0 +1,445 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// A DecodingError is something the spec defines as a decoding error.
+type DecodingError struct {
+ Err error
+}
+
+func (de DecodingError) Error() string {
+ return fmt.Sprintf("decoding error: %v", de.Err)
+}
+
+// An InvalidIndexError is returned when an encoder references a table
+// entry before the static table or after the end of the dynamic table.
+type InvalidIndexError int
+
+func (e InvalidIndexError) Error() string {
+ return fmt.Sprintf("invalid indexed representation index %d", int(e))
+}
+
+// A HeaderField is a name-value pair. Both the name and value are
+// treated as opaque sequences of octets.
+type HeaderField struct {
+ Name, Value string
+
+ // Sensitive means that this header field should never be
+ // indexed.
+ Sensitive bool
+}
+
+func (hf *HeaderField) size() uint32 {
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
+ // "The size of the dynamic table is the sum of the size of
+ // its entries. The size of an entry is the sum of its name's
+ // length in octets (as defined in Section 5.2), its value's
+ // length in octets (see Section 5.2), plus 32. The size of
+ // an entry is calculated using the length of the name and
+ // value without any Huffman encoding applied."
+
+ // This can overflow if somebody makes a large HeaderField
+ // Name and/or Value by hand, but we don't care, because that
+ // won't happen on the wire because the encoding doesn't allow
+ // it.
+ return uint32(len(hf.Name) + len(hf.Value) + 32)
+}
+
+// A Decoder is the decoding context for incremental processing of
+// header blocks.
+type Decoder struct {
+ dynTab dynamicTable
+ emit func(f HeaderField)
+
+ // buf is the unparsed buffer. It's only written to
+ // saveBuf if it was truncated in the middle of a header
+ // block. Because it's usually not owned, we can only
+ // process it under Write.
+ buf []byte // usually not owned
+ saveBuf bytes.Buffer
+}
+
+func NewDecoder(maxSize uint32, emitFunc func(f HeaderField)) *Decoder {
+ d := &Decoder{
+ emit: emitFunc,
+ }
+ d.dynTab.allowedMaxSize = maxSize
+ d.dynTab.setMaxSize(maxSize)
+ return d
+}
+
+// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
+// underlying buffers for garbage reasons.
+
+func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
+ d.dynTab.setMaxSize(v)
+}
+
+// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
+// stream (via dynamic table size updates) may set the maximum size
+// to.
+func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
+ d.dynTab.allowedMaxSize = v
+}
+
+type dynamicTable struct {
+ // ents is the FIFO described at
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
+ // The newest (low index) is appended at the end, and items are
+ // evicted from the front.
+ ents []HeaderField
+ size uint32
+ maxSize uint32 // current maxSize
+ allowedMaxSize uint32 // maxSize may go up to this, inclusive
+}
+
+func (dt *dynamicTable) setMaxSize(v uint32) {
+ dt.maxSize = v
+ dt.evict()
+}
+
+// TODO: change dynamicTable to be a struct with a slice and a size int field,
+// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
+//
+// Then make add increment the size. Maybe the max size should move from Decoder to
+// dynamicTable and add should return an ok bool if there was enough space.
+//
+// Later we'll need a remove operation on dynamicTable.
+
+func (dt *dynamicTable) add(f HeaderField) {
+ dt.ents = append(dt.ents, f)
+ dt.size += f.size()
+ dt.evict()
+}
+
+// If we're too big, evict old stuff (front of the slice)
+func (dt *dynamicTable) evict() {
+ base := dt.ents // keep base pointer of slice
+ for dt.size > dt.maxSize {
+ dt.size -= dt.ents[0].size()
+ dt.ents = dt.ents[1:]
+ }
+
+ // Shift slice contents down if we evicted things.
+ if len(dt.ents) != len(base) {
+ copy(base, dt.ents)
+ dt.ents = base[:len(dt.ents)]
+ }
+}
+
+// constantTimeStringCompare compares string a and b in a constant
+// time manner.
+func constantTimeStringCompare(a, b string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ c := byte(0)
+
+ for i := 0; i < len(a); i++ {
+ c |= a[i] ^ b[i]
+ }
+
+ return c == 0
+}
+
+// search searches f in the table. The return value i is 0 if there is
+// no name match. If there is name match or name/value match, i is the
+// index of that entry (1-based). If both name and value match,
+// nameValueMatch becomes true.
+func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+ l := len(dt.ents)
+ for j := l - 1; j >= 0; j-- {
+ ent := dt.ents[j]
+ if !constantTimeStringCompare(ent.Name, f.Name) {
+ continue
+ }
+ if i == 0 {
+ i = uint64(l - j)
+ }
+ if f.Sensitive {
+ continue
+ }
+ if !constantTimeStringCompare(ent.Value, f.Value) {
+ continue
+ }
+ i = uint64(l - j)
+ nameValueMatch = true
+ return
+ }
+ return
+}
+
+func (d *Decoder) maxTableIndex() int {
+ return len(d.dynTab.ents) + len(staticTable)
+}
+
+func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
+ if i < 1 {
+ return
+ }
+ if i > uint64(d.maxTableIndex()) {
+ return
+ }
+ if i <= uint64(len(staticTable)) {
+ return staticTable[i-1], true
+ }
+ dents := d.dynTab.ents
+ return dents[len(dents)-(int(i)-len(staticTable))], true
+}
+
+// DecodeFull decodes an entire block.
+//
+// TODO: remove this method and make it incremental later? This is
+// easier for debugging now.
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
+ var hf []HeaderField
+ saveFunc := d.emit
+ defer func() { d.emit = saveFunc }()
+ d.emit = func(f HeaderField) { hf = append(hf, f) }
+ if _, err := d.Write(p); err != nil {
+ return nil, err
+ }
+ if err := d.Close(); err != nil {
+ return nil, err
+ }
+ return hf, nil
+}
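
Round-tripping through an Encoder exercises the decode path end to end; DecodeFull temporarily swaps in its own emit func and collects every field into a slice. A sketch (imports assumed):

    var buf bytes.Buffer
    enc := hpack.NewEncoder(&buf)
    enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
    enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/html"})

    dec := hpack.NewDecoder(4096, nil) // emit func unused here: DecodeFull overrides it
    fields, err := dec.DecodeFull(buf.Bytes())
    if err != nil {
        // handle err
    }
    for _, f := range fields {
        fmt.Printf("%s: %s\n", f.Name, f.Value)
    }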
+
+func (d *Decoder) Close() error {
+ if d.saveBuf.Len() > 0 {
+ d.saveBuf.Reset()
+ return DecodingError{errors.New("truncated headers")}
+ }
+ return nil
+}
+
+func (d *Decoder) Write(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ // Prevent state machine CPU attacks (making us redo
+ // work up to the point of finding out we don't have
+ // enough data)
+ return
+ }
+ // Only copy the data if we have to. Optimistically assume
+ // that p will contain a complete header block.
+ if d.saveBuf.Len() == 0 {
+ d.buf = p
+ } else {
+ d.saveBuf.Write(p)
+ d.buf = d.saveBuf.Bytes()
+ d.saveBuf.Reset()
+ }
+
+ for len(d.buf) > 0 {
+ err = d.parseHeaderFieldRepr()
+ if err != nil {
+ if err == errNeedMore {
+ err = nil
+ d.saveBuf.Write(d.buf)
+ }
+ break
+ }
+ }
+
+ return len(p), err
+}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+type indexType int
+
+const (
+ indexedTrue indexType = iota
+ indexedFalse
+ indexedNever
+)
+
+func (v indexType) indexed() bool { return v == indexedTrue }
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0
+func (d *Decoder) parseHeaderFieldRepr() error {
+ b := d.buf[0]
+ switch {
+ case b&128 != 0:
+ // Indexed representation.
+ // High bit set?
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+ return d.parseFieldIndexed()
+ case b&192 == 64:
+ // 6.2.1 Literal Header Field with Incremental Indexing
+ // 0b01xxxxxx: top two bits are 01
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+ return d.parseFieldLiteral(6, indexedTrue)
+ case b&240 == 0:
+ // 6.2.2 Literal Header Field without Indexing
+ // 0b0000xxxx: top four bits are 0000
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+ return d.parseFieldLiteral(4, indexedFalse)
+ case b&240 == 16:
+ // 6.2.3 Literal Header Field never Indexed
+ // 0b0001xxxx: top four bits are 0001
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+ return d.parseFieldLiteral(4, indexedNever)
+ case b&224 == 32:
+ // 6.3 Dynamic Table Size Update
+ // Top three bits are '001'.
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+ return d.parseDynamicTableSizeUpdate()
+ }
+
+ return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+ buf := d.buf
+ idx, buf, err := readVarInt(7, buf)
+ if err != nil {
+ return err
+ }
+ hf, ok := d.at(idx)
+ if !ok {
+ return DecodingError{InvalidIndexError(idx)}
+ }
+ d.emit(HeaderField{Name: hf.Name, Value: hf.Value})
+ d.buf = buf
+ return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+ buf := d.buf
+ nameIdx, buf, err := readVarInt(n, buf)
+ if err != nil {
+ return err
+ }
+
+ var hf HeaderField
+ if nameIdx > 0 {
+ ihf, ok := d.at(nameIdx)
+ if !ok {
+ return DecodingError{InvalidIndexError(nameIdx)}
+ }
+ hf.Name = ihf.Name
+ } else {
+ hf.Name, buf, err = readString(buf)
+ if err != nil {
+ return err
+ }
+ }
+ hf.Value, buf, err = readString(buf)
+ if err != nil {
+ return err
+ }
+ d.buf = buf
+ if it.indexed() {
+ d.dynTab.add(hf)
+ }
+ hf.Sensitive = it.sensitive()
+ d.emit(hf)
+ return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+ buf := d.buf
+ size, buf, err := readVarInt(5, buf)
+ if err != nil {
+ return err
+ }
+ if size > uint64(d.dynTab.allowedMaxSize) {
+ return DecodingError{errors.New("dynamic table size update too large")}
+ }
+ d.dynTab.setMaxSize(uint32(size))
+ d.buf = buf
+ return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+ if n < 1 || n > 8 {
+ panic("bad n")
+ }
+ if len(p) == 0 {
+ return 0, p, errNeedMore
+ }
+ i = uint64(p[0])
+ if n < 8 {
+ i &= (1 << uint64(n)) - 1
+ }
+ if i < (1<<uint64(n))-1 {
+ return i, p[1:], nil
+ }
+
+ origP := p
+ p = p[1:]
+ var m uint64
+ for len(p) > 0 {
+ b := p[0]
+ p = p[1:]
+ i += uint64(b&127) << m
+ if b&128 == 0 {
+ return i, p, nil
+ }
+ m += 7
+ if m >= 63 { // TODO: proper overflow check. making this up.
+ return 0, origP, errVarintOverflow
+ }
+ }
+ return 0, origP, errNeedMore
+}
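+
+// exampleReadVarInt is an illustrative sketch, not part of the
+// upstream package. It decodes the spec's sample integer 1337 encoded
+// with a 5-bit prefix (RFC 7541, C.1.2): 0x1f saturates the prefix
+// (31), then the continuation bytes add 26<<0 and 10<<7, giving
+// 31 + 26 + 1280 = 1337.
+func exampleReadVarInt() {
+ i, remain, err := readVarInt(5, []byte{0x1f, 0x9a, 0x0a})
+ if err != nil || i != 1337 || len(remain) != 0 {
+ panic("unexpected varint decode")
+ }
+}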
+
+func readString(p []byte) (s string, remain []byte, err error) {
+ if len(p) == 0 {
+ return "", p, errNeedMore
+ }
+ isHuff := p[0]&128 != 0
+ strLen, p, err := readVarInt(7, p)
+ if err != nil {
+ return "", p, err
+ }
+ if uint64(len(p)) < strLen {
+ return "", p, errNeedMore
+ }
+ if !isHuff {
+ return string(p[:strLen]), p[strLen:], nil
+ }
+
+ // TODO: optimize this garbage:
+ var buf bytes.Buffer
+ if _, err := HuffmanDecode(&buf, p[:strLen]); err != nil {
+ return "", nil, err
+ }
+ return buf.String(), p[strLen:], nil
+}
diff --git a/vendor/src/golang.org/x/net/http2/hpack/huffman.go b/vendor/src/golang.org/x/net/http2/hpack/huffman.go
new file mode 100644
index 0000000000..9fe76f68ee
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/hpack/huffman.go
@@ -0,0 +1,159 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package hpack
+
+import (
+ "bytes"
+ "io"
+ "sync"
+)
+
+var bufPool = sync.Pool{
+ New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+
+ n := rootHuffmanNode
+ cur, nbits := uint(0), uint8(0)
+ for _, b := range v {
+ cur = cur<<8 | uint(b)
+ nbits += 8
+ for nbits >= 8 {
+ n = n.children[byte(cur>>(nbits-8))]
+ if n.children == nil {
+ buf.WriteByte(n.sym)
+ nbits -= n.codeLen
+ n = rootHuffmanNode
+ } else {
+ nbits -= 8
+ }
+ }
+ }
+ for nbits > 0 {
+ n = n.children[byte(cur<<(8-nbits))]
+ if n.children != nil || n.codeLen > nbits {
+ break
+ }
+ buf.WriteByte(n.sym)
+ nbits -= n.codeLen
+ n = rootHuffmanNode
+ }
+ return w.Write(buf.Bytes())
+}
+
+type node struct {
+ // children is non-nil for internal nodes
+ children []*node
+
+ // The following are only valid if children is nil:
+ codeLen uint8 // number of bits that led to the output of sym
+ sym byte // output symbol
+}
+
+func newInternalNode() *node {
+ return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+ for i, code := range huffmanCodes {
+ if i > 255 {
+ panic("too many huffman codes")
+ }
+ addDecoderNode(byte(i), code, huffmanCodeLen[i])
+ }
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+ cur := rootHuffmanNode
+ for codeLen > 8 {
+ codeLen -= 8
+ i := uint8(code >> codeLen)
+ if cur.children[i] == nil {
+ cur.children[i] = newInternalNode()
+ }
+ cur = cur.children[i]
+ }
+ shift := 8 - codeLen
+ start, end := int(uint8(code<<shift)), int(1<<shift)
+ for i := start; i < start+end; i++ {
+ cur.children[i] = &node{sym: sym, codeLen: codeLen}
+ }
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+ rembits := uint8(8)
+
+ for i := 0; i < len(s); i++ {
+ if rembits == 8 {
+ dst = append(dst, 0)
+ }
+ dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+ }
+
+ if rembits < 8 {
+ // special EOS symbol
+ code := uint32(0x3fffffff)
+ nbits := uint8(30)
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+ }
+
+ return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to the nearest byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+ n := uint64(0)
+ for i := 0; i < len(s); i++ {
+ n += uint64(huffmanCodeLen[s[i]])
+ }
+ return (n + 7) / 8
+}
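+
+// exampleHuffmanRoundTrip is an illustrative sketch, not part of the
+// upstream package. It uses the spec's sample string (RFC 7541,
+// C.4.1): "www.example.com" is 15 octets raw, but its code lengths
+// sum to 89 bits, so HuffmanEncodeLength reports (89+7)/8 = 12 bytes.
+func exampleHuffmanRoundTrip() {
+ const s = "www.example.com"
+ if HuffmanEncodeLength(s) != 12 {
+ panic("unexpected encoded length")
+ }
+ enc := AppendHuffmanString(nil, s)
+ var buf bytes.Buffer
+ if _, err := HuffmanDecode(&buf, enc); err != nil || buf.String() != s {
+ panic("huffman round trip failed")
+ }
+}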
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst are given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+ code := huffmanCodes[c]
+ nbits := huffmanCodeLen[c]
+
+ for {
+ if rembits > nbits {
+ t := uint8(code << (rembits - nbits))
+ dst[len(dst)-1] |= t
+ rembits -= nbits
+ break
+ }
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+
+ nbits -= rembits
+ rembits = 8
+
+ if nbits == 0 {
+ break
+ }
+
+ dst = append(dst, 0)
+ }
+
+ return dst, rembits
+}
diff --git a/vendor/src/golang.org/x/net/http2/hpack/tables.go b/vendor/src/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 0000000000..f898e25126
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,353 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package hpack
+
+func pair(name, value string) HeaderField {
+ return HeaderField{Name: name, Value: value}
+}
+
+// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
+var staticTable = []HeaderField{
+ pair(":authority", ""), // index 1 (1-based)
+ pair(":method", "GET"),
+ pair(":method", "POST"),
+ pair(":path", "/"),
+ pair(":path", "/index.html"),
+ pair(":scheme", "http"),
+ pair(":scheme", "https"),
+ pair(":status", "200"),
+ pair(":status", "204"),
+ pair(":status", "206"),
+ pair(":status", "304"),
+ pair(":status", "400"),
+ pair(":status", "404"),
+ pair(":status", "500"),
+ pair("accept-charset", ""),
+ pair("accept-encoding", "gzip, deflate"),
+ pair("accept-language", ""),
+ pair("accept-ranges", ""),
+ pair("accept", ""),
+ pair("access-control-allow-origin", ""),
+ pair("age", ""),
+ pair("allow", ""),
+ pair("authorization", ""),
+ pair("cache-control", ""),
+ pair("content-disposition", ""),
+ pair("content-encoding", ""),
+ pair("content-language", ""),
+ pair("content-length", ""),
+ pair("content-location", ""),
+ pair("content-range", ""),
+ pair("content-type", ""),
+ pair("cookie", ""),
+ pair("date", ""),
+ pair("etag", ""),
+ pair("expect", ""),
+ pair("expires", ""),
+ pair("from", ""),
+ pair("host", ""),
+ pair("if-match", ""),
+ pair("if-modified-since", ""),
+ pair("if-none-match", ""),
+ pair("if-range", ""),
+ pair("if-unmodified-since", ""),
+ pair("last-modified", ""),
+ pair("link", ""),
+ pair("location", ""),
+ pair("max-forwards", ""),
+ pair("proxy-authenticate", ""),
+ pair("proxy-authorization", ""),
+ pair("range", ""),
+ pair("referer", ""),
+ pair("refresh", ""),
+ pair("retry-after", ""),
+ pair("server", ""),
+ pair("set-cookie", ""),
+ pair("strict-transport-security", ""),
+ pair("transfer-encoding", ""),
+ pair("user-agent", ""),
+ pair("vary", ""),
+ pair("via", ""),
+ pair("www-authenticate", ""),
+}
+
+var huffmanCodes = []uint32{
+ 0x1ff8,
+ 0x7fffd8,
+ 0xfffffe2,
+ 0xfffffe3,
+ 0xfffffe4,
+ 0xfffffe5,
+ 0xfffffe6,
+ 0xfffffe7,
+ 0xfffffe8,
+ 0xffffea,
+ 0x3ffffffc,
+ 0xfffffe9,
+ 0xfffffea,
+ 0x3ffffffd,
+ 0xfffffeb,
+ 0xfffffec,
+ 0xfffffed,
+ 0xfffffee,
+ 0xfffffef,
+ 0xffffff0,
+ 0xffffff1,
+ 0xffffff2,
+ 0x3ffffffe,
+ 0xffffff3,
+ 0xffffff4,
+ 0xffffff5,
+ 0xffffff6,
+ 0xffffff7,
+ 0xffffff8,
+ 0xffffff9,
+ 0xffffffa,
+ 0xffffffb,
+ 0x14,
+ 0x3f8,
+ 0x3f9,
+ 0xffa,
+ 0x1ff9,
+ 0x15,
+ 0xf8,
+ 0x7fa,
+ 0x3fa,
+ 0x3fb,
+ 0xf9,
+ 0x7fb,
+ 0xfa,
+ 0x16,
+ 0x17,
+ 0x18,
+ 0x0,
+ 0x1,
+ 0x2,
+ 0x19,
+ 0x1a,
+ 0x1b,
+ 0x1c,
+ 0x1d,
+ 0x1e,
+ 0x1f,
+ 0x5c,
+ 0xfb,
+ 0x7ffc,
+ 0x20,
+ 0xffb,
+ 0x3fc,
+ 0x1ffa,
+ 0x21,
+ 0x5d,
+ 0x5e,
+ 0x5f,
+ 0x60,
+ 0x61,
+ 0x62,
+ 0x63,
+ 0x64,
+ 0x65,
+ 0x66,
+ 0x67,
+ 0x68,
+ 0x69,
+ 0x6a,
+ 0x6b,
+ 0x6c,
+ 0x6d,
+ 0x6e,
+ 0x6f,
+ 0x70,
+ 0x71,
+ 0x72,
+ 0xfc,
+ 0x73,
+ 0xfd,
+ 0x1ffb,
+ 0x7fff0,
+ 0x1ffc,
+ 0x3ffc,
+ 0x22,
+ 0x7ffd,
+ 0x3,
+ 0x23,
+ 0x4,
+ 0x24,
+ 0x5,
+ 0x25,
+ 0x26,
+ 0x27,
+ 0x6,
+ 0x74,
+ 0x75,
+ 0x28,
+ 0x29,
+ 0x2a,
+ 0x7,
+ 0x2b,
+ 0x76,
+ 0x2c,
+ 0x8,
+ 0x9,
+ 0x2d,
+ 0x77,
+ 0x78,
+ 0x79,
+ 0x7a,
+ 0x7b,
+ 0x7ffe,
+ 0x7fc,
+ 0x3ffd,
+ 0x1ffd,
+ 0xffffffc,
+ 0xfffe6,
+ 0x3fffd2,
+ 0xfffe7,
+ 0xfffe8,
+ 0x3fffd3,
+ 0x3fffd4,
+ 0x3fffd5,
+ 0x7fffd9,
+ 0x3fffd6,
+ 0x7fffda,
+ 0x7fffdb,
+ 0x7fffdc,
+ 0x7fffdd,
+ 0x7fffde,
+ 0xffffeb,
+ 0x7fffdf,
+ 0xffffec,
+ 0xffffed,
+ 0x3fffd7,
+ 0x7fffe0,
+ 0xffffee,
+ 0x7fffe1,
+ 0x7fffe2,
+ 0x7fffe3,
+ 0x7fffe4,
+ 0x1fffdc,
+ 0x3fffd8,
+ 0x7fffe5,
+ 0x3fffd9,
+ 0x7fffe6,
+ 0x7fffe7,
+ 0xffffef,
+ 0x3fffda,
+ 0x1fffdd,
+ 0xfffe9,
+ 0x3fffdb,
+ 0x3fffdc,
+ 0x7fffe8,
+ 0x7fffe9,
+ 0x1fffde,
+ 0x7fffea,
+ 0x3fffdd,
+ 0x3fffde,
+ 0xfffff0,
+ 0x1fffdf,
+ 0x3fffdf,
+ 0x7fffeb,
+ 0x7fffec,
+ 0x1fffe0,
+ 0x1fffe1,
+ 0x3fffe0,
+ 0x1fffe2,
+ 0x7fffed,
+ 0x3fffe1,
+ 0x7fffee,
+ 0x7fffef,
+ 0xfffea,
+ 0x3fffe2,
+ 0x3fffe3,
+ 0x3fffe4,
+ 0x7ffff0,
+ 0x3fffe5,
+ 0x3fffe6,
+ 0x7ffff1,
+ 0x3ffffe0,
+ 0x3ffffe1,
+ 0xfffeb,
+ 0x7fff1,
+ 0x3fffe7,
+ 0x7ffff2,
+ 0x3fffe8,
+ 0x1ffffec,
+ 0x3ffffe2,
+ 0x3ffffe3,
+ 0x3ffffe4,
+ 0x7ffffde,
+ 0x7ffffdf,
+ 0x3ffffe5,
+ 0xfffff1,
+ 0x1ffffed,
+ 0x7fff2,
+ 0x1fffe3,
+ 0x3ffffe6,
+ 0x7ffffe0,
+ 0x7ffffe1,
+ 0x3ffffe7,
+ 0x7ffffe2,
+ 0xfffff2,
+ 0x1fffe4,
+ 0x1fffe5,
+ 0x3ffffe8,
+ 0x3ffffe9,
+ 0xffffffd,
+ 0x7ffffe3,
+ 0x7ffffe4,
+ 0x7ffffe5,
+ 0xfffec,
+ 0xfffff3,
+ 0xfffed,
+ 0x1fffe6,
+ 0x3fffe9,
+ 0x1fffe7,
+ 0x1fffe8,
+ 0x7ffff3,
+ 0x3fffea,
+ 0x3fffeb,
+ 0x1ffffee,
+ 0x1ffffef,
+ 0xfffff4,
+ 0xfffff5,
+ 0x3ffffea,
+ 0x7ffff4,
+ 0x3ffffeb,
+ 0x7ffffe6,
+ 0x3ffffec,
+ 0x3ffffed,
+ 0x7ffffe7,
+ 0x7ffffe8,
+ 0x7ffffe9,
+ 0x7ffffea,
+ 0x7ffffeb,
+ 0xffffffe,
+ 0x7ffffec,
+ 0x7ffffed,
+ 0x7ffffee,
+ 0x7ffffef,
+ 0x7fffff0,
+ 0x3ffffee,
+}
+
+var huffmanCodeLen = []uint8{
+ 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
+ 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
+ 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
+ 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
+ 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
+ 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
+ 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
+ 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
+ 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
+ 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
+ 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
+ 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
+ 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
+}
diff --git a/vendor/src/golang.org/x/net/http2/http2.go b/vendor/src/golang.org/x/net/http2/http2.go
new file mode 100644
index 0000000000..35f9b26e28
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/http2.go
@@ -0,0 +1,249 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// Package http2 implements the HTTP/2 protocol.
+//
+// This is a work in progress. This package is low-level and intended
+// to be used directly by very few people. Most users will use it
+// indirectly through integration with the net/http package. See
+// ConfigureServer. That ConfigureServer call will likely be automatic
+// or available via an empty import in the future.
+//
+// See http://http2.github.io/
+package http2
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "sync"
+)
+
+var VerboseLogs = false
+
+const (
+ // ClientPreface is the string that must be sent by new
+ // connections from clients.
+ ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+
+ // SETTINGS_MAX_FRAME_SIZE default
+ // http://http2.github.io/http2-spec/#rfc.section.6.5.2
+ initialMaxFrameSize = 16384
+
+ // NextProtoTLS is the NPN/ALPN protocol negotiated during
+ // HTTP/2's TLS setup.
+ NextProtoTLS = "h2"
+
+ // http://http2.github.io/http2-spec/#SettingValues
+ initialHeaderTableSize = 4096
+
+ initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
+
+ defaultMaxReadFrameSize = 1 << 20
+)
+
+var (
+ clientPreface = []byte(ClientPreface)
+)
+
+type streamState int
+
+const (
+ stateIdle streamState = iota
+ stateOpen
+ stateHalfClosedLocal
+ stateHalfClosedRemote
+ stateResvLocal
+ stateResvRemote
+ stateClosed
+)
+
+var stateName = [...]string{
+ stateIdle: "Idle",
+ stateOpen: "Open",
+ stateHalfClosedLocal: "HalfClosedLocal",
+ stateHalfClosedRemote: "HalfClosedRemote",
+ stateResvLocal: "ResvLocal",
+ stateResvRemote: "ResvRemote",
+ stateClosed: "Closed",
+}
+
+func (st streamState) String() string {
+ return stateName[st]
+}
+
+// Setting is a setting parameter: which setting it is, and its value.
+type Setting struct {
+ // ID is which setting is being set.
+ // See http://http2.github.io/http2-spec/#SettingValues
+ ID SettingID
+
+ // Val is the value.
+ Val uint32
+}
+
+func (s Setting) String() string {
+ return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
+}
+
+// Valid reports whether the setting is valid.
+func (s Setting) Valid() error {
+ // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+ switch s.ID {
+ case SettingEnablePush:
+ if s.Val != 1 && s.Val != 0 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ case SettingInitialWindowSize:
+ if s.Val > 1<<31-1 {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ case SettingMaxFrameSize:
+ if s.Val < 16384 || s.Val > 1<<24-1 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ }
+ return nil
+}
+
+// A SettingID is an HTTP/2 setting as defined in
+// http://http2.github.io/http2-spec/#iana-settings
+type SettingID uint16
+
+const (
+ SettingHeaderTableSize SettingID = 0x1
+ SettingEnablePush SettingID = 0x2
+ SettingMaxConcurrentStreams SettingID = 0x3
+ SettingInitialWindowSize SettingID = 0x4
+ SettingMaxFrameSize SettingID = 0x5
+ SettingMaxHeaderListSize SettingID = 0x6
+)
+
+var settingName = map[SettingID]string{
+ SettingHeaderTableSize: "HEADER_TABLE_SIZE",
+ SettingEnablePush: "ENABLE_PUSH",
+ SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+ SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
+ SettingMaxFrameSize: "MAX_FRAME_SIZE",
+ SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+}
+
+func (s SettingID) String() string {
+ if v, ok := settingName[s]; ok {
+ return v
+ }
+ return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
+func validHeader(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ // "Just as in HTTP/1.x, header field names are
+ // strings of ASCII characters that are compared in a
+ // case-insensitive fashion. However, header field
+ // names MUST be converted to lowercase prior to their
+ // encoding in HTTP/2. "
+ if r >= 127 || ('A' <= r && r <= 'Z') {
+ return false
+ }
+ }
+ return true
+}
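+
+// Illustrative values for validHeader above, not part of the upstream
+// package: HTTP/2 requires lowercase header names, so mixed case and
+// empty names fail validation.
+var _ = [...]bool{
+ validHeader("content-type"), // true
+ validHeader("Content-Type"), // false: uppercase rejected
+ validHeader(""), // false: empty name rejected
+}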
+
+var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
+
+func init() {
+ for i := 100; i <= 999; i++ {
+ if v := http.StatusText(i); v != "" {
+ httpCodeStringCommon[i] = strconv.Itoa(i)
+ }
+ }
+}
+
+func httpCodeString(code int) string {
+ if s, ok := httpCodeStringCommon[code]; ok {
+ return s
+ }
+ return strconv.Itoa(code)
+}
+
+// from pkg io
+type stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+// A gate lets two goroutines coordinate their activities.
+type gate chan struct{}
+
+func (g gate) Done() { g <- struct{}{} }
+func (g gate) Wait() { <-g }
+
+// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
+type closeWaiter chan struct{}
+
+// Init makes a closeWaiter usable.
+// It exists so that a closeWaiter value can be placed inside a
+// larger struct without requiring a separate allocation for its
+// synchronization state.
+func (cw *closeWaiter) Init() {
+ *cw = make(chan struct{})
+}
+
+// Close marks the closeWaiter as closed and unblocks any waiters.
+func (cw closeWaiter) Close() {
+ close(cw)
+}
+
+// Wait waits for the closeWaiter to become closed.
+func (cw closeWaiter) Wait() {
+ <-cw
+}
+
+// bufferedWriter is a buffered writer that writes to w.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
+type bufferedWriter struct {
+ w io.Writer // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+}
+
+func newBufferedWriter(w io.Writer) *bufferedWriter {
+ return &bufferedWriter{w: w}
+}
+
+var bufWriterPool = sync.Pool{
+ New: func() interface{} {
+ // TODO: pick something better? this is a bit under
+ // (3 x typical 1500 byte MTU) at least.
+ return bufio.NewWriterSize(nil, 4<<10)
+ },
+}
+
+func (w *bufferedWriter) Write(p []byte) (n int, err error) {
+ if w.bw == nil {
+ bw := bufWriterPool.Get().(*bufio.Writer)
+ bw.Reset(w.w)
+ w.bw = bw
+ }
+ return w.bw.Write(p)
+}
+
+func (w *bufferedWriter) Flush() error {
+ bw := w.bw
+ if bw == nil {
+ return nil
+ }
+ err := bw.Flush()
+ bw.Reset(nil)
+ bufWriterPool.Put(bw)
+ w.bw = nil
+ return err
+}
diff --git a/vendor/src/golang.org/x/net/http2/pipe.go b/vendor/src/golang.org/x/net/http2/pipe.go
new file mode 100644
index 0000000000..ce9aad5336
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/pipe.go
@@ -0,0 +1,43 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+ "sync"
+)
+
+type pipe struct {
+ b buffer
+ c sync.Cond
+ m sync.Mutex
+}
+
+// Read waits until data is available and copies bytes
+// from the buffer into p.
+func (r *pipe) Read(p []byte) (n int, err error) {
+ r.c.L.Lock()
+ defer r.c.L.Unlock()
+ for r.b.Len() == 0 && !r.b.closed {
+ r.c.Wait()
+ }
+ return r.b.Read(p)
+}
+
+// Write copies bytes from p into the buffer and wakes a reader.
+// It is an error to write more data than the buffer can hold.
+func (w *pipe) Write(p []byte) (n int, err error) {
+ w.c.L.Lock()
+ defer w.c.L.Unlock()
+ defer w.c.Signal()
+ return w.b.Write(p)
+}
+
+func (c *pipe) Close(err error) {
+ c.c.L.Lock()
+ defer c.c.L.Unlock()
+ defer c.c.Signal()
+ c.b.Close(err)
+}
diff --git a/vendor/src/golang.org/x/net/http2/server.go b/vendor/src/golang.org/x/net/http2/server.go
new file mode 100644
index 0000000000..99cc673cc2
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/server.go
@@ -0,0 +1,1780 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// TODO: replace all <-sc.doneServing with reads from the stream's cw
+// instead, and make sure that on close we close all open
+// streams. then remove doneServing?
+
+// TODO: finish GOAWAY support. Consider each incoming frame type and
+// whether it should be ignored during a shutdown race.
+
+// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
+// configurable? or maximum number of idle clients and remove the
+// oldest?
+
+// TODO: turn off the serve goroutine when idle, so
+// an idle conn only has the readFrames goroutine active. (which could
+// also be optimized probably to pin less memory in crypto/tls). This
+// would involve tracking when the serve goroutine is active (atomic
+// int32 read/CAS probably?) and starting it up when frames arrive,
+// and shutting it down when all handlers exit. the occasional PING
+// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
+// (which is a no-op if already running) and then queue the PING write
+// as normal. The serve loop would then exit in most cases (if no
+// Handlers running) and not be woken up again until the PING packet
+// returns.
+
+// TODO (maybe): add a mechanism for Handlers to go into
+// half-closed-local mode (rw.(io.Closer) test?) but not exit their
+// handler, and continue to be able to read from the
+// Request.Body. This would be a somewhat semantic change from HTTP/1
+// (or at least what we expose in net/http), so I'd probably want to
+// add it there too. For now, this package says that returning from
+// the Handler ServeHTTP function means you're both done reading and
+// done writing, without a way to stop just one or the other.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+const (
+ prefaceTimeout = 10 * time.Second
+ firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ handlerChunkWriteSize = 4 << 10
+ defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+)
+
+var (
+ errClientDisconnected = errors.New("client disconnected")
+ errClosedBody = errors.New("body closed by handler")
+ errStreamBroken = errors.New("http2: stream broken")
+)
+
+var responseWriterStatePool = sync.Pool{
+ New: func() interface{} {
+ rws := &responseWriterState{}
+ rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
+ return rws
+ },
+}
+
+// Test hooks.
+var (
+ testHookOnConn func()
+ testHookGetServerConn func(*serverConn)
+ testHookOnPanicMu *sync.Mutex // nil except in tests
+ testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type Server struct {
+ // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+ // which may run at a time over all connections.
+ // Negative or zero means no limit.
+ // TODO: implement
+ MaxHandlers int
+
+ // MaxConcurrentStreams optionally specifies the number of
+ // concurrent streams that each client may have open at a
+ // time. This is unrelated to the number of http.Handler goroutines
+ // which may be active globally, which is MaxHandlers.
+ // If zero, MaxConcurrentStreams defaults to at least 100, per
+ // the HTTP/2 spec's recommendations.
+ MaxConcurrentStreams uint32
+
+ // MaxReadFrameSize optionally specifies the largest frame
+ // this server is willing to read. A valid value is between
+ // 16k and 16M, inclusive. If zero or otherwise invalid, a
+ // default value is used.
+ MaxReadFrameSize uint32
+
+ // PermitProhibitedCipherSuites, if true, permits the use of
+ // cipher suites prohibited by the HTTP/2 spec.
+ PermitProhibitedCipherSuites bool
+}
+
+func (s *Server) maxReadFrameSize() uint32 {
+ if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
+ return v
+ }
+ return defaultMaxReadFrameSize
+}
+
+func (s *Server) maxConcurrentStreams() uint32 {
+ if v := s.MaxConcurrentStreams; v > 0 {
+ return v
+ }
+ return defaultMaxStreams
+}
+
+// ConfigureServer adds HTTP/2 support to a net/http Server.
+//
+// The configuration conf may be nil.
+//
+// ConfigureServer must be called before s begins serving.
+func ConfigureServer(s *http.Server, conf *Server) {
+ if conf == nil {
+ conf = new(Server)
+ }
+ if s.TLSConfig == nil {
+ s.TLSConfig = new(tls.Config)
+ }
+
+ // Note: not setting MinVersion to tls.VersionTLS12,
+ // as we don't want to interfere with HTTP/1.1 traffic
+ // on the user's server. We enforce TLS 1.2 later once
+ // we accept a connection. Ideally this should be done
+ // during next-proto selection, but using TLS <1.2 with
+ // HTTP/2 is still the client's bug.
+
+ // Be sure we advertise tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ // at least.
+ // TODO: enable PreferServerCipherSuites?
+ if s.TLSConfig.CipherSuites != nil {
+ const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ haveRequired := false
+ for _, v := range s.TLSConfig.CipherSuites {
+ if v == requiredCipher {
+ haveRequired = true
+ break
+ }
+ }
+ if !haveRequired {
+ s.TLSConfig.CipherSuites = append(s.TLSConfig.CipherSuites, requiredCipher)
+ }
+ }
+
+ haveNPN := false
+ for _, p := range s.TLSConfig.NextProtos {
+ if p == NextProtoTLS {
+ haveNPN = true
+ break
+ }
+ }
+ if !haveNPN {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
+ }
+ // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
+ // to switch to "h2".
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")
+
+ if s.TLSNextProto == nil {
+ s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
+ }
+ protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ if testHookOnConn != nil {
+ testHookOnConn()
+ }
+ conf.handleConn(hs, c, h)
+ }
+ s.TLSNextProto[NextProtoTLS] = protoHandler
+ s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
+}
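+
+// exampleConfigureServer is an illustrative sketch, not part of the
+// upstream package. The address and the cert.pem/key.pem file names
+// are placeholders; any TLS certificate and key pair would do.
+func exampleConfigureServer() {
+ srv := &http.Server{Addr: ":4430", Handler: http.DefaultServeMux}
+ ConfigureServer(srv, &Server{MaxConcurrentStreams: 250})
+ log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
+}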
+
+func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) {
+ sc := &serverConn{
+ srv: srv,
+ hs: hs,
+ conn: c,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: newBufferedWriter(c),
+ handler: h,
+ streams: make(map[uint32]*stream),
+ readFrameCh: make(chan frameAndGate),
+ readFrameErrCh: make(chan error, 1), // must be buffered for 1
+ wantWriteFrameCh: make(chan frameWriteMsg, 8),
+ wroteFrameCh: make(chan struct{}, 1), // buffered; one send in reading goroutine
+ bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ advMaxStreams: srv.maxConcurrentStreams(),
+ writeSched: writeScheduler{
+ maxFrameSize: initialMaxFrameSize,
+ },
+ initialWindowSize: initialWindowSize,
+ headerTableSize: initialHeaderTableSize,
+ serveG: newGoroutineLock(),
+ pushEnabled: true,
+ }
+ sc.flow.add(initialWindowSize)
+ sc.inflow.add(initialWindowSize)
+ sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+ sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, sc.onNewHeaderField)
+
+ fr := NewFramer(sc.bw, c)
+ fr.SetMaxReadFrameSize(srv.maxReadFrameSize())
+ sc.framer = fr
+
+ if tc, ok := c.(*tls.Conn); ok {
+ sc.tlsState = new(tls.ConnectionState)
+ *sc.tlsState = tc.ConnectionState()
+ // 9.2 Use of TLS Features
+ // An implementation of HTTP/2 over TLS MUST use TLS
+ // 1.2 or higher with the restrictions on feature set
+ // and cipher suite described in this section. Due to
+ // implementation limitations, it might not be
+ // possible to fail TLS negotiation. An endpoint MUST
+ // immediately terminate an HTTP/2 connection that
+ // does not meet the TLS requirements described in
+ // this section with a connection error (Section
+ // 5.4.1) of type INADEQUATE_SECURITY.
+ if sc.tlsState.Version < tls.VersionTLS12 {
+ sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
+ return
+ }
+
+ if sc.tlsState.ServerName == "" {
+ // Client must use SNI, but we don't enforce that anymore,
+ // since it was causing problems when connecting to bare IP
+ // addresses during development.
+ //
+ // TODO: optionally enforce? Or enforce at the time we receive
+ // a new request, and verify that the ServerName matches the :authority?
+ // But that precludes proxy situations, perhaps.
+ //
+ // So for now, do nothing here.
+ }
+
+ if !srv.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ // "Endpoints MAY choose to generate a connection error
+ // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
+ // the prohibited cipher suites are negotiated."
+ //
+ // We choose that. In my opinion, the spec is weak
+ // here. It also says both parties must support at least
+ // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
+ // excuses here. If we really must, we could allow an
+ // "AllowInsecureWeakCiphers" option on the server later.
+ // Let's see how it plays out first.
+ sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
+ return
+ }
+ }
+
+ if hook := testHookGetServerConn; hook != nil {
+ hook(sc)
+ }
+ sc.serve()
+}
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+func isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case tls.TLS_RSA_WITH_RC4_128_SHA,
+ tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ // Reject cipher suites from Appendix A.
+ // "This list includes those cipher suites that do not
+ // offer an ephemeral key exchange and those that are
+ // based on the TLS null, stream or block cipher type"
+ return true
+ default:
+ return false
+ }
+}
+
+func (sc *serverConn) rejectConn(err ErrCode, debug string) {
+ log.Printf("REJECTING conn: %v, %s", err, debug)
+ // ignoring errors. hanging up anyway.
+ sc.framer.WriteGoAway(0, err, []byte(debug))
+ sc.bw.Flush()
+ sc.conn.Close()
+}
+
+// frameAndGate coordinates the readFrames and serve
+// goroutines. Because the Framer interface only permits the most
+// recently-read Frame to be accessed, the readFrames goroutine
+// blocks until it has a frame, passes it to serve, and then waits for
+// serve to be done with it before reading the next one.
+type frameAndGate struct {
+ f Frame
+ g gate
+}
+
+type serverConn struct {
+ // Immutable:
+ srv *Server
+ hs *http.Server
+ conn net.Conn
+ bw *bufferedWriter // writing to conn
+ handler http.Handler
+ framer *Framer
+ hpackDecoder *hpack.Decoder
+ doneServing chan struct{} // closed when serverConn.serve ends
+ readFrameCh chan frameAndGate // written by serverConn.readFrames
+ readFrameErrCh chan error
+ wantWriteFrameCh chan frameWriteMsg // from handlers -> serve
+ wroteFrameCh chan struct{} // from writeFrameAsync -> serve, tickles more frame writes
+ bodyReadCh chan bodyReadMsg // from handlers -> serve
+ testHookCh chan func() // code to run on the serve loop
+ flow flow // conn-wide (not stream-specific) outbound flow control
+ inflow flow // conn-wide inbound flow control
+ tlsState *tls.ConnectionState // shared by all handlers, like net/http
+ remoteAddrStr string
+
+ // Everything following is owned by the serve loop; use serveG.check():
+ serveG goroutineLock // used to verify funcs are on serve()
+ pushEnabled bool
+ sawFirstSettings bool // got the initial SETTINGS frame after the preface
+ needToSendSettingsAck bool
+ unackedSettings int // how many SETTINGS have we sent without ACKs?
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+ curOpenStreams uint32 // client's number of open streams
+ maxStreamID uint32 // max ever seen
+ streams map[uint32]*stream
+ initialWindowSize int32
+ headerTableSize uint32
+ maxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
+ req requestParam // non-zero while reading request headers
+ writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+ writeSched writeScheduler
+ inGoAway bool // we've started to or sent GOAWAY
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ goAwayCode ErrCode
+ shutdownTimerCh <-chan time.Time // nil until used
+ shutdownTimer *time.Timer // nil until used
+
+ // Owned by the writeFrameAsync goroutine:
+ headerWriteBuf bytes.Buffer
+ hpackEncoder *hpack.Encoder
+}
+
+// requestParam is the state of the next request, initialized over
+// potentially several frames: HEADERS + zero or more CONTINUATION
+// frames.
+type requestParam struct {
+ // stream is non-nil if we're reading (HEADER or CONTINUATION)
+ // frames for a request (but not DATA).
+ stream *stream
+ header http.Header
+ method, path string
+ scheme, authority string
+ sawRegularHeader bool // saw a non-pseudo header already
+ invalidHeader bool // an invalid header was seen
+}
+
+// stream represents a stream. This is the minimal metadata needed by
+// the serve goroutine. Most of the actual stream state is owned by
+// the http.Handler's goroutine in the responseWriter. Because the
+// responseWriter's responseWriterState is recycled at the end of a
+// handler, this struct intentionally has no pointer to the
+// *responseWriter{,State} itself, as the Handler ending nils out the
+// responseWriter's state field.
+type stream struct {
+ // immutable:
+ id uint32
+ body *pipe // non-nil if expecting DATA frames
+ cw closeWaiter // closed when the stream transitions to the closed state
+
+ // owned by serverConn's serve loop:
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow flow // limits writing from Handler to client
+ inflow flow // what the client is allowed to POST/etc to us
+ parent *stream // or nil
+ weight uint8
+ state streamState
+ sentReset bool // only true once detached from streams map
+ gotReset bool // only true once detached from streams map
+}
+
+func (sc *serverConn) Framer() *Framer { return sc.framer }
+func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
+func (sc *serverConn) Flush() error { return sc.bw.Flush() }
+func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
+ return sc.hpackEncoder, &sc.headerWriteBuf
+}
+
+func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
+ sc.serveG.check()
+ // http://http2.github.io/http2-spec/#rfc.section.5.1
+ if st, ok := sc.streams[streamID]; ok {
+ return st.state, st
+ }
+ // "The first use of a new stream identifier implicitly closes all
+ // streams in the "idle" state that might have been initiated by
+ // that peer with a lower-valued stream identifier. For example, if
+ // a client sends a HEADERS frame on stream 7 without ever sending a
+ // frame on stream 5, then stream 5 transitions to the "closed"
+ // state when the first frame for stream 7 is sent or received."
+ if streamID <= sc.maxStreamID {
+ return stateClosed, nil
+ }
+ return stateIdle, nil
+}
+
+func (sc *serverConn) vlogf(format string, args ...interface{}) {
+ if VerboseLogs {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) logf(format string, args ...interface{}) {
+ if lg := sc.hs.ErrorLog; lg != nil {
+ lg.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
+ if err == nil {
+ return
+ }
+ str := err.Error()
+ if err == io.EOF || strings.Contains(str, "use of closed network connection") {
+ // Boring, expected errors.
+ sc.vlogf(format, args...)
+ } else {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) onNewHeaderField(f hpack.HeaderField) {
+ sc.serveG.check()
+ sc.vlogf("got header field %+v", f)
+ switch {
+ case !validHeader(f.Name):
+ sc.req.invalidHeader = true
+ case strings.HasPrefix(f.Name, ":"):
+ if sc.req.sawRegularHeader {
+ sc.logf("pseudo-header after regular header")
+ sc.req.invalidHeader = true
+ return
+ }
+ var dst *string
+ switch f.Name {
+ case ":method":
+ dst = &sc.req.method
+ case ":path":
+ dst = &sc.req.path
+ case ":scheme":
+ dst = &sc.req.scheme
+ case ":authority":
+ dst = &sc.req.authority
+ default:
+ // 8.1.2.1 Pseudo-Header Fields
+ // "Endpoints MUST treat a request or response
+ // that contains undefined or invalid
+ // pseudo-header fields as malformed (Section
+ // 8.1.2.6)."
+ sc.logf("invalid pseudo-header %q", f.Name)
+ sc.req.invalidHeader = true
+ return
+ }
+ if *dst != "" {
+ sc.logf("duplicate pseudo-header %q sent", f.Name)
+ sc.req.invalidHeader = true
+ return
+ }
+ *dst = f.Value
+ case f.Name == "cookie":
+ sc.req.sawRegularHeader = true
+ if s, ok := sc.req.header["Cookie"]; ok && len(s) == 1 {
+ s[0] = s[0] + "; " + f.Value
+ } else {
+ sc.req.header.Add("Cookie", f.Value)
+ }
+ default:
+ sc.req.sawRegularHeader = true
+ sc.req.header.Add(sc.canonicalHeader(f.Name), f.Value)
+ }
+}
+
+func (sc *serverConn) canonicalHeader(v string) string {
+ sc.serveG.check()
+ cv, ok := commonCanonHeader[v]
+ if ok {
+ return cv
+ }
+ cv, ok = sc.canonHeader[v]
+ if ok {
+ return cv
+ }
+ if sc.canonHeader == nil {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = http.CanonicalHeaderKey(v)
+ sc.canonHeader[v] = cv
+ return cv
+}
+
+// readFrames is the loop that reads incoming frames.
+// It's run on its own goroutine.
+func (sc *serverConn) readFrames() {
+ g := make(gate, 1)
+ for {
+ f, err := sc.framer.ReadFrame()
+ if err != nil {
+ sc.readFrameErrCh <- err
+ close(sc.readFrameCh)
+ return
+ }
+ sc.readFrameCh <- frameAndGate{f, g}
+ // We can't read another frame until this one is
+ // processed, as the ReadFrame interface doesn't copy
+ // memory. The Frame accessor methods access the last
+ // frame's (shared) buffer. So we wait for the
+ // serve goroutine to tell us it's done:
+ g.Wait()
+ }
+}
+
+// writeFrameAsync runs in its own goroutine and writes a single frame
+// and then reports when it's done.
+// At most one goroutine can be running writeFrameAsync at a time per
+// serverConn.
+func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
+ err := wm.write.writeFrame(sc)
+ if ch := wm.done; ch != nil {
+ select {
+ case ch <- err:
+ default:
+ panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
+ }
+ }
+ sc.wroteFrameCh <- struct{}{} // tickle frame selection scheduler
+}
+
+func (sc *serverConn) closeAllStreamsOnConnClose() {
+ sc.serveG.check()
+ for _, st := range sc.streams {
+ sc.closeStream(st, errClientDisconnected)
+ }
+}
+
+func (sc *serverConn) stopShutdownTimer() {
+ sc.serveG.check()
+ if t := sc.shutdownTimer; t != nil {
+ t.Stop()
+ }
+}
+
+func (sc *serverConn) notePanic() {
+ if testHookOnPanicMu != nil {
+ testHookOnPanicMu.Lock()
+ defer testHookOnPanicMu.Unlock()
+ }
+ if testHookOnPanic != nil {
+ if e := recover(); e != nil {
+ if testHookOnPanic(sc, e) {
+ panic(e)
+ }
+ }
+ }
+}
+
+func (sc *serverConn) serve() {
+ sc.serveG.check()
+ defer sc.notePanic()
+ defer sc.conn.Close()
+ defer sc.closeAllStreamsOnConnClose()
+ defer sc.stopShutdownTimer()
+ defer close(sc.doneServing) // unblocks handlers trying to send
+
+ sc.vlogf("HTTP/2 connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
+
+ sc.writeFrame(frameWriteMsg{
+ write: writeSettings{
+ {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
+ {SettingMaxConcurrentStreams, sc.advMaxStreams},
+
+ // TODO: more actual settings, notably
+ // SettingInitialWindowSize, but then we also
+ // want to bump up the conn window size the
+ // same amount here right after the settings
+ },
+ })
+ sc.unackedSettings++
+
+ if err := sc.readPreface(); err != nil {
+ sc.condlogf(err, "error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
+ return
+ }
+
+ go sc.readFrames() // closed by defer sc.conn.Close above
+
+ settingsTimer := time.NewTimer(firstSettingsTimeout)
+ for {
+ select {
+ case wm := <-sc.wantWriteFrameCh:
+ sc.writeFrame(wm)
+ case <-sc.wroteFrameCh:
+ if !sc.writingFrame {
+ panic("internal error: expected to be already writing a frame")
+ }
+ sc.writingFrame = false
+ sc.scheduleFrameWrite()
+ case fg, ok := <-sc.readFrameCh:
+ if !ok {
+ sc.readFrameCh = nil
+ }
+ if !sc.processFrameFromReader(fg, ok) {
+ return
+ }
+ if settingsTimer.C != nil {
+ settingsTimer.Stop()
+ settingsTimer.C = nil
+ }
+ case m := <-sc.bodyReadCh:
+ sc.noteBodyRead(m.st, m.n)
+ case <-settingsTimer.C:
+ sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
+ return
+ case <-sc.shutdownTimerCh:
+ sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
+ return
+ case fn := <-sc.testHookCh:
+ fn()
+ }
+ }
+}
+
+// readPreface reads the ClientPreface greeting from the peer
+// or returns an error on timeout or an invalid greeting.
+func (sc *serverConn) readPreface() error {
+ errc := make(chan error, 1)
+ go func() {
+ // Read the client preface
+ buf := make([]byte, len(ClientPreface))
+ if _, err := io.ReadFull(sc.conn, buf); err != nil {
+ errc <- err
+ } else if !bytes.Equal(buf, clientPreface) {
+ errc <- fmt.Errorf("bogus greeting %q", buf)
+ } else {
+ errc <- nil
+ }
+ }()
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ return errors.New("timeout waiting for client preface")
+ case err := <-errc:
+ if err == nil {
+ sc.vlogf("client %v said hello", sc.conn.RemoteAddr())
+ }
+ return err
+ }
+}
+
+// writeDataFromHandler writes the data described in req to stream.id.
+//
+// The provided ch is used to avoid allocating new channels for each
+// write operation. It's expected that the caller reuses writeData and ch
+// over time.
+//
+// The flow control currently happens in the Handler where it waits
+// for 1 or more bytes to be available to then write here. So at this
+// point we know that we have flow control. But this might have to
+// change when priority is implemented, so the serve goroutine knows
+// the total amount of bytes waiting to be sent and can have more
+// scheduling decisions available.
+func (sc *serverConn) writeDataFromHandler(stream *stream, writeData *writeData, ch chan error) error {
+ sc.writeFrameFromHandler(frameWriteMsg{
+ write: writeData,
+ stream: stream,
+ done: ch,
+ })
+ select {
+ case err := <-ch:
+ return err
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-stream.cw:
+ return errStreamBroken
+ }
+}
+
+// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
+// if the connection has gone away.
+//
+// This must not be run from the serve goroutine itself, else it might
+// deadlock writing to sc.wantWriteFrameCh (which is only mildly
+// buffered and is read by serve itself). If you're on the serve
+// goroutine, call writeFrame instead.
+func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.wantWriteFrameCh <- wm:
+ case <-sc.doneServing:
+ // Client has closed their connection to the server.
+ }
+}
+
+// writeFrame schedules a frame to write and sends it if there's nothing
+// already being written.
+//
+// There is no pushback here (the serve goroutine never blocks). It's
+// the http.Handlers that block, waiting for their previous frames to
+// make it onto the wire.
+//
+// If you're not on the serve goroutine, use writeFrameFromHandler instead.
+func (sc *serverConn) writeFrame(wm frameWriteMsg) {
+ sc.serveG.check()
+ sc.writeSched.add(wm)
+ sc.scheduleFrameWrite()
+}
+
+// startFrameWrite starts a goroutine to write wm (in a separate
+// goroutine since that might block on the network), and updates the
+// serve goroutine's state about the world, updated from info in wm.
+func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
+ sc.serveG.check()
+ if sc.writingFrame {
+ panic("internal error: can only be writing one frame at a time")
+ }
+ sc.writingFrame = true
+
+ st := wm.stream
+ if st != nil {
+ switch st.state {
+ case stateHalfClosedLocal:
+ panic("internal error: attempt to send frame on half-closed-local stream")
+ case stateClosed:
+ if st.sentReset || st.gotReset {
+ // Skip this frame. But fake the frame write to reschedule:
+ sc.wroteFrameCh <- struct{}{}
+ return
+ }
+ panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
+ }
+ }
+
+ sc.needsFrameFlush = true
+ if endsStream(wm.write) {
+ if st == nil {
+ panic("internal error: expecting non-nil stream")
+ }
+ switch st.state {
+ case stateOpen:
+ // Here we would go to stateHalfClosedLocal in
+ // theory, but since our handler is done and
+ // the net/http package provides no mechanism
+ // for finishing writing to a ResponseWriter
+ // while still reading data (see possible TODO
+ // at top of this file), we go into closed
+ // state here anyway, after telling the peer
+ // we're hanging up on them.
+ st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
+ errCancel := StreamError{st.id, ErrCodeCancel}
+ sc.resetStream(errCancel)
+ case stateHalfClosedRemote:
+ sc.closeStream(st, nil)
+ }
+ }
+ go sc.writeFrameAsync(wm)
+}
+
+// scheduleFrameWrite tickles the frame writing scheduler.
+//
+// If a frame is already being written, nothing happens. This will be called again
+// when the frame is done being written.
+//
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected, preferring first things that aren't
+// stream-specific (e.g. ACKing settings), and then finding the
+// highest priority stream.
+//
+// If a frame isn't being written and there's nothing else to send, we
+// flush the write buffer.
+func (sc *serverConn) scheduleFrameWrite() {
+ sc.serveG.check()
+ if sc.writingFrame {
+ return
+ }
+ if sc.needToSendGoAway {
+ sc.needToSendGoAway = false
+ sc.startFrameWrite(frameWriteMsg{
+ write: &writeGoAway{
+ maxStreamID: sc.maxStreamID,
+ code: sc.goAwayCode,
+ },
+ })
+ return
+ }
+ if sc.needToSendSettingsAck {
+ sc.needToSendSettingsAck = false
+ sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
+ return
+ }
+ if !sc.inGoAway {
+ if wm, ok := sc.writeSched.take(); ok {
+ sc.startFrameWrite(wm)
+ return
+ }
+ }
+ if sc.needsFrameFlush {
+ sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
+ sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+ return
+ }
+}
+
+func (sc *serverConn) goAway(code ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
+ return
+ }
+ if code != ErrCodeNo {
+ sc.shutDownIn(250 * time.Millisecond)
+ } else {
+ // TODO: configurable
+ sc.shutDownIn(1 * time.Second)
+ }
+ sc.inGoAway = true
+ sc.needToSendGoAway = true
+ sc.goAwayCode = code
+ sc.scheduleFrameWrite()
+}
+
+func (sc *serverConn) shutDownIn(d time.Duration) {
+ sc.serveG.check()
+ sc.shutdownTimer = time.NewTimer(d)
+ sc.shutdownTimerCh = sc.shutdownTimer.C
+}
+
+func (sc *serverConn) resetStream(se StreamError) {
+ sc.serveG.check()
+ sc.writeFrame(frameWriteMsg{write: se})
+ if st, ok := sc.streams[se.StreamID]; ok {
+ st.sentReset = true
+ sc.closeStream(st, se)
+ }
+}
+
+// curHeaderStreamID returns the stream ID of the header block we're
+// currently in the middle of reading. If this returns non-zero, the
+// next frame must be a CONTINUATION with this stream id.
+func (sc *serverConn) curHeaderStreamID() uint32 {
+ sc.serveG.check()
+ st := sc.req.stream
+ if st == nil {
+ return 0
+ }
+ return st.id
+}
+
+// processFrameFromReader processes the serve loop's read from readFrameCh from the
+// frame-reading goroutine.
+// processFrameFromReader returns whether the connection should be kept open.
+func (sc *serverConn) processFrameFromReader(fg frameAndGate, fgValid bool) bool {
+ sc.serveG.check()
+ var clientGone bool
+ var err error
+ if !fgValid {
+ err = <-sc.readFrameErrCh
+ if err == ErrFrameTooLarge {
+ sc.goAway(ErrCodeFrameSize)
+ return true // goAway will close the loop
+ }
+ clientGone = err == io.EOF || strings.Contains(err.Error(), "use of closed network connection")
+ if clientGone {
+ // TODO: could we also get into this state if
+ // the peer does a half close
+ // (e.g. CloseWrite) because they're done
+ // sending frames but they're still wanting
+ // our open replies? Investigate.
+ // TODO: add CloseWrite to crypto/tls.Conn first
+ // so we have a way to test this? I suppose
+ // just for testing we could have a non-TLS mode.
+ return false
+ }
+ }
+
+ if fgValid {
+ f := fg.f
+ sc.vlogf("got %v: %#v", f.Header(), f)
+ err = sc.processFrame(f)
+ fg.g.Done() // unblock the readFrames goroutine
+ if err == nil {
+ return true
+ }
+ }
+
+ switch ev := err.(type) {
+ case StreamError:
+ sc.resetStream(ev)
+ return true
+ case goAwayFlowError:
+ sc.goAway(ErrCodeFlowControl)
+ return true
+ case ConnectionError:
+ sc.logf("%v: %v", sc.conn.RemoteAddr(), ev)
+ sc.goAway(ErrCode(ev))
+ return true // goAway will handle shutdown
+ default:
+ if !fgValid {
+ sc.logf("disconnecting; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
+ } else {
+ sc.logf("disconnection due to other error: %v", err)
+ }
+ }
+ return false
+}
+
+func (sc *serverConn) processFrame(f Frame) error {
+ sc.serveG.check()
+
+ // First frame received must be SETTINGS.
+ if !sc.sawFirstSettings {
+ if _, ok := f.(*SettingsFrame); !ok {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.sawFirstSettings = true
+ }
+
+ if s := sc.curHeaderStreamID(); s != 0 {
+ if cf, ok := f.(*ContinuationFrame); !ok {
+ return ConnectionError(ErrCodeProtocol)
+ } else if cf.Header().StreamID != s {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ }
+
+ switch f := f.(type) {
+ case *SettingsFrame:
+ return sc.processSettings(f)
+ case *HeadersFrame:
+ return sc.processHeaders(f)
+ case *ContinuationFrame:
+ return sc.processContinuation(f)
+ case *WindowUpdateFrame:
+ return sc.processWindowUpdate(f)
+ case *PingFrame:
+ return sc.processPing(f)
+ case *DataFrame:
+ return sc.processData(f)
+ case *RSTStreamFrame:
+ return sc.processResetStream(f)
+ case *PriorityFrame:
+ return sc.processPriority(f)
+ case *PushPromiseFrame:
+ // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
+ // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ return ConnectionError(ErrCodeProtocol)
+ default:
+ log.Printf("Ignoring frame: %v", f.Header())
+ return nil
+ }
+}
+
+func (sc *serverConn) processPing(f *PingFrame) error {
+ sc.serveG.check()
+ if f.Flags.Has(FlagSettingsAck) {
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ if f.StreamID != 0 {
+ // "PING frames are not associated with any individual
+ // stream. If a PING frame is received with a stream
+ // identifier field value other than 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
+ return nil
+}
+
+func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
+ sc.serveG.check()
+ switch {
+ case f.StreamID != 0: // stream-level flow control
+ st := sc.streams[f.StreamID]
+ if st == nil {
+ // "WINDOW_UPDATE can be sent by a peer that has sent a
+ // frame bearing the END_STREAM flag. This means that a
+ // receiver could receive a WINDOW_UPDATE frame on a "half
+ // closed (remote)" or "closed" stream. A receiver MUST
+ // NOT treat this as an error, see Section 5.1."
+ return nil
+ }
+ if !st.flow.add(int32(f.Increment)) {
+ return StreamError{f.StreamID, ErrCodeFlowControl}
+ }
+ default: // connection-level flow control
+ if !sc.flow.add(int32(f.Increment)) {
+ return goAwayFlowError{}
+ }
+ }
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
+ sc.serveG.check()
+
+ state, st := sc.state(f.StreamID)
+ if state == stateIdle {
+ // 6.4 "RST_STREAM frames MUST NOT be sent for a
+ // stream in the "idle" state. If a RST_STREAM frame
+ // identifying an idle stream is received, the
+ // recipient MUST treat this as a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if st != nil {
+ st.gotReset = true
+ sc.closeStream(st, StreamError{f.StreamID, f.ErrCode})
+ }
+ return nil
+}
+
+func (sc *serverConn) closeStream(st *stream, err error) {
+ sc.serveG.check()
+ if st.state == stateIdle || st.state == stateClosed {
+ panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
+ }
+ st.state = stateClosed
+ sc.curOpenStreams--
+ delete(sc.streams, st.id)
+ if p := st.body; p != nil {
+ p.Close(err)
+ }
+ st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
+ sc.writeSched.forgetStream(st.id)
+}
+
+func (sc *serverConn) processSettings(f *SettingsFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ sc.unackedSettings--
+ if sc.unackedSettings < 0 {
+ // Why is the peer ACKing settings we never sent?
+ // The spec doesn't mention this case, but
+ // hang up on them anyway.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ return nil
+ }
+ if err := f.ForeachSetting(sc.processSetting); err != nil {
+ return err
+ }
+ sc.needToSendSettingsAck = true
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *serverConn) processSetting(s Setting) error {
+ sc.serveG.check()
+ if err := s.Valid(); err != nil {
+ return err
+ }
+ sc.vlogf("processing setting %v", s)
+ switch s.ID {
+ case SettingHeaderTableSize:
+ sc.headerTableSize = s.Val
+ sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
+ case SettingEnablePush:
+ sc.pushEnabled = s.Val != 0
+ case SettingMaxConcurrentStreams:
+ sc.clientMaxStreams = s.Val
+ case SettingInitialWindowSize:
+ return sc.processSettingInitialWindowSize(s.Val)
+ case SettingMaxFrameSize:
+ sc.writeSched.maxFrameSize = s.Val
+ case SettingMaxHeaderListSize:
+ sc.maxHeaderListSize = s.Val
+ default:
+ // Unknown setting: "An endpoint that receives a SETTINGS
+ // frame with any unknown or unsupported identifier MUST
+ // ignore that setting."
+ }
+ return nil
+}
+
+func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
+ sc.serveG.check()
+ // Note: val already validated to be within range by
+ // processSetting's Valid call.
+
+ // "A SETTINGS frame can alter the initial flow control window
+ // size for all current streams. When the value of
+ // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
+ // adjust the size of all stream flow control windows that it
+ // maintains by the difference between the new value and the
+ // old value."
+ old := sc.initialWindowSize
+ sc.initialWindowSize = int32(val)
+ growth := sc.initialWindowSize - old // may be negative
+ for _, st := range sc.streams {
+ if !st.flow.add(growth) {
+ // 6.9.2 Initial Flow Control Window Size
+ // "An endpoint MUST treat a change to
+ // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
+ // control window to exceed the maximum size as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR."
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ }
+ return nil
+}
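+
+// Illustrative arithmetic for the adjustment above (the numbers are
+// an example, not from the spec): if a client lowers
+// SETTINGS_INITIAL_WINDOW_SIZE from the default 65535 to 32768,
+// growth is -32767 and every open stream's send window shrinks by
+// that amount, possibly going negative until WINDOW_UPDATE frames
+// restore credit.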
+
+func (sc *serverConn) processData(f *DataFrame) error {
+ sc.serveG.check()
+ // "If a DATA frame is received whose stream is not in "open"
+ // or "half closed (local)" state, the recipient MUST respond
+ // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
+ id := f.Header().StreamID
+ st, ok := sc.streams[id]
+ if !ok || st.state != stateOpen {
+ // This includes sending a RST_STREAM if the stream is
+ // in stateHalfClosedLocal (which currently means that
+ // the http.Handler returned, so it's done reading &
+ // done writing). Try to stop the client from sending
+ // more DATA.
+ return StreamError{id, ErrCodeStreamClosed}
+ }
+ if st.body == nil {
+ panic("internal error: should have a body in this state")
+ }
+ data := f.Data()
+
+ // Sender sending more than they'd declared?
+ if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
+ st.body.Close(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
+ return StreamError{id, ErrCodeStreamClosed}
+ }
+ if len(data) > 0 {
+ // Check whether the client has flow control quota.
+ if int(st.inflow.available()) < len(data) {
+ return StreamError{id, ErrCodeFlowControl}
+ }
+ st.inflow.take(int32(len(data)))
+ wrote, err := st.body.Write(data)
+ if err != nil {
+ return StreamError{id, ErrCodeStreamClosed}
+ }
+ if wrote != len(data) {
+ panic("internal error: bad Writer")
+ }
+ st.bodyBytes += int64(len(data))
+ }
+ if f.StreamEnded() {
+ if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+ st.body.Close(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+ st.declBodyBytes, st.bodyBytes))
+ } else {
+ st.body.Close(io.EOF)
+ }
+ st.state = stateHalfClosedRemote
+ }
+ return nil
+}
+
+func (sc *serverConn) processHeaders(f *HeadersFrame) error {
+ sc.serveG.check()
+ id := f.Header().StreamID
+ if sc.inGoAway {
+ // Ignore.
+ return nil
+ }
+ // http://http2.github.io/http2-spec/#rfc.section.5.1.1
+ if id%2 != 1 || id <= sc.maxStreamID || sc.req.stream != nil {
+ // Streams initiated by a client MUST use odd-numbered
+ // stream identifiers. [...] The identifier of a newly
+ // established stream MUST be numerically greater than all
+ // streams that the initiating endpoint has opened or
+ // reserved. [...] An endpoint that receives an unexpected
+ // stream identifier MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if id > sc.maxStreamID {
+ sc.maxStreamID = id
+ }
+ st := &stream{
+ id: id,
+ state: stateOpen,
+ }
+ if f.StreamEnded() {
+ st.state = stateHalfClosedRemote
+ }
+ st.cw.Init()
+
+ st.flow.conn = &sc.flow // link to conn-level counter
+ st.flow.add(sc.initialWindowSize)
+ st.inflow.conn = &sc.inflow // link to conn-level counter
+ st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
+
+ sc.streams[id] = st
+ if f.HasPriority() {
+ adjustStreamPriority(sc.streams, st.id, f.Priority)
+ }
+ sc.curOpenStreams++
+ sc.req = requestParam{
+ stream: st,
+ header: make(http.Header),
+ }
+ return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded())
+}
+
+func (sc *serverConn) processContinuation(f *ContinuationFrame) error {
+ sc.serveG.check()
+ st := sc.streams[f.Header().StreamID]
+ if st == nil || sc.curHeaderStreamID() != st.id {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded())
+}
+
+func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error {
+ sc.serveG.check()
+ if _, err := sc.hpackDecoder.Write(frag); err != nil {
+ // TODO: convert to stream error I assume?
+ return err
+ }
+ if !end {
+ return nil
+ }
+ if err := sc.hpackDecoder.Close(); err != nil {
+ // TODO: convert to stream error I assume?
+ return err
+ }
+ defer sc.resetPendingRequest()
+ if sc.curOpenStreams > sc.advMaxStreams {
+ // "Endpoints MUST NOT exceed the limit set by their
+ // peer. An endpoint that receives a HEADERS frame
+ // that causes their advertised concurrent stream
+ // limit to be exceeded MUST treat this as a stream
+ // error (Section 5.4.2) of type PROTOCOL_ERROR or
+ // REFUSED_STREAM."
+ if sc.unackedSettings == 0 {
+ // They should know better.
+ return StreamError{st.id, ErrCodeProtocol}
+ }
+ // Assume it's a network race, where they just haven't
+ // received our last SETTINGS update. But actually
+ // this can't happen yet, because we don't yet provide
+ // a way for users to adjust server parameters at
+ // runtime.
+ return StreamError{st.id, ErrCodeRefusedStream}
+ }
+
+ rw, req, err := sc.newWriterAndRequest()
+ if err != nil {
+ return err
+ }
+ st.body = req.Body.(*requestBody).pipe // may be nil
+ st.declBodyBytes = req.ContentLength
+ go sc.runHandler(rw, req)
+ return nil
+}
+
+func (sc *serverConn) processPriority(f *PriorityFrame) error {
+ adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
+ return nil
+}
+
+func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
+ st, ok := streams[streamID]
+ if !ok {
+ // TODO: not quite correct (this streamID might
+ // already exist in the dep tree, but be closed), but
+ // close enough for now.
+ return
+ }
+ st.weight = priority.Weight
+ parent := streams[priority.StreamDep] // might be nil
+ if parent == st {
+ // if client tries to set this stream to be the parent of itself
+ // ignore and keep going
+ return
+ }
+
+ // section 5.3.3: If a stream is made dependent on one of its
+ // own dependencies, the formerly dependent stream is first
+ // moved to be dependent on the reprioritized stream's previous
+ // parent. The moved dependency retains its weight.
+ for piter := parent; piter != nil; piter = piter.parent {
+ if piter == st {
+ parent.parent = st.parent
+ break
+ }
+ }
+ st.parent = parent
+ if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
+ for _, openStream := range streams {
+ if openStream != st && openStream.parent == st.parent {
+ openStream.parent = st
+ }
+ }
+ }
+}
+
+// resetPendingRequest zeros out all state related to a HEADERS frame
+// and its zero or more CONTINUATION frames sent to start a new
+// request.
+func (sc *serverConn) resetPendingRequest() {
+ sc.serveG.check()
+ sc.req = requestParam{}
+}
+
+func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, error) {
+ sc.serveG.check()
+ rp := &sc.req
+ if rp.invalidHeader || rp.method == "" || rp.path == "" ||
+ (rp.scheme != "https" && rp.scheme != "http") {
+ // See 8.1.2.6 Malformed Requests and Responses:
+ //
+ // "Malformed requests or responses that are detected
+ // MUST be treated as a stream error (Section 5.4.2)
+ // of type PROTOCOL_ERROR."
+ //
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // "All HTTP/2 requests MUST include exactly one valid
+ // value for the :method, :scheme, and :path
+ // pseudo-header fields"
+ return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
+ }
+ var tlsState *tls.ConnectionState // nil if not scheme https
+ if rp.scheme == "https" {
+ tlsState = sc.tlsState
+ }
+ authority := rp.authority
+ if authority == "" {
+ authority = rp.header.Get("Host")
+ }
+ needsContinue := rp.header.Get("Expect") == "100-continue"
+ if needsContinue {
+ rp.header.Del("Expect")
+ }
+ bodyOpen := rp.stream.state == stateOpen
+ body := &requestBody{
+ conn: sc,
+ stream: rp.stream,
+ needsContinue: needsContinue,
+ }
+ // TODO: handle asterisk '*' requests + test
+ url, err := url.ParseRequestURI(rp.path)
+ if err != nil {
+ // TODO: find the right error code?
+ return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
+ }
+ req := &http.Request{
+ Method: rp.method,
+ URL: url,
+ RemoteAddr: sc.remoteAddrStr,
+ Header: rp.header,
+ RequestURI: rp.path,
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ ProtoMinor: 0,
+ TLS: tlsState,
+ Host: authority,
+ Body: body,
+ }
+ if bodyOpen {
+ body.pipe = &pipe{
+ b: buffer{buf: make([]byte, initialWindowSize)}, // TODO: share/remove XXX
+ }
+ body.pipe.c.L = &body.pipe.m
+
+ if vv, ok := rp.header["Content-Length"]; ok {
+ req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+ } else {
+ req.ContentLength = -1
+ }
+ }
+
+ rws := responseWriterStatePool.Get().(*responseWriterState)
+ bwSave := rws.bw
+ *rws = responseWriterState{} // zero all the fields
+ rws.conn = sc
+ rws.bw = bwSave
+ rws.bw.Reset(chunkWriter{rws})
+ rws.stream = rp.stream
+ rws.req = req
+ rws.body = body
+ rws.frameWriteCh = make(chan error, 1)
+
+ rw := &responseWriter{rws: rws}
+ return rw, req, nil
+}
+
+// Run on its own goroutine.
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request) {
+ defer rw.handlerDone()
+ // TODO: catch panics like net/http.Server
+ sc.handler.ServeHTTP(rw, req)
+}
+
+// called from handler goroutines.
+// h may be nil.
+func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders, tempCh chan error) {
+ sc.serveG.checkNotOn() // NOT on
+ var errc chan error
+ if headerData.h != nil {
+ // There's a header map (which we don't own), so we have to
+ // block waiting for this frame to be written so that an
+ // http.Flush mid-handler writes out the correct value of keys
+ // before a handler later potentially mutates it.
+ errc = tempCh
+ }
+ sc.writeFrameFromHandler(frameWriteMsg{
+ write: headerData,
+ stream: st,
+ done: errc,
+ })
+ if errc != nil {
+ select {
+ case <-errc:
+ // Ignore. Just for synchronization.
+ // Any error will be handled in the writing goroutine.
+ case <-sc.doneServing:
+ // Client has closed the connection.
+ }
+ }
+}
+
+// called from handler goroutines.
+func (sc *serverConn) write100ContinueHeaders(st *stream) {
+ sc.writeFrameFromHandler(frameWriteMsg{
+ write: write100ContinueHeadersFrame{st.id},
+ stream: st,
+ })
+}
+
+// A bodyReadMsg tells the server loop that the http.Handler read n
+// bytes of the DATA from the client on the given stream.
+type bodyReadMsg struct {
+ st *stream
+ n int
+}
+
+// called from handler goroutines.
+// Notes that the handler for the given stream ID read n bytes of its body
+// and schedules flow control tokens to be sent.
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
+ sc.serveG.checkNotOn() // NOT on
+ sc.bodyReadCh <- bodyReadMsg{st, n}
+}
+
+func (sc *serverConn) noteBodyRead(st *stream, n int) {
+ sc.serveG.check()
+ sc.sendWindowUpdate(nil, n) // conn-level
+ if st.state != stateHalfClosedRemote && st.state != stateClosed {
+ // Don't send this WINDOW_UPDATE if the stream is closed
+ // remotely.
+ sc.sendWindowUpdate(st, n)
+ }
+}
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
+ sc.serveG.check()
+ // "The legal range for the increment to the flow control
+ // window is 1 to 2^31-1 (2,147,483,647) octets."
+ // A Go Read call on 64-bit machines could in theory report
+ // having read more than this. Very unlikely, but we handle it
+ // here rather than elsewhere for now.
+ const maxUint31 = 1<<31 - 1
+ for n >= maxUint31 {
+ sc.sendWindowUpdate32(st, maxUint31)
+ n -= maxUint31
+ }
+ sc.sendWindowUpdate32(st, int32(n))
+}
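+
+// For example: a single hypothetical 3 GiB Read (n = 3<<30) would be split
+// into one WINDOW_UPDATE of 2,147,483,647 octets followed by a second of
+// 1,073,741,825 octets.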
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+ sc.serveG.check()
+ if n == 0 {
+ return
+ }
+ if n < 0 {
+ panic("negative update")
+ }
+ var streamID uint32
+ if st != nil {
+ streamID = st.id
+ }
+ sc.writeFrame(frameWriteMsg{
+ write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
+ stream: st,
+ })
+ var ok bool
+ if st == nil {
+ ok = sc.inflow.add(n)
+ } else {
+ ok = st.inflow.add(n)
+ }
+ if !ok {
+ panic("internal error; sent too many window updates without decrements?")
+ }
+}
+
+type requestBody struct {
+ stream *stream
+ conn *serverConn
+ closed bool
+ pipe *pipe // non-nil if we have an HTTP entity message body
+ needsContinue bool // need to send a 100-continue
+}
+
+func (b *requestBody) Close() error {
+ if b.pipe != nil {
+ b.pipe.Close(errClosedBody)
+ }
+ b.closed = true
+ return nil
+}
+
+func (b *requestBody) Read(p []byte) (n int, err error) {
+ if b.needsContinue {
+ b.needsContinue = false
+ b.conn.write100ContinueHeaders(b.stream)
+ }
+ if b.pipe == nil {
+ return 0, io.EOF
+ }
+ n, err = b.pipe.Read(p)
+ if n > 0 {
+ b.conn.noteBodyReadFromHandler(b.stream, n)
+ }
+ return
+}
+
+// responseWriter is the http.ResponseWriter implementation. It's
+// intentionally small (1 pointer wide) to minimize garbage. The
+// responseWriterState pointer inside is zeroed at the end of a
+// request (in handlerDone) and calls on the responseWriter thereafter
+// simply crash (caller's mistake), but the much larger responseWriterState
+// and buffers are reused between multiple requests.
+type responseWriter struct {
+ rws *responseWriterState
+}
+
+// Optional http.ResponseWriter interfaces implemented.
+var (
+ _ http.CloseNotifier = (*responseWriter)(nil)
+ _ http.Flusher = (*responseWriter)(nil)
+ _ stringWriter = (*responseWriter)(nil)
+)
+
+type responseWriterState struct {
+ // immutable within a request:
+ stream *stream
+ req *http.Request
+ body *requestBody // to close at end of request, if DATA frames didn't
+ conn *serverConn
+
+ // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
+ bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
+
+ // mutated by http.Handler goroutine:
+ handlerHeader http.Header // nil until called
+ snapHeader http.Header // snapshot of handlerHeader at WriteHeader time
+ status int // status code passed to WriteHeader
+ wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
+ sentHeader bool // have we sent the header frame?
+ handlerDone bool // handler has finished
+ curWrite writeData
+ frameWriteCh chan error // re-used whenever we need to block on a frame being written
+
+ closeNotifierMu sync.Mutex // guards closeNotifierCh
+ closeNotifierCh chan bool // nil until first used
+}
+
+type chunkWriter struct{ rws *responseWriterState }
+
+func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+
+// writeChunk writes chunks from the bufio.Writer. Because the
+// bufio.Writer may bypass its buffering for large writes, p may
+// sometimes be arbitrarily large.
+//
+// writeChunk is also responsible (on the first chunk) for sending the
+// HEADER response.
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
+ if !rws.wroteHeader {
+ rws.writeHeader(200)
+ }
+ if !rws.sentHeader {
+ rws.sentHeader = true
+ var ctype, clen string // implicit ones, if we can calculate them
+ if rws.handlerDone && rws.snapHeader.Get("Content-Length") == "" {
+ clen = strconv.Itoa(len(p))
+ }
+ if rws.snapHeader.Get("Content-Type") == "" {
+ ctype = http.DetectContentType(p)
+ }
+ endStream := rws.handlerDone && len(p) == 0
+ rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ httpResCode: rws.status,
+ h: rws.snapHeader,
+ endStream: endStream,
+ contentType: ctype,
+ contentLength: clen,
+ }, rws.frameWriteCh)
+ if endStream {
+ return 0, nil
+ }
+ }
+ if len(p) == 0 && !rws.handlerDone {
+ return 0, nil
+ }
+ curWrite := &rws.curWrite
+ curWrite.streamID = rws.stream.id
+ curWrite.p = p
+ curWrite.endStream = rws.handlerDone
+ if err := rws.conn.writeDataFromHandler(rws.stream, curWrite, rws.frameWriteCh); err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
+func (w *responseWriter) Flush() {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.bw.Buffered() > 0 {
+ if err := rws.bw.Flush(); err != nil {
+ // Ignore the error. The frame writer already knows.
+ return
+ }
+ } else {
+ // The bufio.Writer won't call chunkWriter.Write
+ // (writeChunk) with zero bytes, so we have to do it
+ // ourselves to force the HTTP response header and/or
+ // final DATA frame (with END_STREAM) to be sent.
+ rws.writeChunk(nil)
+ }
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+ rws := w.rws
+ if rws == nil {
+ panic("CloseNotify called after Handler finished")
+ }
+ rws.closeNotifierMu.Lock()
+ ch := rws.closeNotifierCh
+ if ch == nil {
+ ch = make(chan bool, 1)
+ rws.closeNotifierCh = ch
+ go func() {
+ rws.stream.cw.Wait() // wait for close
+ ch <- true
+ }()
+ }
+ rws.closeNotifierMu.Unlock()
+ return ch
+}
+
+func (w *responseWriter) Header() http.Header {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.handlerHeader == nil {
+ rws.handlerHeader = make(http.Header)
+ }
+ return rws.handlerHeader
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+ rws := w.rws
+ if rws == nil {
+ panic("WriteHeader called after Handler finished")
+ }
+ rws.writeHeader(code)
+}
+
+func (rws *responseWriterState) writeHeader(code int) {
+ if !rws.wroteHeader {
+ rws.wroteHeader = true
+ rws.status = code
+ if len(rws.handlerHeader) > 0 {
+ rws.snapHeader = cloneHeader(rws.handlerHeader)
+ }
+ }
+}
+
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+// * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte) (most of the magic; see comment there)
+func (w *responseWriter) Write(p []byte) (n int, err error) {
+ return w.write(len(p), p, "")
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+ return w.write(len(s), nil, s)
+}
+
+// Exactly one of dataB or dataS is non-empty.
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+ rws := w.rws
+ if rws == nil {
+ panic("Write called after Handler finished")
+ }
+ if !rws.wroteHeader {
+ w.WriteHeader(200)
+ }
+ if dataB != nil {
+ return rws.bw.Write(dataB)
+ } else {
+ return rws.bw.WriteString(dataS)
+ }
+}
+
+func (w *responseWriter) handlerDone() {
+ rws := w.rws
+ if rws == nil {
+ panic("handlerDone called twice")
+ }
+ rws.handlerDone = true
+ w.Flush()
+ w.rws = nil
+ responseWriterStatePool.Put(rws)
+}
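
A minimal sketch of wiring the server side above into net/http, assuming the
package's ConfigureServer helper (defined earlier in server.go) and
hypothetical certificate paths:

    package main

    import (
    	"log"
    	"net/http"

    	"golang.org/x/net/http2"
    )

    func main() {
    	srv := &http.Server{
    		Addr: ":8443",
    		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    			w.Write([]byte("hello via " + r.Proto)) // "HTTP/2.0" once negotiated
    		}),
    	}
    	// Register the "h2" TLS next-protocol so that negotiated connections
    	// are handed to this package's serve loop.
    	http2.ConfigureServer(srv, &http2.Server{})
    	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // hypothetical paths
    }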
diff --git a/vendor/src/golang.org/x/net/http2/transport.go b/vendor/src/golang.org/x/net/http2/transport.go
new file mode 100644
index 0000000000..73f358eefe
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/transport.go
@@ -0,0 +1,553 @@
+// Copyright 2015 The Go Authors.
+// See https://go.googlesource.com/go/+/master/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://go.googlesource.com/go/+/master/LICENSE
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+type Transport struct {
+ Fallback http.RoundTripper
+
+ // TODO: remove this and make more general with a TLS dial hook, like http
+ InsecureTLSDial bool
+
+ connMu sync.Mutex
+ conns map[string][]*clientConn // key is host:port
+}
+
+type clientConn struct {
+ t *Transport
+ tconn *tls.Conn
+ tlsState *tls.ConnectionState
+ connKey []string // key(s) this connection is cached in, in t.conns
+
+ readerDone chan struct{} // closed on error
+ readerErr error // set before readerDone is closed
+ hdec *hpack.Decoder
+ nextRes *http.Response
+
+ mu sync.Mutex
+ closed bool
+ goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
+ streams map[uint32]*clientStream
+ nextStreamID uint32
+ bw *bufio.Writer
+ werr error // first write error that has occurred
+ br *bufio.Reader
+ fr *Framer
+ // Settings from peer:
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ initialWindowSize uint32
+ hbuf bytes.Buffer // HPACK encoder writes into this
+ henc *hpack.Encoder
+}
+
+type clientStream struct {
+ ID uint32
+ resc chan resAndError
+ pw *io.PipeWriter
+ pr *io.PipeReader
+}
+
+type stickyErrWriter struct {
+ w io.Writer
+ err *error
+}
+
+func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
+ if *sew.err != nil {
+ return 0, *sew.err
+ }
+ n, err = sew.w.Write(p)
+ *sew.err = err
+ return
+}
+
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ if req.URL.Scheme != "https" {
+ if t.Fallback == nil {
+ return nil, errors.New("http2: unsupported scheme and no Fallback")
+ }
+ return t.Fallback.RoundTrip(req)
+ }
+
+ host, port, err := net.SplitHostPort(req.URL.Host)
+ if err != nil {
+ host = req.URL.Host
+ port = "443"
+ }
+
+ for {
+ cc, err := t.getClientConn(host, port)
+ if err != nil {
+ return nil, err
+ }
+ res, err := cc.roundTrip(req)
+ if shouldRetryRequest(err) { // TODO: or clientconn is overloaded (too many outstanding requests)?
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+ }
+}
+
+// CloseIdleConnections closes any connections that were opened for
+// previous requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *Transport) CloseIdleConnections() {
+ t.connMu.Lock()
+ defer t.connMu.Unlock()
+ for _, vv := range t.conns {
+ for _, cc := range vv {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+var errClientConnClosed = errors.New("http2: client conn is closed")
+
+func shouldRetryRequest(err error) bool {
+ // TODO: or GOAWAY graceful shutdown stuff
+ return err == errClientConnClosed
+}
+
+func (t *Transport) removeClientConn(cc *clientConn) {
+ t.connMu.Lock()
+ defer t.connMu.Unlock()
+ for _, key := range cc.connKey {
+ vv, ok := t.conns[key]
+ if !ok {
+ continue
+ }
+ newList := filterOutClientConn(vv, cc)
+ if len(newList) > 0 {
+ t.conns[key] = newList
+ } else {
+ delete(t.conns, key)
+ }
+ }
+}
+
+func filterOutClientConn(in []*clientConn, exclude *clientConn) []*clientConn {
+ out := in[:0]
+ for _, v := range in {
+ if v != exclude {
+ out = append(out, v)
+ }
+ }
+ return out
+}
+
+func (t *Transport) getClientConn(host, port string) (*clientConn, error) {
+ t.connMu.Lock()
+ defer t.connMu.Unlock()
+
+ key := net.JoinHostPort(host, port)
+
+ for _, cc := range t.conns[key] {
+ if cc.canTakeNewRequest() {
+ return cc, nil
+ }
+ }
+ if t.conns == nil {
+ t.conns = make(map[string][]*clientConn)
+ }
+ cc, err := t.newClientConn(host, port, key)
+ if err != nil {
+ return nil, err
+ }
+ t.conns[key] = append(t.conns[key], cc)
+ return cc, nil
+}
+
+func (t *Transport) newClientConn(host, port, key string) (*clientConn, error) {
+ cfg := &tls.Config{
+ ServerName: host,
+ NextProtos: []string{NextProtoTLS},
+ InsecureSkipVerify: t.InsecureTLSDial,
+ }
+ tconn, err := tls.Dial("tcp", net.JoinHostPort(host, port), cfg)
+ if err != nil {
+ return nil, err
+ }
+ if err := tconn.Handshake(); err != nil {
+ return nil, err
+ }
+ if !t.InsecureTLSDial {
+ if err := tconn.VerifyHostname(cfg.ServerName); err != nil {
+ return nil, err
+ }
+ }
+ state := tconn.ConnectionState()
+ if p := state.NegotiatedProtocol; p != NextProtoTLS {
+ // TODO(bradfitz): fall back to Fallback
+ return nil, fmt.Errorf("bad protocol: %v", p)
+ }
+ if !state.NegotiatedProtocolIsMutual {
+ return nil, errors.New("could not negotiate protocol mutually")
+ }
+ if _, err := tconn.Write(clientPreface); err != nil {
+ return nil, err
+ }
+
+ cc := &clientConn{
+ t: t,
+ tconn: tconn,
+ connKey: []string{key}, // TODO: cert's validated hostnames too
+ tlsState: &state,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
+ streams: make(map[uint32]*clientStream),
+ }
+ cc.bw = bufio.NewWriter(stickyErrWriter{tconn, &cc.werr})
+ cc.br = bufio.NewReader(tconn)
+ cc.fr = NewFramer(cc.bw, cc.br)
+ cc.henc = hpack.NewEncoder(&cc.hbuf)
+
+ cc.fr.WriteSettings()
+ // TODO: re-send more conn-level flow control tokens when server uses all these.
+ cc.fr.WriteWindowUpdate(0, 1<<30) // um, 0x7fffffff doesn't work to Google? it hangs?
+ cc.bw.Flush()
+ if cc.werr != nil {
+ return nil, cc.werr
+ }
+
+ // Read the obligatory SETTINGS frame
+ f, err := cc.fr.ReadFrame()
+ if err != nil {
+ return nil, err
+ }
+ sf, ok := f.(*SettingsFrame)
+ if !ok {
+ return nil, fmt.Errorf("expected settings frame, got: %T", f)
+ }
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+
+ sf.ForeachSetting(func(s Setting) error {
+ switch s.ID {
+ case SettingMaxFrameSize:
+ cc.maxFrameSize = s.Val
+ case SettingMaxConcurrentStreams:
+ cc.maxConcurrentStreams = s.Val
+ case SettingInitialWindowSize:
+ cc.initialWindowSize = s.Val
+ default:
+ // TODO(bradfitz): handle more
+ log.Printf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+ // TODO: figure out henc size
+ cc.hdec = hpack.NewDecoder(initialHeaderTableSize, cc.onNewHeaderField)
+
+ go cc.readLoop()
+ return cc, nil
+}
+
+func (cc *clientConn) setGoAway(f *GoAwayFrame) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.goAway = f
+}
+
+func (cc *clientConn) canTakeNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.goAway == nil &&
+ int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
+ cc.nextStreamID < 2147483647
+}
+
+func (cc *clientConn) closeIfIdle() {
+ cc.mu.Lock()
+ if len(cc.streams) > 0 {
+ cc.mu.Unlock()
+ return
+ }
+ cc.closed = true
+ // TODO: do clients send GOAWAY too? maybe? Just Close:
+ cc.mu.Unlock()
+
+ cc.tconn.Close()
+}
+
+func (cc *clientConn) roundTrip(req *http.Request) (*http.Response, error) {
+ cc.mu.Lock()
+
+ if cc.closed {
+ cc.mu.Unlock()
+ return nil, errClientConnClosed
+ }
+
+ cs := cc.newStream()
+ hasBody := false // TODO
+
+ // we send: HEADERS[+CONTINUATION] + (DATA?)
+ hdrs := cc.encodeHeaders(req)
+ first := true
+ for len(hdrs) > 0 {
+ chunk := hdrs
+ if len(chunk) > int(cc.maxFrameSize) {
+ chunk = chunk[:cc.maxFrameSize]
+ }
+ hdrs = hdrs[len(chunk):]
+ endHeaders := len(hdrs) == 0
+ if first {
+ cc.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: cs.ID,
+ BlockFragment: chunk,
+ EndStream: !hasBody,
+ EndHeaders: endHeaders,
+ })
+ first = false
+ } else {
+ cc.fr.WriteContinuation(cs.ID, endHeaders, chunk)
+ }
+ }
+ cc.bw.Flush()
+ werr := cc.werr
+ cc.mu.Unlock()
+
+ if hasBody {
+ // TODO: write data. and it should probably be interleaved:
+ // go ... io.Copy(dataFrameWriter{cc, cs, ...}, req.Body) ... etc
+ }
+
+ if werr != nil {
+ return nil, werr
+ }
+
+ re := <-cs.resc
+ if re.err != nil {
+ return nil, re.err
+ }
+ res := re.res
+ res.Request = req
+ res.TLS = cc.tlsState
+ return res, nil
+}
+
+// requires cc.mu be held.
+func (cc *clientConn) encodeHeaders(req *http.Request) []byte {
+ cc.hbuf.Reset()
+
+ // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go
+ host := req.Host
+ if host == "" {
+ host = req.URL.Host
+ }
+
+ path := req.URL.Path
+ if path == "" {
+ path = "/"
+ }
+
+ cc.writeHeader(":authority", host) // probably not right for all sites
+ cc.writeHeader(":method", req.Method)
+ cc.writeHeader(":path", path)
+ cc.writeHeader(":scheme", "https")
+
+ for k, vv := range req.Header {
+ lowKey := strings.ToLower(k)
+ if lowKey == "host" {
+ continue
+ }
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
+ }
+ }
+ return cc.hbuf.Bytes()
+}
+
+func (cc *clientConn) writeHeader(name, value string) {
+ log.Printf("sending %q = %q", name, value)
+ cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+}
+
+type resAndError struct {
+ res *http.Response
+ err error
+}
+
+// requires cc.mu be held.
+func (cc *clientConn) newStream() *clientStream {
+ cs := &clientStream{
+ ID: cc.nextStreamID,
+ resc: make(chan resAndError, 1),
+ }
+ cc.nextStreamID += 2
+ cc.streams[cs.ID] = cs
+ return cs
+}
+
+func (cc *clientConn) streamByID(id uint32, andRemove bool) *clientStream {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cs := cc.streams[id]
+ if andRemove {
+ delete(cc.streams, id)
+ }
+ return cs
+}
+
+// runs in its own goroutine.
+func (cc *clientConn) readLoop() {
+ defer cc.t.removeClientConn(cc)
+ defer close(cc.readerDone)
+
+ activeRes := map[uint32]*clientStream{} // keyed by streamID
+ // Close any response bodies if the server closes prematurely.
+ // TODO: also do this if we've written the headers but not
+ // gotten a response yet.
+ defer func() {
+ err := cc.readerErr
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ for _, cs := range activeRes {
+ cs.pw.CloseWithError(err)
+ }
+ }()
+
+ // continueStreamID is the stream ID we're waiting for
+ // continuation frames for.
+ var continueStreamID uint32
+
+ for {
+ f, err := cc.fr.ReadFrame()
+ if err != nil {
+ cc.readerErr = err
+ return
+ }
+ log.Printf("Transport received %v: %#v", f.Header(), f)
+
+ streamID := f.Header().StreamID
+
+ _, isContinue := f.(*ContinuationFrame)
+ if isContinue {
+ if streamID != continueStreamID {
+ log.Printf("Protocol violation: got CONTINUATION with id %d; want %d", streamID, continueStreamID)
+ cc.readerErr = ConnectionError(ErrCodeProtocol)
+ return
+ }
+ } else if continueStreamID != 0 {
+ // CONTINUATION frames must immediately follow the frame whose
+ // header block they continue, and we were mid-headers.
+ log.Printf("Protocol violation: got %T for stream %d, want CONTINUATION for %d", f, streamID, continueStreamID)
+ cc.readerErr = ConnectionError(ErrCodeProtocol)
+ return
+ }
+
+ if streamID%2 == 0 {
+ // Ignore streams pushed from the server for now.
+ // These always have an even stream id.
+ continue
+ }
+ streamEnded := false
+ if ff, ok := f.(streamEnder); ok {
+ streamEnded = ff.StreamEnded()
+ }
+
+ cs := cc.streamByID(streamID, streamEnded)
+ if cs == nil {
+ log.Printf("Received frame for untracked stream ID %d", streamID)
+ continue
+ }
+
+ switch f := f.(type) {
+ case *HeadersFrame:
+ cc.nextRes = &http.Response{
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ Header: make(http.Header),
+ }
+ cs.pr, cs.pw = io.Pipe()
+ cc.hdec.Write(f.HeaderBlockFragment())
+ case *ContinuationFrame:
+ cc.hdec.Write(f.HeaderBlockFragment())
+ case *DataFrame:
+ log.Printf("DATA: %q", f.Data())
+ cs.pw.Write(f.Data())
+ case *GoAwayFrame:
+ cc.t.removeClientConn(cc)
+ if f.ErrCode != 0 {
+ // TODO: deal with GOAWAY more. particularly the error code
+ log.Printf("transport got GOAWAY with error code = %v", f.ErrCode)
+ }
+ cc.setGoAway(f)
+ default:
+ log.Printf("Transport: unhandled response frame type %T", f)
+ }
+ headersEnded := false
+ if he, ok := f.(headersEnder); ok {
+ headersEnded = he.HeadersEnded()
+ if headersEnded {
+ continueStreamID = 0
+ } else {
+ continueStreamID = streamID
+ }
+ }
+
+ if streamEnded {
+ cs.pw.Close()
+ delete(activeRes, streamID)
+ }
+ if headersEnded {
+ if cs == nil {
+ panic("couldn't find stream") // TODO be graceful
+ }
+ // TODO: set the Body to one which notes the
+ // Close and also sends the server a
+ // RST_STREAM
+ cc.nextRes.Body = cs.pr
+ res := cc.nextRes
+ activeRes[streamID] = cs
+ cs.resc <- resAndError{res: res}
+ }
+ }
+}
+
+func (cc *clientConn) onNewHeaderField(f hpack.HeaderField) {
+ // TODO: verify pseudo headers come before non-pseudo headers
+ // TODO: verify the status is set
+ log.Printf("Header field: %+v", f)
+ if f.Name == ":status" {
+ code, err := strconv.Atoi(f.Value)
+ if err != nil {
+ panic("TODO: be graceful")
+ }
+ cc.nextRes.Status = f.Value + " " + http.StatusText(code)
+ cc.nextRes.StatusCode = code
+ return
+ }
+ if strings.HasPrefix(f.Name, ":") {
+ // "Endpoints MUST NOT generate pseudo-header fields other than those defined in this document."
+ // TODO: treat as invalid?
+ return
+ }
+ cc.nextRes.Header.Add(http.CanonicalHeaderKey(f.Name), f.Value)
+}
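
A corresponding client-side sketch, assuming an HTTP/2-capable https origin.
Note that this snapshot does not yet send request bodies (see the hasBody
TODO in roundTrip) and delegates non-https schemes to Fallback:

    package main

    import (
    	"io/ioutil"
    	"log"
    	"net/http"

    	"golang.org/x/net/http2"
    )

    func main() {
    	c := &http.Client{
    		Transport: &http2.Transport{Fallback: http.DefaultTransport},
    	}
    	res, err := c.Get("https://example.com/") // any HTTP/2-capable origin
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer res.Body.Close()
    	b, _ := ioutil.ReadAll(res.Body)
    	log.Printf("%s %s, %d body bytes", res.Proto, res.Status, len(b))
    }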
diff --git a/vendor/src/golang.org/x/net/http2/write.go b/vendor/src/golang.org/x/net/http2/write.go
new file mode 100644
index 0000000000..02f0743de6
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/write.go
@@ -0,0 +1,204 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+// writeFramer is implemented by any type that is used to write frames.
+type writeFramer interface {
+ writeFrame(writeContext) error
+}
+
+// writeContext is the interface needed by the various frame writer
+// types below. All the writeFrame methods below are scheduled via the
+// frame writing scheduler (see writeScheduler in writesched.go).
+//
+// This interface is implemented by *serverConn.
+// TODO: use it from the client code too, once it exists.
+type writeContext interface {
+ Framer() *Framer
+ Flush() error
+ CloseConn() error
+ // HeaderEncoder returns an HPACK encoder that writes to the
+ // returned buffer.
+ HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
+}
+
+// endsStream reports whether the given frame writer w will locally
+// close the stream.
+func endsStream(w writeFramer) bool {
+ switch v := w.(type) {
+ case *writeData:
+ return v.endStream
+ case *writeResHeaders:
+ return v.endStream
+ }
+ return false
+}
+
+type flushFrameWriter struct{}
+
+func (flushFrameWriter) writeFrame(ctx writeContext) error {
+ return ctx.Flush()
+}
+
+type writeSettings []Setting
+
+func (s writeSettings) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettings([]Setting(s)...)
+}
+
+type writeGoAway struct {
+ maxStreamID uint32
+ code ErrCode
+}
+
+func (p *writeGoAway) writeFrame(ctx writeContext) error {
+ err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
+ if p.code != 0 {
+ ctx.Flush() // ignore error: we're hanging up on them anyway
+ time.Sleep(50 * time.Millisecond)
+ ctx.CloseConn()
+ }
+ return err
+}
+
+type writeData struct {
+ streamID uint32
+ p []byte
+ endStream bool
+}
+
+func (w *writeData) String() string {
+ return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
+}
+
+func (w *writeData) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
+}
+
+func (se StreamError) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
+}
+
+type writePingAck struct{ pf *PingFrame }
+
+func (w writePingAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WritePing(true, w.pf.Data)
+}
+
+type writeSettingsAck struct{}
+
+func (writeSettingsAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettingsAck()
+}
+
+// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
+// for HTTP response headers from a server handler.
+type writeResHeaders struct {
+ streamID uint32
+ httpResCode int
+ h http.Header // may be nil
+ endStream bool
+
+ contentType string
+ contentLength string
+}
+
+func (w *writeResHeaders) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: httpCodeString(w.httpResCode)})
+ for k, vv := range w.h {
+ k = lowerHeader(k)
+ for _, v := range vv {
+ // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+ if k == "transfer-encoding" && v != "trailers" {
+ continue
+ }
+ enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+ }
+ }
+ if w.contentType != "" {
+ enc.WriteField(hpack.HeaderField{Name: "content-type", Value: w.contentType})
+ }
+ if w.contentLength != "" {
+ enc.WriteField(hpack.HeaderField{Name: "content-length", Value: w.contentLength})
+ }
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 {
+ panic("unexpected empty hpack")
+ }
+
+ // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+ // that all peers must support (16KB). Later we could care
+ // more and send larger frames if the peer advertised it, but
+ // there's little point. Most headers are small anyway (so we
+ // generally won't have CONTINUATION frames), and extra frames
+ // only waste 9 bytes anyway.
+ const maxFrameSize = 16384
+
+ first := true
+ for len(headerBlock) > 0 {
+ frag := headerBlock
+ if len(frag) > maxFrameSize {
+ frag = frag[:maxFrameSize]
+ }
+ headerBlock = headerBlock[len(frag):]
+ endHeaders := len(headerBlock) == 0
+ var err error
+ if first {
+ first = false
+ err = ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: frag,
+ EndStream: w.endStream,
+ EndHeaders: endHeaders,
+ })
+ } else {
+ err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type write100ContinueHeadersFrame struct {
+ streamID uint32
+}
+
+func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"})
+ return ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: buf.Bytes(),
+ EndStream: false,
+ EndHeaders: true,
+ })
+}
+
+type writeWindowUpdate struct {
+ streamID uint32 // or 0 for conn-level
+ n uint32
+}
+
+func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}
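
The HEADERS-plus-CONTINUATION chunking in writeResHeaders.writeFrame above is
the crux of that function; a standalone sketch of the same rule (the names
here are illustrative, not part of the package):

    // splitHeaderBlock splits an HPACK header block into fragments of at
    // most maxFrag bytes. The first fragment travels in a HEADERS frame,
    // the rest in CONTINUATION frames; only the last sets END_HEADERS.
    func splitHeaderBlock(block []byte, maxFrag int) [][]byte {
    	var frags [][]byte
    	for len(block) > 0 {
    		f := block
    		if len(f) > maxFrag {
    			f = f[:maxFrag]
    		}
    		block = block[len(f):]
    		frags = append(frags, f)
    	}
    	return frags
    }

With maxFrag = 16384, a 40960-byte header block yields fragments of 16384,
16384, and 8192 bytes.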
diff --git a/vendor/src/golang.org/x/net/http2/writesched.go b/vendor/src/golang.org/x/net/http2/writesched.go
new file mode 100644
index 0000000000..0e1b7486fb
--- /dev/null
+++ b/vendor/src/golang.org/x/net/http2/writesched.go
@@ -0,0 +1,286 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import "fmt"
+
+// frameWriteMsg is a request to write a frame.
+type frameWriteMsg struct {
+ // write is the interface value that does the writing, once the
+ // writeScheduler (below) has decided to select this frame
+ // to write. The write functions are all defined in write.go.
+ write writeFramer
+
+ stream *stream // used for prioritization. nil for non-stream frames.
+
+ // done, if non-nil, must be a buffered channel with space for
+ // 1 message and is sent the return value from write (or an
+ // earlier error) when the frame has been written.
+ done chan error
+}
+
+// for debugging only:
+func (wm frameWriteMsg) String() string {
+ var streamID uint32
+ if wm.stream != nil {
+ streamID = wm.stream.id
+ }
+ var des string
+ if s, ok := wm.write.(fmt.Stringer); ok {
+ des = s.String()
+ } else {
+ des = fmt.Sprintf("%T", wm.write)
+ }
+ return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
+}
+
+// writeScheduler tracks pending frames to write, priorities, and decides
+// the next one to use. It is not thread-safe.
+type writeScheduler struct {
+ // zero are frames not associated with a specific stream.
+ // They're sent before any stream-specific frames.
+ zero writeQueue
+
+ // maxFrameSize is the maximum size of a DATA frame
+ // we'll write. Must be non-zero and between 16K-16M.
+ maxFrameSize uint32
+
+ // sq contains the stream-specific queues, keyed by stream ID.
+ // when a stream is idle, it's deleted from the map.
+ sq map[uint32]*writeQueue
+
+ // canSend is a slice of memory that's reused between frame
+ // scheduling decisions to hold the list of writeQueues (from sq)
+ // which have enough flow control data to send. After canSend is
+ // built, the best is selected.
+ canSend []*writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool []*writeQueue
+}
+
+func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
+ if len(q.s) != 0 {
+ panic("queue must be empty")
+ }
+ ws.queuePool = append(ws.queuePool, q)
+}
+
+func (ws *writeScheduler) getEmptyQueue() *writeQueue {
+ ln := len(ws.queuePool)
+ if ln == 0 {
+ return new(writeQueue)
+ }
+ q := ws.queuePool[ln-1]
+ ws.queuePool = ws.queuePool[:ln-1]
+ return q
+}
+
+func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
+
+func (ws *writeScheduler) add(wm frameWriteMsg) {
+ st := wm.stream
+ if st == nil {
+ ws.zero.push(wm)
+ } else {
+ ws.streamQueue(st.id).push(wm)
+ }
+}
+
+func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
+ if q, ok := ws.sq[streamID]; ok {
+ return q
+ }
+ if ws.sq == nil {
+ ws.sq = make(map[uint32]*writeQueue)
+ }
+ q := ws.getEmptyQueue()
+ ws.sq[streamID] = q
+ return q
+}
+
+// take returns the most important frame to write and removes it from the scheduler.
+// It is illegal to call this if the scheduler is empty or if there are no connection-level
+// flow control bytes available.
+func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
+ if ws.maxFrameSize == 0 {
+ panic("internal error: ws.maxFrameSize not initialized or invalid")
+ }
+
+ // If there are any frames not associated with streams, prefer those first.
+ // These are usually SETTINGS, etc.
+ if !ws.zero.empty() {
+ return ws.zero.shift(), true
+ }
+ if len(ws.sq) == 0 {
+ return
+ }
+
+ // Next, prioritize frames on streams that aren't DATA frames (no cost).
+ for id, q := range ws.sq {
+ if q.firstIsNoCost() {
+ return ws.takeFrom(id, q)
+ }
+ }
+
+ // Now, all that remains are DATA frames with non-zero bytes to
+ // send. So pick the best one.
+ if len(ws.canSend) != 0 {
+ panic("should be empty")
+ }
+ for _, q := range ws.sq {
+ if n := ws.streamWritableBytes(q); n > 0 {
+ ws.canSend = append(ws.canSend, q)
+ }
+ }
+ if len(ws.canSend) == 0 {
+ return
+ }
+ defer ws.zeroCanSend()
+
+ // TODO: find the best queue
+ q := ws.canSend[0]
+
+ return ws.takeFrom(q.streamID(), q)
+}
+
+// zeroCanSend is deferred from take.
+func (ws *writeScheduler) zeroCanSend() {
+ for i := range ws.canSend {
+ ws.canSend[i] = nil
+ }
+ ws.canSend = ws.canSend[:0]
+}
+
+// streamWritableBytes returns the number of DATA bytes we could write
+// from the given queue's stream, if this stream/queue were
+// selected. It is an error to call this if q's head isn't a
+// *writeData.
+func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
+ wm := q.head()
+ ret := wm.stream.flow.available() // max we can write
+ if ret == 0 {
+ return 0
+ }
+ if int32(ws.maxFrameSize) < ret {
+ ret = int32(ws.maxFrameSize)
+ }
+ if ret == 0 {
+ panic("internal error: ws.maxFrameSize not initialized or invalid")
+ }
+ wd := wm.write.(*writeData)
+ if len(wd.p) < int(ret) {
+ ret = int32(len(wd.p))
+ }
+ return ret
+}
+
+func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
+ wm = q.head()
+ // If the first item in this queue costs flow control tokens
+ // and we don't have enough, write as much as we can.
+ if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
+ allowed := wm.stream.flow.available() // max we can write
+ if allowed == 0 {
+ // No quota available. Caller can try the next stream.
+ return frameWriteMsg{}, false
+ }
+ if int32(ws.maxFrameSize) < allowed {
+ allowed = int32(ws.maxFrameSize)
+ }
+ // TODO: further restrict the allowed size, because even if
+ // the peer says it's okay to write 16MB data frames, we might
+ // want to write smaller ones to properly weight competing
+ // streams' priorities.
+
+ if len(wd.p) > int(allowed) {
+ wm.stream.flow.take(allowed)
+ chunk := wd.p[:allowed]
+ wd.p = wd.p[allowed:]
+ // Make up a new write message of a valid size, rather
+ // than shifting one off the queue.
+ return frameWriteMsg{
+ stream: wm.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: chunk,
+ // even if the original had endStream set, there
+ // are bytes remaining because len(wd.p) > allowed,
+ // so we know endStream is false:
+ endStream: false,
+ },
+ // our caller is blocking on the final DATA frame, not
+ // these intermediates, so no need to wait:
+ done: nil,
+ }, true
+ }
+ wm.stream.flow.take(int32(len(wd.p)))
+ }
+
+ q.shift()
+ if q.empty() {
+ ws.putEmptyQueue(q)
+ delete(ws.sq, id)
+ }
+ return wm, true
+}
+
+func (ws *writeScheduler) forgetStream(id uint32) {
+ q, ok := ws.sq[id]
+ if !ok {
+ return
+ }
+ delete(ws.sq, id)
+
+ // But keep the queue for reuse later; zero its entries first
+ // so the old messages can be garbage collected.
+ for i := range q.s {
+ q.s[i] = frameWriteMsg{}
+ }
+ q.s = q.s[:0]
+ ws.putEmptyQueue(q)
+}
+
+type writeQueue struct {
+ s []frameWriteMsg
+}
+
+// streamID returns the stream ID for a non-empty stream-specific queue.
+func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
+
+func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *writeQueue) push(wm frameWriteMsg) {
+ q.s = append(q.s, wm)
+}
+
+// head returns the next item that would be removed by shift.
+func (q *writeQueue) head() frameWriteMsg {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ return q.s[0]
+}
+
+func (q *writeQueue) shift() frameWriteMsg {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ wm := q.s[0]
+ // TODO: less copy-happy queue.
+ copy(q.s, q.s[1:])
+ q.s[len(q.s)-1] = frameWriteMsg{}
+ q.s = q.s[:len(q.s)-1]
+ return wm
+}
+
+func (q *writeQueue) firstIsNoCost() bool {
+ if df, ok := q.s[0].write.(*writeData); ok {
+ return len(df.p) == 0
+ }
+ return true
+}
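
The flow-control arithmetic in takeFrom can be read in isolation; a sketch of
the same rule (illustrative names, not part of the package):

    // splitData reports how much of a pending DATA payload may be written
    // now, given the stream's available flow-control tokens and the peer's
    // maximum frame size: min(pending, available, maxFrame) is sent and the
    // remainder stays queued, exactly as takeFrom re-queues the tail.
    func splitData(pending, available, maxFrame int32) (sendNow, remain int32) {
    	allowed := available
    	if maxFrame < allowed {
    		allowed = maxFrame
    	}
    	if pending <= allowed {
    		return pending, 0
    	}
    	return allowed, pending - allowed
    }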
diff --git a/vendor/src/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/src/golang.org/x/net/internal/timeseries/timeseries.go
new file mode 100644
index 0000000000..1119f34482
--- /dev/null
+++ b/vendor/src/golang.org/x/net/internal/timeseries/timeseries.go
@@ -0,0 +1,525 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeseries implements a time series structure for stats collection.
+package timeseries // import "golang.org/x/net/internal/timeseries"
+
+import (
+ "fmt"
+ "log"
+ "time"
+)
+
+const (
+ timeSeriesNumBuckets = 64
+ minuteHourSeriesNumBuckets = 60
+)
+
+var timeSeriesResolutions = []time.Duration{
+ 1 * time.Second,
+ 10 * time.Second,
+ 1 * time.Minute,
+ 10 * time.Minute,
+ 1 * time.Hour,
+ 6 * time.Hour,
+ 24 * time.Hour, // 1 day
+ 7 * 24 * time.Hour, // 1 week
+ 4 * 7 * 24 * time.Hour, // 4 weeks
+ 16 * 7 * 24 * time.Hour, // 16 weeks
+}
+
+var minuteHourSeriesResolutions = []time.Duration{
+ 1 * time.Second,
+ 1 * time.Minute,
+}
+
+// An Observable is a kind of data that can be aggregated in a time series.
+type Observable interface {
+ Multiply(ratio float64) // Multiplies the data in self by a given ratio
+ Add(other Observable) // Adds the data from a different observation to self
+ Clear() // Clears the observation so it can be reused.
+ CopyFrom(other Observable) // Copies the contents of a given observation to self
+}
+
+// Float attaches the methods of Observable to a float64.
+type Float float64
+
+// NewFloat returns a Float.
+func NewFloat() Observable {
+ f := Float(0)
+ return &f
+}
+
+// String returns the float as a string.
+func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
+
+// Value returns the float's value.
+func (f *Float) Value() float64 { return float64(*f) }
+
+func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
+
+func (f *Float) Add(other Observable) {
+ o := other.(*Float)
+ *f += *o
+}
+
+func (f *Float) Clear() { *f = 0 }
+
+func (f *Float) CopyFrom(other Observable) {
+ o := other.(*Float)
+ *f = *o
+}
+
+// A Clock tells the current time.
+type Clock interface {
+ Time() time.Time
+}
+
+type defaultClock int
+
+var defaultClockInstance defaultClock
+
+func (defaultClock) Time() time.Time { return time.Now() }
+
+// Information kept per level. Each level consists of a circular list of
+// observations. The start of the level may be derived from end and
+// len(buckets) * size.
+type tsLevel struct {
+ oldest int // index to oldest bucketed Observable
+ newest int // index to newest bucketed Observable
+ end time.Time // end timestamp for this level
+ size time.Duration // duration of the bucketed Observable
+ buckets []Observable // collections of observations
+ provider func() Observable // used for creating new Observable
+}
+
+func (l *tsLevel) Clear() {
+ l.oldest = 0
+ l.newest = len(l.buckets) - 1
+ l.end = time.Time{}
+ for i := range l.buckets {
+ if l.buckets[i] != nil {
+ l.buckets[i].Clear()
+ l.buckets[i] = nil
+ }
+ }
+}
+
+func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
+ l.size = size
+ l.provider = f
+ l.buckets = make([]Observable, numBuckets)
+}
+
+// Keeps a sequence of levels. Each level is responsible for storing data at
+// a given resolution. For example, the first level stores data at a one
+// minute resolution while the second level stores data at a one hour
+// resolution.
+//
+// Each level is represented by a sequence of buckets. Each bucket spans an
+// interval equal to the resolution of the level. New observations are added
+// to the last bucket.
+type timeSeries struct {
+ provider func() Observable // make more Observable
+ numBuckets int // number of buckets in each level
+ levels []*tsLevel // levels of bucketed Observable
+ lastAdd time.Time // time of last Observable tracked
+ total Observable // convenient aggregation of all Observable
+ clock Clock // Clock for getting current time
+ pending Observable // observations not yet bucketed
+ pendingTime time.Time // what time are we keeping in pending
+ dirty bool // if there are pending observations
+}
+
+// init initializes the time series according to the supplied criteria.
+func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
+ ts.provider = f
+ ts.numBuckets = numBuckets
+ ts.clock = clock
+ ts.levels = make([]*tsLevel, len(resolutions))
+
+ for i := range resolutions {
+ if i > 0 && resolutions[i-1] >= resolutions[i] {
+ log.Print("timeseries: resolutions must be monotonically increasing")
+ break
+ }
+ newLevel := new(tsLevel)
+ newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
+ ts.levels[i] = newLevel
+ }
+
+ ts.Clear()
+}
+
+// Clear removes all observations from the time series.
+func (ts *timeSeries) Clear() {
+ ts.lastAdd = time.Time{}
+ ts.total = ts.resetObservation(ts.total)
+ ts.pending = ts.resetObservation(ts.pending)
+ ts.pendingTime = time.Time{}
+ ts.dirty = false
+
+ for i := range ts.levels {
+ ts.levels[i].Clear()
+ }
+}
+
+// Add records an observation at the current time.
+func (ts *timeSeries) Add(observation Observable) {
+ ts.AddWithTime(observation, ts.clock.Time())
+}
+
+// AddWithTime records an observation at the specified time.
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
+ smallBucketDuration := ts.levels[0].size
+
+ if t.After(ts.lastAdd) {
+ ts.lastAdd = t
+ }
+
+ if t.After(ts.pendingTime) {
+ ts.advance(t)
+ ts.mergePendingUpdates()
+ ts.pendingTime = ts.levels[0].end
+ ts.pending.CopyFrom(observation)
+ ts.dirty = true
+ } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
+ // The observation is close enough to go into the pending bucket.
+ // This compensates for clock skewing and small scheduling delays
+ // by letting the update stay in the fast path.
+ ts.pending.Add(observation)
+ ts.dirty = true
+ } else {
+ ts.mergeValue(observation, t)
+ }
+}
+
+// mergeValue inserts the observation at the specified time in the past into all levels.
+func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
+ for _, level := range ts.levels {
+ index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
+ if 0 <= index && index < ts.numBuckets {
+ bucketNumber := (level.oldest + index) % ts.numBuckets
+ if level.buckets[bucketNumber] == nil {
+ level.buckets[bucketNumber] = level.provider()
+ }
+ level.buckets[bucketNumber].Add(observation)
+ }
+ }
+ ts.total.Add(observation)
+}
+
+// mergePendingUpdates applies the pending updates into all levels.
+func (ts *timeSeries) mergePendingUpdates() {
+ if ts.dirty {
+ ts.mergeValue(ts.pending, ts.pendingTime)
+ ts.pending = ts.resetObservation(ts.pending)
+ ts.dirty = false
+ }
+}
+
+// advance cycles the buckets at each level until the latest bucket in
+// each level can hold the time specified.
+func (ts *timeSeries) advance(t time.Time) {
+ if !t.After(ts.levels[0].end) {
+ return
+ }
+ for i := 0; i < len(ts.levels); i++ {
+ level := ts.levels[i]
+ if !level.end.Before(t) {
+ break
+ }
+
+ // If the time is sufficiently far, just clear the level and advance
+ // directly.
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
+ for _, b := range level.buckets {
+ ts.resetObservation(b)
+ }
+ level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
+ }
+
+ for t.After(level.end) {
+ level.end = level.end.Add(level.size)
+ level.newest = level.oldest
+ level.oldest = (level.oldest + 1) % ts.numBuckets
+ ts.resetObservation(level.buckets[level.newest])
+ }
+
+ t = level.end
+ }
+}
+
+// Latest returns the sum of the num latest buckets from the level.
+func (ts *timeSeries) Latest(level, num int) Observable {
+ now := ts.clock.Time()
+ if ts.levels[0].end.Before(now) {
+ ts.advance(now)
+ }
+
+ ts.mergePendingUpdates()
+
+ result := ts.provider()
+ l := ts.levels[level]
+ index := l.newest
+
+ for i := 0; i < num; i++ {
+ if l.buckets[index] != nil {
+ result.Add(l.buckets[index])
+ }
+ if index == 0 {
+ index = ts.numBuckets
+ }
+ index--
+ }
+
+ return result
+}
+
+// LatestBuckets returns a copy of the num latest buckets from level.
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
+ if level < 0 || level >= len(ts.levels) {
+ log.Print("timeseries: bad level argument: ", level)
+ return nil
+ }
+ if num < 0 || num >= ts.numBuckets {
+ log.Print("timeseries: bad num argument: ", num)
+ return nil
+ }
+
+ results := make([]Observable, num)
+ now := ts.clock.Time()
+ if ts.levels[0].end.Before(now) {
+ ts.advance(now)
+ }
+
+ ts.mergePendingUpdates()
+
+ l := ts.levels[level]
+ index := l.newest
+
+ for i := 0; i < num; i++ {
+ result := ts.provider()
+ results[i] = result
+ if l.buckets[index] != nil {
+ result.CopyFrom(l.buckets[index])
+ }
+
+ if index == 0 {
+ index = ts.numBuckets
+ }
+ index -= 1
+ }
+ return results
+}
+
+// ScaleBy updates observations by scaling by factor.
+func (ts *timeSeries) ScaleBy(factor float64) {
+ for _, l := range ts.levels {
+ for i := 0; i < ts.numBuckets; i++ {
+ l.buckets[i].Multiply(factor)
+ }
+ }
+
+ ts.total.Multiply(factor)
+ ts.pending.Multiply(factor)
+}
+
+// Range returns the sum of observations added over the specified time range.
+// If start or finish times don't fall on bucket boundaries of the same
+// level, then return values are approximate answers.
+func (ts *timeSeries) Range(start, finish time.Time) Observable {
+ return ts.ComputeRange(start, finish, 1)[0]
+}
+
+// Recent returns the sum of observations from the last delta.
+func (ts *timeSeries) Recent(delta time.Duration) Observable {
+ now := ts.clock.Time()
+ return ts.Range(now.Add(-delta), now)
+}
+
+// Total returns the total of all observations.
+func (ts *timeSeries) Total() Observable {
+ ts.mergePendingUpdates()
+ return ts.total
+}
+
+// ComputeRange computes a specified number of values into a slice using
+// the observations recorded over the specified time period. The return
+// values are approximate if the start or finish times don't fall on the
+// bucket boundaries at the same level or if the number of buckets spanning
+// the range is not an integral multiple of num.
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
+ if start.After(finish) {
+ log.Printf("timeseries: start > finish, %v>%v", start, finish)
+ return nil
+ }
+
+ if num < 0 {
+ log.Printf("timeseries: num < 0, %v", num)
+ return nil
+ }
+
+ results := make([]Observable, num)
+
+ for _, l := range ts.levels {
+ if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
+ ts.extract(l, start, finish, num, results)
+ return results
+ }
+ }
+
+ // Failed to find a level that covers the desired range. So just
+ // extract from the last level, even if it doesn't cover the entire
+ // desired range.
+ ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
+
+ return results
+}
+
+// RecentList returns the specified number of values in slice over the most
+// recent time period of the specified range.
+func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
+ if delta < 0 {
+ return nil
+ }
+ now := ts.clock.Time()
+ return ts.ComputeRange(now.Add(-delta), now, num)
+}
+
+// extract returns a slice of specified number of observations from a given
+// level over a given range.
+func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
+ ts.mergePendingUpdates()
+
+ srcInterval := l.size
+ dstInterval := finish.Sub(start) / time.Duration(num)
+ dstStart := start
+ srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
+
+ srcIndex := 0
+
+ // Where should scanning start?
+ if dstStart.After(srcStart) {
+ advance := dstStart.Sub(srcStart) / srcInterval
+ srcIndex += int(advance)
+ srcStart = srcStart.Add(advance * srcInterval)
+ }
+
+ // The i'th value is computed as shown below.
+ // interval = (finish-start)/num
+ // i'th value = sum of observations in range
+ // [ start + i * interval,
+ // start + (i + 1) * interval )
+ for i := 0; i < num; i++ {
+ results[i] = ts.resetObservation(results[i])
+ dstEnd := dstStart.Add(dstInterval)
+ for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
+ srcEnd := srcStart.Add(srcInterval)
+ if srcEnd.After(ts.lastAdd) {
+ srcEnd = ts.lastAdd
+ }
+
+ if !srcEnd.Before(dstStart) {
+ srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
+ if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
+ // dst completely contains src.
+ if srcValue != nil {
+ results[i].Add(srcValue)
+ }
+ } else {
+ // dst partially overlaps src.
+ overlapStart := maxTime(srcStart, dstStart)
+ overlapEnd := minTime(srcEnd, dstEnd)
+ base := srcEnd.Sub(srcStart)
+ fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
+
+ used := ts.provider()
+ if srcValue != nil {
+ used.CopyFrom(srcValue)
+ }
+ used.Multiply(fraction)
+ results[i].Add(used)
+ }
+
+ if srcEnd.After(dstEnd) {
+ break
+ }
+ }
+ srcIndex++
+ srcStart = srcStart.Add(srcInterval)
+ }
+ dstStart = dstStart.Add(dstInterval)
+ }
+}
+
+// resetObservation clears the content so the struct may be reused.
+func (ts *timeSeries) resetObservation(observation Observable) Observable {
+ if observation == nil {
+ observation = ts.provider()
+ } else {
+ observation.Clear()
+ }
+ return observation
+}
+
+// TimeSeries tracks data at granularities from 1 second to 16 weeks.
+type TimeSeries struct {
+ timeSeries
+}
+
+// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
+func NewTimeSeries(f func() Observable) *TimeSeries {
+ return NewTimeSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
+// assigning timestamps.
+func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
+ ts := new(TimeSeries)
+ ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
+ return ts
+}
+
+// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
+type MinuteHourSeries struct {
+ timeSeries
+}
+
+// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
+func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
+ return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
+// assigning timestamps.
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
+ ts := new(MinuteHourSeries)
+ ts.timeSeries.init(minuteHourSeriesResolutions, f,
+ minuteHourSeriesNumBuckets, clock)
+ return ts
+}
+
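+// Minute returns the observations from the most recent minute, merged into
+// a single Observable.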
+func (ts *MinuteHourSeries) Minute() Observable {
+ return ts.timeSeries.Latest(0, 60)
+}
+
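+// Hour returns the observations from the most recent hour, merged into a
+// single Observable.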
+func (ts *MinuteHourSeries) Hour() Observable {
+ return ts.timeSeries.Latest(1, 60)
+}
+
+func minTime(a, b time.Time) time.Time {
+ if a.Before(b) {
+ return a
+ }
+ return b
+}
+
+func maxTime(a, b time.Time) time.Time {
+ if a.After(b) {
+ return a
+ }
+ return b
+}
diff --git a/vendor/src/golang.org/x/net/trace/events.go b/vendor/src/golang.org/x/net/trace/events.go
new file mode 100644
index 0000000000..e66c7e3282
--- /dev/null
+++ b/vendor/src/golang.org/x/net/trace/events.go
@@ -0,0 +1,524 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net/http"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "text/tabwriter"
+ "time"
+)
+
+var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "trimSpace": strings.TrimSpace,
+}).Parse(eventsHTML))
+
+const maxEventsPerLog = 100
+
+type bucket struct {
+ MaxErrAge time.Duration
+ String string
+}
+
+var buckets = []bucket{
+ {0, "total"},
+ {10 * time.Second, "errs<10s"},
+ {1 * time.Minute, "errs<1m"},
+ {10 * time.Minute, "errs<10m"},
+ {1 * time.Hour, "errs<1h"},
+ {10 * time.Hour, "errs<10h"},
+ {24000 * time.Hour, "errors"},
+}
+
+// RenderEvents renders the HTML page typically served at /debug/events.
+// It does not do any auth checking; see AuthRequest for the default auth check
+// used by the handler registered on http.DefaultServeMux.
+// req may be nil.
+func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
+ now := time.Now()
+ data := &struct {
+ Families []string // family names
+ Buckets []bucket
+ Counts [][]int // eventLog count per family/bucket
+
+ // Set when a bucket has been selected.
+ Family string
+ Bucket int
+ EventLogs eventLogs
+ Expanded bool
+ }{
+ Buckets: buckets,
+ }
+
+ data.Families = make([]string, 0, len(families))
+ famMu.RLock()
+ for name := range families {
+ data.Families = append(data.Families, name)
+ }
+ famMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // Count the number of eventLogs in each family for each error age.
+ data.Counts = make([][]int, len(data.Families))
+ for i, name := range data.Families {
+ // TODO(sameer): move this loop under the family lock.
+ f := getEventFamily(name)
+ data.Counts[i] = make([]int, len(data.Buckets))
+ for j, b := range data.Buckets {
+ data.Counts[i][j] = f.Count(now, b.MaxErrAge)
+ }
+ }
+
+ if req != nil {
+ var ok bool
+ data.Family, data.Bucket, ok = parseEventsArgs(req)
+ if ok {
+ data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
+ }
+ if data.EventLogs != nil {
+ defer data.EventLogs.Free()
+ sort.Sort(data.EventLogs)
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ }
+
+ famMu.RLock()
+ defer famMu.RUnlock()
+ if err := eventsTmpl.Execute(w, data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
+
+func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < 0 || b >= len(buckets) {
+ return "", 0, false
+ }
+ return fam, b, true
+}
+
+// An EventLog provides a log of events associated with a specific object.
+type EventLog interface {
+ // Printf formats its arguments with fmt.Sprintf and adds the
+ // result to the event log.
+ Printf(format string, a ...interface{})
+
+ // Errorf is like Printf, but it marks this event as an error.
+ Errorf(format string, a ...interface{})
+
+ // Finish declares that this event log is complete.
+ // The event log should not be used after calling this method.
+ Finish()
+}
+
+// NewEventLog returns a new EventLog with the specified family name
+// and title.
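+//
+// A brief usage sketch (the family and title strings are illustrative):
+//
+//    el := trace.NewEventLog("mypkg.Conn", "unix:/var/run/mypkg.sock")
+//    defer el.Finish()
+//    el.Printf("connection established")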
+func NewEventLog(family, title string) EventLog {
+ el := newEventLog()
+ el.ref()
+ el.Family, el.Title = family, title
+ el.Start = time.Now()
+ el.events = make([]logEntry, 0, maxEventsPerLog)
+ el.stack = make([]uintptr, 32)
+ n := runtime.Callers(2, el.stack)
+ el.stack = el.stack[:n]
+
+ getEventFamily(family).add(el)
+ return el
+}
+
+func (el *eventLog) Finish() {
+ getEventFamily(el.Family).remove(el)
+ el.unref() // matches ref in New
+}
+
+var (
+ famMu sync.RWMutex
+ families = make(map[string]*eventFamily) // family name => family
+)
+
+func getEventFamily(fam string) *eventFamily {
+ famMu.Lock()
+ defer famMu.Unlock()
+ f := families[fam]
+ if f == nil {
+ f = &eventFamily{}
+ families[fam] = f
+ }
+ return f
+}
+
+type eventFamily struct {
+ mu sync.RWMutex
+ eventLogs eventLogs
+}
+
+func (f *eventFamily) add(el *eventLog) {
+ f.mu.Lock()
+ f.eventLogs = append(f.eventLogs, el)
+ f.mu.Unlock()
+}
+
+func (f *eventFamily) remove(el *eventLog) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ for i, el0 := range f.eventLogs {
+ if el == el0 {
+ copy(f.eventLogs[i:], f.eventLogs[i+1:])
+ f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
+ return
+ }
+ }
+}
+
+func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ for _, el := range f.eventLogs {
+ if el.hasRecentError(now, maxErrAge) {
+ n++
+ }
+ }
+ return
+}
+
+func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ els = make(eventLogs, 0, len(f.eventLogs))
+ for _, el := range f.eventLogs {
+ if el.hasRecentError(now, maxErrAge) {
+ el.ref()
+ els = append(els, el)
+ }
+ }
+ return
+}
+
+type eventLogs []*eventLog
+
+// Free calls unref on each element of the list.
+func (els eventLogs) Free() {
+ for _, el := range els {
+ el.unref()
+ }
+}
+
+// eventLogs may be sorted in reverse chronological order.
+func (els eventLogs) Len() int { return len(els) }
+func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
+func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
+
+// A logEntry is a timestamped log entry in an event log.
+type logEntry struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in log
+ NewDay bool // whether this event is on a different day to the previous event
+ What string
+ IsErr bool
+}
+
+// WhenString returns a string representation of the time of the event.
+// It will include the date if midnight was crossed.
+func (e logEntry) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// An eventLog represents an active event log.
+type eventLog struct {
+ // Family is the top-level grouping of event logs to which this belongs.
+ Family string
+
+ // Title is the title of this event log.
+ Title string
+
+ // Timing information.
+ Start time.Time
+
+ // Call stack where this event log was created.
+ stack []uintptr
+
+ // Append-only sequence of events.
+ //
+ // TODO(sameer): change this to a ring buffer to avoid the array copy
+ // when we hit maxEventsPerLog.
+ mu sync.RWMutex
+ events []logEntry
+ LastErrorTime time.Time
+ discarded int
+
+ refs int32 // how many buckets this is in
+}
+
+func (el *eventLog) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ el.Family = ""
+ el.Title = ""
+ el.Start = time.Time{}
+ el.stack = nil
+ el.events = nil
+ el.LastErrorTime = time.Time{}
+ el.discarded = 0
+ el.refs = 0
+}
+
+func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
+ if maxErrAge == 0 {
+ return true
+ }
+ el.mu.RLock()
+ defer el.mu.RUnlock()
+ return now.Sub(el.LastErrorTime) < maxErrAge
+}
+
+// delta returns the elapsed time since the last event or the log start,
+// and whether it spans midnight.
+// L >= el.mu
+func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
+ if len(el.events) == 0 {
+ return t.Sub(el.Start), false
+ }
+ prev := el.events[len(el.events)-1].When
+ return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (el *eventLog) Printf(format string, a ...interface{}) {
+ el.printf(false, format, a...)
+}
+
+func (el *eventLog) Errorf(format string, a ...interface{}) {
+ el.printf(true, format, a...)
+}
+
+func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
+ e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
+ el.mu.Lock()
+ e.Elapsed, e.NewDay = el.delta(e.When)
+ if len(el.events) < maxEventsPerLog {
+ el.events = append(el.events, e)
+ } else {
+ // Discard the oldest event.
+ if el.discarded == 0 {
+ // el.discarded starts at two to account for the event it
+ // is replacing, plus the next one that we are about to
+ // drop.
+ el.discarded = 2
+ } else {
+ el.discarded++
+ }
+ // TODO(sameer): if this causes allocations on a critical path,
+ // change eventLog.What to be a fmt.Stringer, as in trace.go.
+ el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ el.events[0].When = el.events[1].When
+ copy(el.events[1:], el.events[2:])
+ el.events[maxEventsPerLog-1] = e
+ }
+ if e.IsErr {
+ el.LastErrorTime = e.When
+ }
+ el.mu.Unlock()
+}
+
+func (el *eventLog) ref() {
+ atomic.AddInt32(&el.refs, 1)
+}
+
+func (el *eventLog) unref() {
+ if atomic.AddInt32(&el.refs, -1) == 0 {
+ freeEventLog(el)
+ }
+}
+
+func (el *eventLog) When() string {
+ return el.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (el *eventLog) ElapsedTime() string {
+ elapsed := time.Since(el.Start)
+ return fmt.Sprintf("%.6f", elapsed.Seconds())
+}
+
+func (el *eventLog) Stack() string {
+ buf := new(bytes.Buffer)
+ tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
+ printStackRecord(tw, el.stack)
+ tw.Flush()
+ return buf.String()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+// Adapted from runtime/pprof/pprof.go.
+func printStackRecord(w io.Writer, stk []uintptr) {
+ for _, pc := range stk {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ continue
+ }
+ file, line := f.FileLine(pc)
+ name := f.Name()
+ // Hide runtime functions such as runtime.goexit.
+ if strings.HasPrefix(name, "runtime.") {
+ continue
+ }
+ fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
+ }
+}
+
+func (el *eventLog) Events() []logEntry {
+ el.mu.RLock()
+ defer el.mu.RUnlock()
+ return el.events
+}
+
+// freeEventLogs is a freelist of *eventLog
+var freeEventLogs = make(chan *eventLog, 1000)
+
+// newEventLog returns an event log ready to use.
+func newEventLog() *eventLog {
+ select {
+ case el := <-freeEventLogs:
+ return el
+ default:
+ return new(eventLog)
+ }
+}
+
+// freeEventLog adds el to freeEventLogs if there's room.
+// This is non-blocking.
+func freeEventLog(el *eventLog) {
+ el.reset()
+ select {
+ case freeEventLogs <- el:
+ default:
+ }
+}
+
+const eventsHTML = `
+<html>
+ <head>
+ <title>events</title>
+ </head>
+ <style type="text/css">
+ body {
+ font-family: sans-serif;
+ }
+ table#req-status td.family {
+ padding-right: 2em;
+ }
+ table#req-status td.active {
+ padding-right: 1em;
+ }
+ table#req-status td.empty {
+ color: #aaa;
+ }
+ table#reqs {
+ margin-top: 1em;
+ }
+ table#reqs tr.first {
+ {{if $.Expanded}}font-weight: bold;{{end}}
+ }
+ table#reqs td {
+ font-family: monospace;
+ }
+ table#reqs td.when {
+ text-align: right;
+ white-space: nowrap;
+ }
+ table#reqs td.elapsed {
+ padding: 0 0.5em;
+ text-align: right;
+ white-space: pre;
+ width: 10em;
+ }
+ address {
+ font-size: smaller;
+ margin-top: 5em;
+ }
+ </style>
+ <body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+ {{range $i, $fam := .Families}}
+ <tr>
+ <td class="family">{{$fam}}</td>
+
+ {{range $j, $bucket := $.Buckets}}
+ {{$n := index $.Counts $i $j}}
+ <td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+ {{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{$n}} {{$bucket.String}}]
+ {{if $n}}</a>{{end}}
+ </td>
+ {{end}}
+
+ </tr>{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+ <tr><th>When</th><th>Elapsed</th></tr>
+ {{range $el := $.EventLogs}}
+ <tr class="first">
+ <td class="when">{{$el.When}}</td>
+ <td class="elapsed">{{$el.ElapsedTime}}</td>
+ <td>{{$el.Title}}
+ </tr>
+ {{if $.Expanded}}
+ <tr>
+ <td class="when"></td>
+ <td class="elapsed"></td>
+ <td><pre>{{$el.Stack|trimSpace}}</pre></td>
+ </tr>
+ {{range $el.Events}}
+ <tr>
+ <td class="when">{{.WhenString}}</td>
+ <td class="elapsed">{{elapsed .Elapsed}}</td>
+ <td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+ </tr>
+ {{end}}
+ {{end}}
+ {{end}}
+</table>
+{{end}}
+ </body>
+</html>
+`
diff --git a/vendor/src/golang.org/x/net/trace/histogram.go b/vendor/src/golang.org/x/net/trace/histogram.go
new file mode 100644
index 0000000000..bb42aa5320
--- /dev/null
+++ b/vendor/src/golang.org/x/net/trace/histogram.go
@@ -0,0 +1,356 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+// This file implements histogramming for RPC statistics collection.
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "log"
+ "math"
+
+ "golang.org/x/net/internal/timeseries"
+)
+
+const (
+ bucketCount = 38
+)
+
+// histogram keeps counts of values in buckets that are spaced
+// out in powers of 2: 0-1, 2-3, 4-7...
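+// For example, the value 6 lands in the bucket covering [4, 8):
+// getBucket(6) returns 2, and bucketBoundary(2) = 4 while bucketBoundary(3) = 8.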
+// histogram implements timeseries.Observable.
+type histogram struct {
+ sum int64 // running total of measurements
+ sumOfSquares float64 // running total of squared measurements
+ buckets []int64 // bucketed values for histogram
+ value int // bucket index of a single bucketed value (optimization)
+ valueCount int64 // count for the single bucket; -1 once buckets is allocated
+}
+
+// addMeasurement records a value observation in the histogram.
+func (h *histogram) addMeasurement(value int64) {
+ // TODO: assert invariant
+ h.sum += value
+ h.sumOfSquares += float64(value) * float64(value)
+
+ bucketIndex := getBucket(value)
+
+ if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
+ h.value = bucketIndex
+ h.valueCount++
+ } else {
+ h.allocateBuckets()
+ h.buckets[bucketIndex]++
+ }
+}
+
+func (h *histogram) allocateBuckets() {
+ if h.buckets == nil {
+ h.buckets = make([]int64, bucketCount)
+ h.buckets[h.value] = h.valueCount
+ h.value = 0
+ h.valueCount = -1
+ }
+}
+
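+// log2 returns the number of bits needed to represent i, i.e.
+// floor(log2(i)) + 1 for i > 0 and 0 for i <= 0.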
+func log2(i int64) int {
+ n := 0
+ for ; i >= 0x100; i >>= 8 {
+ n += 8
+ }
+ for ; i > 0; i >>= 1 {
+ n++
+ }
+ return n
+}
+
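+// getBucket maps a value to the index of its histogram bucket,
+// clamping the result to [0, bucketCount).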
+func getBucket(i int64) (index int) {
+ index = log2(i) - 1
+ if index < 0 {
+ index = 0
+ }
+ if index >= bucketCount {
+ index = bucketCount - 1
+ }
+ return
+}
+
+// total returns the number of recorded observations.
+func (h *histogram) total() (total int64) {
+ if h.valueCount >= 0 {
+ total = h.valueCount
+ }
+ for _, val := range h.buckets {
+ total += val
+ }
+ return
+}
+
+// average returns the average value of recorded observations.
+func (h *histogram) average() float64 {
+ t := h.total()
+ if t == 0 {
+ return 0
+ }
+ return float64(h.sum) / float64(t)
+}
+
+// variance returns the variance of recorded observations.
+func (h *histogram) variance() float64 {
+ t := float64(h.total())
+ if t == 0 {
+ return 0
+ }
+ s := float64(h.sum) / t
+ return h.sumOfSquares/t - s*s
+}
+
+// standardDeviation returns the standard deviation of recorded observations.
+func (h *histogram) standardDeviation() float64 {
+ return math.Sqrt(h.variance())
+}
+
+// percentileBoundary estimates the value that the given fraction of recorded
+// observations are less than.
+func (h *histogram) percentileBoundary(percentile float64) int64 {
+ total := h.total()
+
+ // Corner cases (make sure result is strictly less than Total())
+ if total == 0 {
+ return 0
+ } else if total == 1 {
+ return int64(h.average())
+ }
+
+ percentOfTotal := round(float64(total) * percentile)
+ var runningTotal int64
+
+ for i := range h.buckets {
+ value := h.buckets[i]
+ runningTotal += value
+ if runningTotal == percentOfTotal {
+ // We hit an exact bucket boundary. If the next bucket has data, it is a
+ // good estimate of the value. If the bucket is empty, we interpolate the
+ // midpoint between the next bucket's boundary and the next non-zero
+ // bucket. If the remaining buckets are all empty, then we use the
+ // boundary for the next bucket as the estimate.
+ j := uint8(i + 1)
+ min := bucketBoundary(j)
+ if runningTotal < total {
+ for h.buckets[j] == 0 {
+ j++
+ }
+ }
+ max := bucketBoundary(j)
+ return min + round(float64(max-min)/2)
+ } else if runningTotal > percentOfTotal {
+ // The value is in this bucket. Interpolate the value.
+ delta := runningTotal - percentOfTotal
+ percentBucket := float64(value-delta) / float64(value)
+ bucketMin := bucketBoundary(uint8(i))
+ nextBucketMin := bucketBoundary(uint8(i + 1))
+ bucketSize := nextBucketMin - bucketMin
+ return bucketMin + round(percentBucket*float64(bucketSize))
+ }
+ }
+ return bucketBoundary(bucketCount - 1)
+}
+
+// median returns the estimated median of the observed values.
+func (h *histogram) median() int64 {
+ return h.percentileBoundary(0.5)
+}
+
+// Add adds other to h.
+func (h *histogram) Add(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == 0 {
+ // Other histogram is empty
+ } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
+ // Both have a single bucketed value, aggregate them
+ h.valueCount += o.valueCount
+ } else {
+ // Two different values necessitate buckets in this histogram
+ h.allocateBuckets()
+ if o.valueCount >= 0 {
+ h.buckets[o.value] += o.valueCount
+ } else {
+ for i := range h.buckets {
+ h.buckets[i] += o.buckets[i]
+ }
+ }
+ }
+ h.sumOfSquares += o.sumOfSquares
+ h.sum += o.sum
+}
+
+// Clear resets the histogram to an empty state, removing all observed values.
+func (h *histogram) Clear() {
+ h.buckets = nil
+ h.value = 0
+ h.valueCount = 0
+ h.sum = 0
+ h.sumOfSquares = 0
+}
+
+// CopyFrom copies from other, which must be a *histogram, into h.
+func (h *histogram) CopyFrom(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == -1 {
+ h.allocateBuckets()
+ copy(h.buckets, o.buckets)
+ }
+ h.sum = o.sum
+ h.sumOfSquares = o.sumOfSquares
+ h.value = o.value
+ h.valueCount = o.valueCount
+}
+
+// Multiply scales the histogram by the specified ratio.
+func (h *histogram) Multiply(ratio float64) {
+ if h.valueCount == -1 {
+ for i := range h.buckets {
+ h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
+ }
+ } else {
+ h.valueCount = int64(float64(h.valueCount) * ratio)
+ }
+ h.sum = int64(float64(h.sum) * ratio)
+ h.sumOfSquares = h.sumOfSquares * ratio
+}
+
+// New creates a new histogram.
+func (h *histogram) New() timeseries.Observable {
+ r := new(histogram)
+ r.Clear()
+ return r
+}
+
+func (h *histogram) String() string {
+ return fmt.Sprintf("%d, %f, %d, %d, %v",
+ h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
+}
+
+// round returns the closest int64 to the argument
+func round(in float64) int64 {
+ return int64(math.Floor(in + 0.5))
+}
+
+// bucketBoundary returns the first value in the bucket.
+func bucketBoundary(bucket uint8) int64 {
+ if bucket == 0 {
+ return 0
+ }
+ return 1 << bucket
+}
+
+// bucketData holds data about a specific bucket for use in distTmpl.
+type bucketData struct {
+ Lower, Upper int64
+ N int64
+ Pct, CumulativePct float64
+ GraphWidth int
+}
+
+// data holds data about a Distribution for use in distTmpl.
+type data struct {
+ Buckets []*bucketData
+ Count, Median int64
+ Mean, StandardDeviation float64
+}
+
+// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
+const maxHTMLBarWidth = 350.0
+
+// newData returns data representing h for use in distTmpl.
+func (h *histogram) newData() *data {
+ // Force the allocation of buckets to simplify the rendering implementation
+ h.allocateBuckets()
+ // We scale the bars on the right so that the largest bar is
+ // maxHTMLBarWidth pixels in width.
+ maxBucket := int64(0)
+ for _, n := range h.buckets {
+ if n > maxBucket {
+ maxBucket = n
+ }
+ }
+ total := h.total()
+ barsizeMult := maxHTMLBarWidth / float64(maxBucket)
+ var pctMult float64
+ if total == 0 {
+ pctMult = 1.0
+ } else {
+ pctMult = 100.0 / float64(total)
+ }
+
+ buckets := make([]*bucketData, len(h.buckets))
+ runningTotal := int64(0)
+ for i, n := range h.buckets {
+ if n == 0 {
+ continue
+ }
+ runningTotal += n
+ var upperBound int64
+ if i < bucketCount-1 {
+ upperBound = bucketBoundary(uint8(i + 1))
+ } else {
+ upperBound = math.MaxInt64
+ }
+ buckets[i] = &bucketData{
+ Lower: bucketBoundary(uint8(i)),
+ Upper: upperBound,
+ N: n,
+ Pct: float64(n) * pctMult,
+ CumulativePct: float64(runningTotal) * pctMult,
+ GraphWidth: int(float64(n) * barsizeMult),
+ }
+ }
+ return &data{
+ Buckets: buckets,
+ Count: total,
+ Median: h.median(),
+ Mean: h.average(),
+ StandardDeviation: h.standardDeviation(),
+ }
+}
+
+func (h *histogram) html() template.HTML {
+ buf := new(bytes.Buffer)
+ if err := distTmpl.Execute(buf, h.newData()); err != nil {
+ buf.Reset()
+ log.Printf("net/trace: couldn't execute template: %v", err)
+ }
+ return template.HTML(buf.String())
+}
+
+// Input: data
+var distTmpl = template.Must(template.New("distTmpl").Parse(`
+<table>
+<tr>
+ <td style="padding:0.25em">Count: {{.Count}}</td>
+ <td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+ <td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+ <td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+ <tr>
+ <td style="padding:0 0 0 0.25em">[</td>
+ <td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
+ <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+ <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+ <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+ <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+ <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
+ </tr>
+{{end}}
+{{end}}
+</table>
+`))
diff --git a/vendor/src/golang.org/x/net/trace/trace.go b/vendor/src/golang.org/x/net/trace/trace.go
new file mode 100644
index 0000000000..c44cb7ec9e
--- /dev/null
+++ b/vendor/src/golang.org/x/net/trace/trace.go
@@ -0,0 +1,1057 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package trace implements tracing of requests and long-lived objects.
+It exports HTTP interfaces on /debug/requests and /debug/events.
+
+A trace.Trace provides tracing for short-lived objects, usually requests.
+A request handler might be implemented like this:
+
+ func fooHandler(w http.ResponseWriter, req *http.Request) {
+ tr := trace.New("mypkg.Foo", req.URL.Path)
+ defer tr.Finish()
+ ...
+ tr.LazyPrintf("some event %q happened", str)
+ ...
+ if err := somethingImportant(); err != nil {
+ tr.LazyPrintf("somethingImportant failed: %v", err)
+ tr.SetError()
+ }
+ }
+
+The /debug/requests HTTP endpoint organizes the traces by family,
+errors, and duration. It also provides a histogram of request duration
+for each family.
+
+A trace.EventLog provides tracing for long-lived objects, such as RPC
+connections.
+
+ // A Fetcher fetches URL paths for a single domain.
+ type Fetcher struct {
+ domain string
+ events trace.EventLog
+ }
+
+ func NewFetcher(domain string) *Fetcher {
+ return &Fetcher{
+ domain,
+ trace.NewEventLog("mypkg.Fetcher", domain),
+ }
+ }
+
+ func (f *Fetcher) Fetch(path string) (string, error) {
+ resp, err := http.Get("http://" + f.domain + "/" + path)
+ if err != nil {
+ f.events.Errorf("Get(%q) = %v", path, err)
+ return "", err
+ }
+ f.events.Printf("Get(%q) = %s", path, resp.Status)
+ ...
+ }
+
+ func (f *Fetcher) Close() error {
+ f.events.Finish()
+ return nil
+ }
+
+The /debug/events HTTP endpoint organizes the event logs by family and
+by time since the last error. The expanded view displays recent log
+entries and the log's call stack.
+*/
+package trace // import "golang.org/x/net/trace"
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "runtime"
+ "sort"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/internal/timeseries"
+)
+
+// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
+// FOR DEBUGGING ONLY. This will slow down the program.
+var DebugUseAfterFinish = false
+
+// AuthRequest determines whether a specific request is permitted to load the
+// /debug/requests or /debug/events pages.
+//
+// It returns two bools; the first indicates whether the page may be viewed at all,
+// and the second indicates whether sensitive events will be shown.
+//
+// AuthRequest may be replaced by a program to customise its authorisation requirements.
+//
+// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1].
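+//
+// For example, a program could permit all viewers but hide sensitive events
+// (an illustrative sketch, not a recommendation):
+//
+//    trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
+//        return true, false
+//    }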
+var AuthRequest = func(req *http.Request) (any, sensitive bool) {
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+ switch {
+ case err != nil: // Badly formed address; fail closed.
+ return false, false
+ case host == "localhost" || host == "127.0.0.1" || host == "::1":
+ return true, true
+ default:
+ return false, false
+ }
+}
+
+func init() {
+ http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ Render(w, req, sensitive)
+ })
+ http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ RenderEvents(w, req, sensitive)
+ })
+}
+
+// Render renders the HTML page typically served at /debug/requests.
+// It does not do any auth checking; see AuthRequest for the default auth check
+// used by the handler registered on http.DefaultServeMux.
+// req may be nil.
+func Render(w io.Writer, req *http.Request, sensitive bool) {
+ data := &struct {
+ Families []string
+ ActiveTraceCount map[string]int
+ CompletedTraces map[string]*family
+
+ // Set when a bucket has been selected.
+ Traces traceList
+ Family string
+ Bucket int
+ Expanded bool
+ Traced bool
+ Active bool
+ ShowSensitive bool // whether to show sensitive events
+
+ Histogram template.HTML
+ HistogramWindow string // e.g. "last minute", "last hour", "all time"
+
+ // If non-zero, the set of traces is a partial set,
+ // and this is the total number.
+ Total int
+ }{
+ CompletedTraces: completedTraces,
+ }
+
+ data.ShowSensitive = sensitive
+ if req != nil {
+ // Allow show_sensitive=0 to force hiding of sensitive data for testing.
+ // This only goes one way; you can't use show_sensitive=1 to see things.
+ if req.FormValue("show_sensitive") == "0" {
+ data.ShowSensitive = false
+ }
+
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
+ data.Traced = exp
+ }
+ }
+
+ completedMu.RLock()
+ data.Families = make([]string, 0, len(completedTraces))
+ for fam := range completedTraces {
+ data.Families = append(data.Families, fam)
+ }
+ completedMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // We are careful here to minimize the time spent locking activeMu,
+ // since that lock is required every time an RPC starts and finishes.
+ data.ActiveTraceCount = make(map[string]int, len(data.Families))
+ activeMu.RLock()
+ for fam, s := range activeTraces {
+ data.ActiveTraceCount[fam] = s.Len()
+ }
+ activeMu.RUnlock()
+
+ var ok bool
+ data.Family, data.Bucket, ok = parseArgs(req)
+ switch {
+ case !ok:
+ // No-op
+ case data.Bucket == -1:
+ data.Active = true
+ n := data.ActiveTraceCount[data.Family]
+ data.Traces = getActiveTraces(data.Family)
+ if len(data.Traces) < n {
+ data.Total = n
+ }
+ case data.Bucket < bucketsPerFamily:
+ if b := lookupBucket(data.Family, data.Bucket); b != nil {
+ data.Traces = b.Copy(data.Traced)
+ }
+ default:
+ if f := getFamily(data.Family, false); f != nil {
+ var obs timeseries.Observable
+ f.LatencyMu.RLock()
+ switch o := data.Bucket - bucketsPerFamily; o {
+ case 0:
+ obs = f.Latency.Minute()
+ data.HistogramWindow = "last minute"
+ case 1:
+ obs = f.Latency.Hour()
+ data.HistogramWindow = "last hour"
+ case 2:
+ obs = f.Latency.Total()
+ data.HistogramWindow = "all time"
+ }
+ f.LatencyMu.RUnlock()
+ if obs != nil {
+ data.Histogram = obs.(*histogram).html()
+ }
+ }
+ }
+
+ if data.Traces != nil {
+ defer data.Traces.Free()
+ sort.Sort(data.Traces)
+ }
+
+ completedMu.RLock()
+ defer completedMu.RUnlock()
+ if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
+
+func parseArgs(req *http.Request) (fam string, b int, ok bool) {
+ if req == nil {
+ return "", 0, false
+ }
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < -1 {
+ return "", 0, false
+ }
+
+ return fam, b, true
+}
+
+func lookupBucket(fam string, b int) *traceBucket {
+ f := getFamily(fam, false)
+ if f == nil || b < 0 || b >= len(f.Buckets) {
+ return nil
+ }
+ return f.Buckets[b]
+}
+
+type contextKeyT string
+
+var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
+
+// NewContext returns a copy of the parent context
+// and associates it with a Trace.
+func NewContext(ctx context.Context, tr Trace) context.Context {
+ return context.WithValue(ctx, contextKey, tr)
+}
+
+// FromContext returns the Trace bound to the context, if any.
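+//
+// For example (illustrative; path is a caller-supplied variable):
+//
+//    if tr, ok := trace.FromContext(ctx); ok {
+//        tr.LazyPrintf("handling %q", path)
+//    }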
+func FromContext(ctx context.Context) (tr Trace, ok bool) {
+ tr, ok = ctx.Value(contextKey).(Trace)
+ return
+}
+
+// Trace represents an active request.
+type Trace interface {
+ // LazyLog adds x to the event log. It will be evaluated each time the
+ // /debug/requests page is rendered. Any memory referenced by x will be
+ // pinned until the trace is finished and later discarded.
+ LazyLog(x fmt.Stringer, sensitive bool)
+
+ // LazyPrintf evaluates its arguments with fmt.Sprintf each time the
+ // /debug/requests page is rendered. Any memory referenced by a will be
+ // pinned until the trace is finished and later discarded.
+ LazyPrintf(format string, a ...interface{})
+
+ // SetError declares that this trace resulted in an error.
+ SetError()
+
+ // SetRecycler sets a recycler for the trace.
+ // f will be called for each event passed to LazyLog at a time when
+ // it is no longer required, whether while the trace is still active
+ // and the event is discarded, or when a completed trace is discarded.
+ SetRecycler(f func(interface{}))
+
+ // SetTraceInfo sets the trace info for the trace.
+ // This is currently unused.
+ SetTraceInfo(traceID, spanID uint64)
+
+ // SetMaxEvents sets the maximum number of events that will be stored
+ // in the trace. This has no effect if any events have already been
+ // added to the trace.
+ SetMaxEvents(m int)
+
+ // Finish declares that this trace is complete.
+ // The trace should not be used after calling this method.
+ Finish()
+}
+
+type lazySprintf struct {
+ format string
+ a []interface{}
+}
+
+func (l *lazySprintf) String() string {
+ return fmt.Sprintf(l.format, l.a...)
+}
+
+// New returns a new Trace with the specified family and title.
+func New(family, title string) Trace {
+ tr := newTrace()
+ tr.ref()
+ tr.Family, tr.Title = family, title
+ tr.Start = time.Now()
+ tr.events = make([]event, 0, maxEventsPerTrace)
+
+ activeMu.RLock()
+ s := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ if s == nil {
+ activeMu.Lock()
+ s = activeTraces[tr.Family] // check again
+ if s == nil {
+ s = new(traceSet)
+ activeTraces[tr.Family] = s
+ }
+ activeMu.Unlock()
+ }
+ s.Add(tr)
+
+ // Trigger allocation of the completed trace structure for this family.
+ // This will cause the family to be present in the request page during
+ // the first trace of this family. We don't care about the return value,
+ // nor is there any need for this to run inline, so we execute it in its
+ // own goroutine, but only if the family isn't allocated yet.
+ completedMu.RLock()
+ if _, ok := completedTraces[tr.Family]; !ok {
+ go allocFamily(tr.Family)
+ }
+ completedMu.RUnlock()
+
+ return tr
+}
+
+func (tr *trace) Finish() {
+ tr.Elapsed = time.Since(tr.Start)
+ if DebugUseAfterFinish {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ tr.finishStack = buf[:n]
+ }
+
+ activeMu.RLock()
+ m := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ m.Remove(tr)
+
+ f := getFamily(tr.Family, true)
+ for _, b := range f.Buckets {
+ if b.Cond.match(tr) {
+ b.Add(tr)
+ }
+ }
+ // Add a sample of elapsed time as microseconds to the family's timeseries
+ h := new(histogram)
+ h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3)
+ f.LatencyMu.Lock()
+ f.Latency.Add(h)
+ f.LatencyMu.Unlock()
+
+ tr.unref() // matches ref in New
+}
+
+const (
+ bucketsPerFamily = 9
+ tracesPerBucket = 10
+ maxActiveTraces = 20 // Maximum number of active traces to show.
+ maxEventsPerTrace = 10
+ numHistogramBuckets = 38
+)
+
+var (
+ // The active traces.
+ activeMu sync.RWMutex
+ activeTraces = make(map[string]*traceSet) // family -> traces
+
+ // Families of completed traces.
+ completedMu sync.RWMutex
+ completedTraces = make(map[string]*family) // family -> traces
+)
+
+type traceSet struct {
+ mu sync.RWMutex
+ m map[*trace]bool
+
+ // We could avoid the entire map scan in FirstN by having a slice of all the traces
+ // ordered by start time, and an index into that from the trace struct, with a periodic
+ // repack of the slice after enough traces finish; we could also use a skip list or similar.
+ // However, that would shift some of the expense from /debug/requests time to RPC time,
+ // which is probably the wrong trade-off.
+}
+
+func (ts *traceSet) Len() int {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+ return len(ts.m)
+}
+
+func (ts *traceSet) Add(tr *trace) {
+ ts.mu.Lock()
+ if ts.m == nil {
+ ts.m = make(map[*trace]bool)
+ }
+ ts.m[tr] = true
+ ts.mu.Unlock()
+}
+
+func (ts *traceSet) Remove(tr *trace) {
+ ts.mu.Lock()
+ delete(ts.m, tr)
+ ts.mu.Unlock()
+}
+
+// FirstN returns the first n traces ordered by time.
+func (ts *traceSet) FirstN(n int) traceList {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+
+ if n > len(ts.m) {
+ n = len(ts.m)
+ }
+ trl := make(traceList, 0, n)
+
+ // Fast path for when no selectivity is needed.
+ if n == len(ts.m) {
+ for tr := range ts.m {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ sort.Sort(trl)
+ return trl
+ }
+
+ // Pick the oldest n traces.
+ // This is inefficient. See the comment in the traceSet struct.
+ for tr := range ts.m {
+ // Put the first n traces into trl in the order they occur.
+ // When we have n, sort trl, and thereafter maintain its order.
+ if len(trl) < n {
+ tr.ref()
+ trl = append(trl, tr)
+ if len(trl) == n {
+ // This is guaranteed to happen exactly once during this loop.
+ sort.Sort(trl)
+ }
+ continue
+ }
+ if tr.Start.After(trl[n-1].Start) {
+ continue
+ }
+
+ // Find where to insert this one.
+ tr.ref()
+ i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
+ trl[n-1].unref()
+ copy(trl[i+1:], trl[i:])
+ trl[i] = tr
+ }
+
+ return trl
+}
+
+func getActiveTraces(fam string) traceList {
+ activeMu.RLock()
+ s := activeTraces[fam]
+ activeMu.RUnlock()
+ if s == nil {
+ return nil
+ }
+ return s.FirstN(maxActiveTraces)
+}
+
+func getFamily(fam string, allocNew bool) *family {
+ completedMu.RLock()
+ f := completedTraces[fam]
+ completedMu.RUnlock()
+ if f == nil && allocNew {
+ f = allocFamily(fam)
+ }
+ return f
+}
+
+func allocFamily(fam string) *family {
+ completedMu.Lock()
+ defer completedMu.Unlock()
+ f := completedTraces[fam]
+ if f == nil {
+ f = newFamily()
+ completedTraces[fam] = f
+ }
+ return f
+}
+
+// family represents a set of trace buckets and associated latency information.
+type family struct {
+ // traces may occur in multiple buckets.
+ Buckets [bucketsPerFamily]*traceBucket
+
+ // latency time series
+ LatencyMu sync.RWMutex
+ Latency *timeseries.MinuteHourSeries
+}
+
+func newFamily() *family {
+ return &family{
+ Buckets: [bucketsPerFamily]*traceBucket{
+ {Cond: minCond(0)},
+ {Cond: minCond(50 * time.Millisecond)},
+ {Cond: minCond(100 * time.Millisecond)},
+ {Cond: minCond(200 * time.Millisecond)},
+ {Cond: minCond(500 * time.Millisecond)},
+ {Cond: minCond(1 * time.Second)},
+ {Cond: minCond(10 * time.Second)},
+ {Cond: minCond(100 * time.Second)},
+ {Cond: errorCond{}},
+ },
+ Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
+ }
+}
+
+// traceBucket represents a size-capped bucket of historic traces,
+// along with a condition for a trace to belong to the bucket.
+type traceBucket struct {
+ Cond cond
+
+ // Ring buffer implementation of a fixed-size FIFO queue.
+ mu sync.RWMutex
+ buf [tracesPerBucket]*trace
+ start int // < tracesPerBucket
+ length int // <= tracesPerBucket
+}
+
+func (b *traceBucket) Add(tr *trace) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ i := b.start + b.length
+ if i >= tracesPerBucket {
+ i -= tracesPerBucket
+ }
+ if b.length == tracesPerBucket {
+ // "Remove" an element from the bucket.
+ b.buf[i].unref()
+ b.start++
+ if b.start == tracesPerBucket {
+ b.start = 0
+ }
+ }
+ b.buf[i] = tr
+ if b.length < tracesPerBucket {
+ b.length++
+ }
+ tr.ref()
+}
+
+// Copy returns a copy of the traces in the bucket.
+// If tracedOnly is true, only the traces with trace information will be returned.
+// The logs will be ref'd before returning; the caller should call
+// the Free method when it is done with them.
+// TODO(dsymonds): keep track of traced requests in separate buckets.
+func (b *traceBucket) Copy(tracedOnly bool) traceList {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+
+ trl := make(traceList, 0, b.length)
+ for i, x := 0, b.start; i < b.length; i++ {
+ tr := b.buf[x]
+ if !tracedOnly || tr.spanID != 0 {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ x++
+ if x == b.length {
+ x = 0
+ }
+ }
+ return trl
+}
+
+func (b *traceBucket) Empty() bool {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ return b.length == 0
+}
+
+// cond represents a condition on a trace.
+type cond interface {
+ match(t *trace) bool
+ String() string
+}
+
+type minCond time.Duration
+
+func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
+func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
+
+type errorCond struct{}
+
+func (e errorCond) match(t *trace) bool { return t.IsError }
+func (e errorCond) String() string { return "errors" }
+
+type traceList []*trace
+
+// Free calls unref on each element of the list.
+func (trl traceList) Free() {
+ for _, t := range trl {
+ t.unref()
+ }
+}
+
+// traceList may be sorted in reverse chronological order.
+func (trl traceList) Len() int { return len(trl) }
+func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
+func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] }
+
+// An event is a timestamped log entry in a trace.
+type event struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in trace
+ NewDay bool // whether this event is on a different day to the previous event
+ Recyclable bool // whether this event was passed via LazyLog
+ What interface{} // string or fmt.Stringer
+ Sensitive bool // whether this event contains sensitive information
+}
+
+// WhenString returns a string representation of the time of the event.
+// It will include the date if midnight was crossed.
+func (e event) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// discarded represents a number of discarded events.
+// It is stored as *discarded to make it easier to update in-place.
+type discarded int
+
+func (d *discarded) String() string {
+ return fmt.Sprintf("(%d events discarded)", int(*d))
+}
+
+// trace represents an active or complete request,
+// either sent or received by this program.
+type trace struct {
+ // Family is the top-level grouping of traces to which this belongs.
+ Family string
+
+ // Title is the title of this trace.
+ Title string
+
+ // Timing information.
+ Start time.Time
+ Elapsed time.Duration // zero while active
+
+ // Trace information if non-zero.
+ traceID uint64
+ spanID uint64
+
+ // Whether this trace resulted in an error.
+ IsError bool
+
+ // Append-only sequence of events (modulo discards).
+ mu sync.RWMutex
+ events []event
+
+ refs int32 // how many buckets this is in
+ recycler func(interface{})
+ disc discarded // scratch space to avoid allocation
+
+ finishStack []byte // where finish was called, if DebugUseAfterFinish is set
+}
+
+func (tr *trace) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ tr.Family = ""
+ tr.Title = ""
+ tr.Start = time.Time{}
+ tr.Elapsed = 0
+ tr.traceID = 0
+ tr.spanID = 0
+ tr.IsError = false
+ tr.events = nil
+ tr.refs = 0
+ tr.recycler = nil
+ tr.disc = 0
+ tr.finishStack = nil
+}
+
+// delta returns the elapsed time since the last event or the trace start,
+// and whether it spans midnight.
+// L >= tr.mu
+func (tr *trace) delta(t time.Time) (time.Duration, bool) {
+ if len(tr.events) == 0 {
+ return t.Sub(tr.Start), false
+ }
+ prev := tr.events[len(tr.events)-1].When
+ return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
+ if DebugUseAfterFinish && tr.finishStack != nil {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
+ }
+
+ /*
+ NOTE TO DEBUGGERS
+
+ If you are here because your program panicked in this code,
+ it is almost definitely the fault of code using this package,
+ and very unlikely to be the fault of this code.
+
+ The most likely scenario is that some code elsewhere is using
+ a trace.Trace after its Finish method is called.
+ You can temporarily set the DebugUseAfterFinish var
+ to help discover where that is; do not leave that var set,
+ since it makes this package much less efficient.
+ */
+
+ e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
+ tr.mu.Lock()
+ e.Elapsed, e.NewDay = tr.delta(e.When)
+ if len(tr.events) < cap(tr.events) {
+ tr.events = append(tr.events, e)
+ } else {
+ // Discard the middle events.
+ di := int((cap(tr.events) - 1) / 2)
+ if d, ok := tr.events[di].What.(*discarded); ok {
+ (*d)++
+ } else {
+ // disc starts at two to account for the event it is replacing,
+ // plus the next one that we are about to drop.
+ tr.disc = 2
+ if tr.recycler != nil && tr.events[di].Recyclable {
+ go tr.recycler(tr.events[di].What)
+ }
+ tr.events[di].What = &tr.disc
+ }
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ tr.events[di].When = tr.events[di+1].When
+
+ if tr.recycler != nil && tr.events[di+1].Recyclable {
+ go tr.recycler(tr.events[di+1].What)
+ }
+ copy(tr.events[di+1:], tr.events[di+2:])
+ tr.events[cap(tr.events)-1] = e
+ }
+ tr.mu.Unlock()
+}
+
+func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
+ tr.addEvent(x, true, sensitive)
+}
+
+func (tr *trace) LazyPrintf(format string, a ...interface{}) {
+ tr.addEvent(&lazySprintf{format, a}, false, false)
+}
+
+func (tr *trace) SetError() { tr.IsError = true }
+
+func (tr *trace) SetRecycler(f func(interface{})) {
+ tr.recycler = f
+}
+
+func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
+ tr.traceID, tr.spanID = traceID, spanID
+}
+
+func (tr *trace) SetMaxEvents(m int) {
+ // Always keep at least three events: first, discarded count, last.
+ if len(tr.events) == 0 && m > 3 {
+ tr.events = make([]event, 0, m)
+ }
+}
+
+func (tr *trace) ref() {
+ atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+ if atomic.AddInt32(&tr.refs, -1) == 0 {
+ if tr.recycler != nil {
+ // freeTrace clears tr, so we hold tr.recycler and tr.events here.
+ go func(f func(interface{}), es []event) {
+ for _, e := range es {
+ if e.Recyclable {
+ f(e.What)
+ }
+ }
+ }(tr.recycler, tr.events)
+ }
+
+ freeTrace(tr)
+ }
+}
+
+func (tr *trace) When() string {
+ return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+ t := tr.Elapsed
+ if t == 0 {
+ // Active trace.
+ t = time.Since(tr.Start)
+ }
+ return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+ tr.mu.RLock()
+ defer tr.mu.RUnlock()
+ return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+ select {
+ case tr := <-traceFreeList:
+ return tr
+ default:
+ return new(trace)
+ }
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+ if DebugUseAfterFinish {
+ return // never reuse
+ }
+ tr.reset()
+ select {
+ case traceFreeList <- tr:
+ default:
+ }
+}
+
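+// elapsed formats a duration as seconds for display, blanking the leading
+// zeros of subsecond durations; e.g. 1.5ms renders as " .  1500".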
+func elapsed(d time.Duration) string {
+ b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+ // For subsecond durations, blank all zeros before decimal point,
+ // and all zeros between the decimal point and the first non-zero digit.
+ if d < time.Second {
+ dot := bytes.IndexByte(b, '.')
+ for i := 0; i < dot; i++ {
+ b[i] = ' '
+ }
+ for i := dot + 1; i < len(b); i++ {
+ if b[i] == '0' {
+ b[i] = ' '
+ } else {
+ break
+ }
+ }
+ }
+
+ return string(b)
+}
+
+var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "add": func(a, b int) int { return a + b },
+}).Parse(pageHTML))
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+<html>
+ <head>
+ <title>/debug/requests</title>
+ <style type="text/css">
+ body {
+ font-family: sans-serif;
+ }
+ table#tr-status td.family {
+ padding-right: 2em;
+ }
+ table#tr-status td.active {
+ padding-right: 1em;
+ }
+ table#tr-status td.latency-first {
+ padding-left: 1em;
+ }
+ table#tr-status td.empty {
+ color: #aaa;
+ }
+ table#reqs {
+ margin-top: 1em;
+ }
+ table#reqs tr.first {
+ {{if $.Expanded}}font-weight: bold;{{end}}
+ }
+ table#reqs td {
+ font-family: monospace;
+ }
+ table#reqs td.when {
+ text-align: right;
+ white-space: nowrap;
+ }
+ table#reqs td.elapsed {
+ padding: 0 0.5em;
+ text-align: right;
+ white-space: pre;
+ width: 10em;
+ }
+ address {
+ font-size: smaller;
+ margin-top: 5em;
+ }
+ </style>
+ </head>
+ <body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+ {{range $fam := .Families}}
+ <tr>
+ <td class="family">{{$fam}}</td>
+
+ {{$n := index $.ActiveTraceCount $fam}}
+ <td class="active {{if not $n}}empty{{end}}">
+ {{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{$n}} active]
+ {{if $n}}</a>{{end}}
+ </td>
+
+ {{$f := index $.CompletedTraces $fam}}
+ {{range $i, $b := $f.Buckets}}
+ {{$empty := $b.Empty}}
+ <td {{if $empty}}class="empty"{{end}}>
+ {{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{.Cond}}]
+ {{if not $empty}}</a>{{end}}
+ </td>
+ {{end}}
+
+ {{$nb := len $f.Buckets}}
+ <td class="latency-first">
+ <a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
+ </td>
+ <td>
+ <a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
+ </td>
+ <td>
+ <a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
+ </td>
+
+ </tr>
+ {{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+ [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+ [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+ {{if or $.Expanded (not $.Traced)}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+ {{else}}
+ [Traced/Summary]
+ {{end}}
+ {{if or (not $.Expanded) (not $.Traced)}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+ {{else}}
+ [Traced/Expanded]
+ {{end}}
+{{end}}
+
+{{if $.Total}}
+<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
+{{end}}
+
+<table id="reqs">
+ <caption>
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests
+ </caption>
+ <tr><th>When</th><th>Elapsed&nbsp;(s)</th></tr>
+ {{range $tr := $.Traces}}
+ <tr class="first">
+ <td class="when">{{$tr.When}}</td>
+ <td class="elapsed">{{$tr.ElapsedTime}}</td>
+ <td>{{$tr.Title}}</td>
+ {{/* TODO: include traceID/spanID */}}
+ </tr>
+ {{if $.Expanded}}
+ {{range $tr.Events}}
+ <tr>
+ <td class="when">{{.WhenString}}</td>
+ <td class="elapsed">{{elapsed .Elapsed}}</td>
+ <td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
+ </tr>
+ {{end}}
+ {{end}}
+ {{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>
+{{$.Histogram}}
+{{end}} {{/* if $.Histogram */}}
+
+ </body>
+</html>
+{{end}} {{/* end of Epilog */}}
+`
diff --git a/vendor/src/golang.org/x/oauth2/.travis.yml b/vendor/src/golang.org/x/oauth2/.travis.yml
new file mode 100644
index 0000000000..a035125c35
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.3
+ - 1.4
+
+install:
+ - export GOPATH="$HOME/gopath"
+ - mkdir -p "$GOPATH/src/golang.org/x"
+ - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
+ - go get -v -t -d golang.org/x/oauth2/...
+
+script:
+ - go test -v golang.org/x/oauth2/...
diff --git a/vendor/src/golang.org/x/oauth2/AUTHORS b/vendor/src/golang.org/x/oauth2/AUTHORS
new file mode 100644
index 0000000000..15167cd746
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/src/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/src/golang.org/x/oauth2/CONTRIBUTING.md
new file mode 100644
index 0000000000..46aa2b12dd
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+
+## Filing issues
+
+When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/vendor/src/golang.org/x/oauth2/CONTRIBUTORS b/vendor/src/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 0000000000..1c4577e968
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/src/golang.org/x/oauth2/LICENSE b/vendor/src/golang.org/x/oauth2/LICENSE
new file mode 100644
index 0000000000..d02f24fd52
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/golang.org/x/oauth2/README.md b/vendor/src/golang.org/x/oauth2/README.md
new file mode 100644
index 0000000000..0d5141733f
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/README.md
@@ -0,0 +1,64 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+
+The oauth2 package contains a client implementation of the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+ import (
+ "net/http"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ newappengine "google.golang.org/appengine"
+ newurlfetch "google.golang.org/appengine/urlfetch"
+
+ "appengine"
+ )
+
+ func handler(w http.ResponseWriter, r *http.Request) {
+ var c appengine.Context = appengine.NewContext(r)
+ c.Infof("Logging a message with the old package")
+
+ var ctx context.Context = newappengine.NewContext(r)
+ client := &http.Client{
+ Transport: &oauth2.Transport{
+ Source: google.AppEngineTokenSource(ctx, "scope"),
+ Base: &newurlfetch.Transport{Context: ctx},
+ },
+ }
+ client.Get("...")
+ }
+
diff --git a/vendor/src/golang.org/x/oauth2/client_appengine.go b/vendor/src/golang.org/x/oauth2/client_appengine.go
new file mode 100644
index 0000000000..8962c49d1d
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/client_appengine.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+ "google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+ internal.RegisterContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
+ return urlfetch.Client(ctx), nil
+}
diff --git a/vendor/src/golang.org/x/oauth2/google/appengine.go b/vendor/src/golang.org/x/oauth2/google/appengine.go
new file mode 100644
index 0000000000..dc993efb5e
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/google/appengine.go
@@ -0,0 +1,86 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+)
+
+// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs.
+var appengineVM bool
+
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
+
+// AppEngineTokenSource returns a token source that fetches tokens
+// issued to the current App Engine application's service account.
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
+// that involves user accounts, see oauth2.Config instead.
+//
+// The provided context must have come from appengine.NewContext.
+func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+ if appengineTokenFunc == nil {
+ panic("google: AppEngineTokenSource can only be used on App Engine.")
+ }
+ scopes := append([]string{}, scope...)
+ sort.Strings(scopes)
+ return &appEngineTokenSource{
+ ctx: ctx,
+ scopes: scopes,
+ key: strings.Join(scopes, " "),
+ }
+}
+
+// aeTokens caches fetched tokens so they can be reused until they expire.
+var (
+ aeTokensMu sync.Mutex
+ aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
+)
+
+type tokenLock struct {
+ mu sync.Mutex // guards t; held while fetching or updating t
+ t *oauth2.Token
+}
+
+type appEngineTokenSource struct {
+ ctx context.Context
+ scopes []string
+ key string // to aeTokens map; space-separated scopes
+}
+
+func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
+ if appengineTokenFunc == nil {
+ panic("google: AppEngineTokenSource can only be used on App Engine.")
+ }
+
+ aeTokensMu.Lock()
+ tok, ok := aeTokens[ts.key]
+ if !ok {
+ tok = &tokenLock{}
+ aeTokens[ts.key] = tok
+ }
+ aeTokensMu.Unlock()
+
+ tok.mu.Lock()
+ defer tok.mu.Unlock()
+ if tok.t.Valid() {
+ return tok.t, nil
+ }
+ access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
+ if err != nil {
+ return nil, err
+ }
+ tok.t = &oauth2.Token{
+ AccessToken: access,
+ Expiry: exp,
+ }
+ return tok.t, nil
+}
diff --git a/vendor/src/golang.org/x/oauth2/google/appengine_hook.go b/vendor/src/golang.org/x/oauth2/google/appengine_hook.go
new file mode 100644
index 0000000000..4f42c8b343
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/google/appengine_hook.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package google
+
+import "google.golang.org/appengine"
+
+func init() {
+ appengineTokenFunc = appengine.AccessToken
+}
diff --git a/vendor/src/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/src/golang.org/x/oauth2/google/appenginevm_hook.go
new file mode 100644
index 0000000000..633611cc3a
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/google/appenginevm_hook.go
@@ -0,0 +1,14 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appenginevm
+
+package google
+
+import "google.golang.org/appengine"
+
+func init() {
+ appengineVM = true
+ appengineTokenFunc = appengine.AccessToken
+}
diff --git a/vendor/src/golang.org/x/oauth2/google/default.go b/vendor/src/golang.org/x/oauth2/google/default.go
new file mode 100644
index 0000000000..b952362977
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/google/default.go
@@ -0,0 +1,155 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/cloud/compute/metadata"
+)
+
+// DefaultClient returns an HTTP Client that uses the
+// DefaultTokenSource to obtain authentication credentials.
+//
+// This client should be used when developing services
+// that run on Google App Engine or Google Compute Engine
+// and use "Application Default Credentials."
+//
+// For more details, see:
+// https://developers.google.com/accounts/docs/application-default-credentials
+//
+func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
+ ts, err := DefaultTokenSource(ctx, scope...)
+ if err != nil {
+ return nil, err
+ }
+ return oauth2.NewClient(ctx, ts), nil
+}
+
+// DefaultTokenSource is a token source that uses
+// "Application Default Credentials".
+//
+// It looks for credentials in the following places,
+// preferring the first location found:
+//
+// 1. A JSON file whose path is specified by the
+// GOOGLE_APPLICATION_CREDENTIALS environment variable.
+// 2. A JSON file in a location known to the gcloud command-line tool.
+// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
+// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
+// 3. On Google App Engine it uses the appengine.AccessToken function.
+// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
+// credentials from the metadata server.
+// (In this final case any provided scopes are ignored.)
+//
+// For more details, see:
+// https://developers.google.com/accounts/docs/application-default-credentials
+//
+func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+ // First, try the environment variable.
+ const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
+ if filename := os.Getenv(envVar); filename != "" {
+ ts, err := tokenSourceFromFile(ctx, filename, scope)
+ if err != nil {
+ return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
+ }
+ return ts, nil
+ }
+
+ // Second, try a well-known file.
+ filename := wellKnownFile()
+ _, err := os.Stat(filename)
+ if err == nil {
+ ts, err2 := tokenSourceFromFile(ctx, filename, scope)
+ if err2 == nil {
+ return ts, nil
+ }
+ err = err2
+ } else if os.IsNotExist(err) {
+ err = nil // ignore this error
+ }
+ if err != nil {
+ return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
+ }
+
+ // Third, if we're on Google App Engine use those credentials.
+ if appengineTokenFunc != nil && !appengineVM {
+ return AppEngineTokenSource(ctx, scope...), nil
+ }
+
+ // Fourth, if we're on Google Compute Engine use the metadata server.
+ if metadata.OnGCE() {
+ return ComputeTokenSource(""), nil
+ }
+
+ // None were found; return a helpful error.
+ const url = "https://developers.google.com/accounts/docs/application-default-credentials"
+ return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
+}
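+
+// Illustrative usage sketch: obtaining an authenticated client via
+// Application Default Credentials (the scope URL and project ID below
+// are placeholders):
+//
+//    client, err := google.DefaultClient(oauth2.NoContext,
+//        "https://www.googleapis.com/auth/devstorage.read_only")
+//    if err != nil {
+//        log.Fatal(err) // no credentials found in any of the locations above
+//    }
+//    resp, err := client.Get("https://www.googleapis.com/storage/v1/b?project=my-project")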
+
+func wellKnownFile() string {
+ const f = "application_default_credentials.json"
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
+ }
+ return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
+}
+
+func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ var d struct {
+ // Common fields
+ Type string
+ ClientID string `json:"client_id"`
+
+ // User Credential fields
+ ClientSecret string `json:"client_secret"`
+ RefreshToken string `json:"refresh_token"`
+
+ // Service Account fields
+ ClientEmail string `json:"client_email"`
+ PrivateKeyID string `json:"private_key_id"`
+ PrivateKey string `json:"private_key"`
+ }
+ if err := json.Unmarshal(b, &d); err != nil {
+ return nil, err
+ }
+ switch d.Type {
+ case "authorized_user":
+ cfg := &oauth2.Config{
+ ClientID: d.ClientID,
+ ClientSecret: d.ClientSecret,
+ Scopes: append([]string{}, scopes...), // copy
+ Endpoint: Endpoint,
+ }
+ tok := &oauth2.Token{RefreshToken: d.RefreshToken}
+ return cfg.TokenSource(ctx, tok), nil
+ case "service_account":
+ cfg := &jwt.Config{
+ Email: d.ClientEmail,
+ PrivateKey: []byte(d.PrivateKey),
+ Scopes: append([]string{}, scopes...), // copy
+ TokenURL: JWTTokenURL,
+ }
+ return cfg.TokenSource(ctx), nil
+ case "":
+ return nil, errors.New("missing 'type' field in credentials")
+ default:
+ return nil, fmt.Errorf("unknown credential type: %q", d.Type)
+ }
+}
diff --git a/vendor/src/golang.org/x/oauth2/google/google.go b/vendor/src/golang.org/x/oauth2/google/google.go
new file mode 100644
index 0000000000..9a3d5feb1b
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/google/google.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package google provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs.
+// It supports the Web server flow, client-side credentials, service accounts,
+// Google Compute Engine service accounts, and Google App Engine service
+// accounts.
+//
+// For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
+package google // import "golang.org/x/oauth2/google"
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/cloud/compute/metadata"
+)
+
+// Endpoint is Google's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://accounts.google.com/o/oauth2/auth",
+ TokenURL: "https://accounts.google.com/o/oauth2/token",
+}
+
+// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
+const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+
+// ConfigFromJSON uses a Google Developers Console client_credentials.json
+// file to construct a config.
+// client_credentials.json can be downloaded from https://console.developers.google.com,
+// under "APIs & Auth" > "Credentials". Download the Web application credentials in the
+// JSON format and provide the contents of the file as jsonKey.
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
+ type cred struct {
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ RedirectURIs []string `json:"redirect_uris"`
+ AuthURI string `json:"auth_uri"`
+ TokenURI string `json:"token_uri"`
+ }
+ var j struct {
+ Web *cred `json:"web"`
+ Installed *cred `json:"installed"`
+ }
+ if err := json.Unmarshal(jsonKey, &j); err != nil {
+ return nil, err
+ }
+ var c *cred
+ switch {
+ case j.Web != nil:
+ c = j.Web
+ case j.Installed != nil:
+ c = j.Installed
+ default:
+ return nil, fmt.Errorf("oauth2/google: no credentials found")
+ }
+ if len(c.RedirectURIs) < 1 {
+ return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
+ }
+ return &oauth2.Config{
+ ClientID: c.ClientID,
+ ClientSecret: c.ClientSecret,
+ RedirectURL: c.RedirectURIs[0],
+ Scopes: scope,
+ Endpoint: oauth2.Endpoint{
+ AuthURL: c.AuthURI,
+ TokenURL: c.TokenURI,
+ },
+ }, nil
+}
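+
+// A short usage sketch ("client_credentials.json" is a placeholder path):
+//
+//    b, err := ioutil.ReadFile("client_credentials.json")
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    conf, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/drive")
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    url := conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline)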
+
+// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
+// the credentials that authorize and authenticate the requests.
+// Create a service account on "Credentials" page under "APIs & Auth" for your
+// project at https://console.developers.google.com to download a JSON key file.
+func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
+ var key struct {
+ Email string `json:"client_email"`
+ PrivateKey string `json:"private_key"`
+ }
+ if err := json.Unmarshal(jsonKey, &key); err != nil {
+ return nil, err
+ }
+ return &jwt.Config{
+ Email: key.Email,
+ PrivateKey: []byte(key.PrivateKey),
+ Scopes: scope,
+ TokenURL: JWTTokenURL,
+ }, nil
+}
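+
+// A minimal sketch ("key.json" is a placeholder for a downloaded service
+// account key file):
+//
+//    b, err := ioutil.ReadFile("key.json")
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    conf, err := google.JWTConfigFromJSON(b, "https://www.googleapis.com/auth/bigquery")
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    client := conf.Client(oauth2.NoContext)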
+
+// ComputeTokenSource returns a token source that fetches access tokens
+// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
+// this token source if your program is running on a GCE instance.
+// If no account is specified, "default" is used.
+// Further information about retrieving access tokens from the GCE metadata
+// server can be found at https://cloud.google.com/compute/docs/authentication.
+func ComputeTokenSource(account string) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, computeSource{account: account})
+}
+
+type computeSource struct {
+ account string
+}
+
+func (cs computeSource) Token() (*oauth2.Token, error) {
+ if !metadata.OnGCE() {
+ return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
+ }
+ acct := cs.account
+ if acct == "" {
+ acct = "default"
+ }
+ tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
+ if err != nil {
+ return nil, err
+ }
+ var res struct {
+ AccessToken string `json:"access_token"`
+ ExpiresInSec int `json:"expires_in"`
+ TokenType string `json:"token_type"`
+ }
+ err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
+ }
+ if res.ExpiresInSec == 0 || res.AccessToken == "" {
+ return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
+ }
+ return &oauth2.Token{
+ AccessToken: res.AccessToken,
+ TokenType: res.TokenType,
+ Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
+ }, nil
+}
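+
+// A brief sketch, valid only on a GCE instance (the empty account name
+// selects the "default" service account):
+//
+//    ts := google.ComputeTokenSource("")
+//    client := oauth2.NewClient(oauth2.NoContext, ts)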
diff --git a/vendor/src/golang.org/x/oauth2/google/jwt.go b/vendor/src/golang.org/x/oauth2/google/jwt.go
new file mode 100644
index 0000000000..b91991786f
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/google/jwt.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "crypto/rsa"
+ "fmt"
+ "time"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+ "golang.org/x/oauth2/jws"
+)
+
+// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON
+// key file to read the credentials that authorize and authenticate the
+// requests, and returns a TokenSource that does not use any OAuth2 flow but
+// instead creates a JWT and sends that as the access token.
+// The audience is typically a URL that specifies the scope of the credentials.
+//
+// Note that this is not a standard OAuth flow, but rather an
+// optimization supported by a few Google services.
+// Unless you know otherwise, you should use JWTConfigFromJSON instead.
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) {
+ cfg, err := JWTConfigFromJSON(jsonKey)
+ if err != nil {
+ return nil, fmt.Errorf("google: could not parse JSON key: %v", err)
+ }
+ pk, err := internal.ParseKey(cfg.PrivateKey)
+ if err != nil {
+ return nil, fmt.Errorf("google: could not parse key: %v", err)
+ }
+ ts := &jwtAccessTokenSource{
+ email: cfg.Email,
+ audience: audience,
+ pk: pk,
+ }
+ tok, err := ts.Token()
+ if err != nil {
+ return nil, err
+ }
+ return oauth2.ReuseTokenSource(tok, ts), nil
+}
+
+type jwtAccessTokenSource struct {
+ email, audience string
+ pk *rsa.PrivateKey
+}
+
+func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) {
+ iat := time.Now()
+ exp := iat.Add(time.Hour)
+ cs := &jws.ClaimSet{
+ Iss: ts.email,
+ Sub: ts.email,
+ Aud: ts.audience,
+ Iat: iat.Unix(),
+ Exp: exp.Unix(),
+ }
+ hdr := &jws.Header{
+ Algorithm: "RS256",
+ Typ: "JWT",
+ }
+ msg, err := jws.Encode(hdr, cs, ts.pk)
+ if err != nil {
+ return nil, fmt.Errorf("google: could not encode JWT: %v", err)
+ }
+ return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil
+}
diff --git a/vendor/src/golang.org/x/oauth2/google/sdk.go b/vendor/src/golang.org/x/oauth2/google/sdk.go
new file mode 100644
index 0000000000..d29a3bb9bb
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/google/sdk.go
@@ -0,0 +1,168 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+)
+
+type sdkCredentials struct {
+ Data []struct {
+ Credential struct {
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ AccessToken string `json:"access_token"`
+ RefreshToken string `json:"refresh_token"`
+ TokenExpiry *time.Time `json:"token_expiry"`
+ } `json:"credential"`
+ Key struct {
+ Account string `json:"account"`
+ Scope string `json:"scope"`
+ } `json:"key"`
+ }
+}
+
+// An SDKConfig provides access to tokens from an account already
+// authorized via the Google Cloud SDK.
+type SDKConfig struct {
+ conf oauth2.Config
+ initialToken *oauth2.Token
+}
+
+// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
+// account. If account is empty, the account currently active in
+// Google Cloud SDK properties is used.
+// Google Cloud SDK credentials must be created by running `gcloud auth login`
+// before using this function.
+// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
+func NewSDKConfig(account string) (*SDKConfig, error) {
+ configPath, err := sdkConfigPath()
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
+ }
+ credentialsPath := filepath.Join(configPath, "credentials")
+ f, err := os.Open(credentialsPath)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
+ }
+ defer f.Close()
+
+ var c sdkCredentials
+ if err := json.NewDecoder(f).Decode(&c); err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
+ }
+ if len(c.Data) == 0 {
+ return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
+ }
+ if account == "" {
+ propertiesPath := filepath.Join(configPath, "properties")
+ f, err := os.Open(propertiesPath)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
+ }
+ defer f.Close()
+ ini, err := internal.ParseINI(f)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
+ }
+ core, ok := ini["core"]
+ if !ok {
+ return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
+ }
+ active, ok := core["account"]
+ if !ok {
+ return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
+ }
+ account = active
+ }
+
+ for _, d := range c.Data {
+ if account == "" || d.Key.Account == account {
+ if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
+ return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
+ }
+ var expiry time.Time
+ if d.Credential.TokenExpiry != nil {
+ expiry = *d.Credential.TokenExpiry
+ }
+ return &SDKConfig{
+ conf: oauth2.Config{
+ ClientID: d.Credential.ClientID,
+ ClientSecret: d.Credential.ClientSecret,
+ Scopes: strings.Split(d.Key.Scope, " "),
+ Endpoint: Endpoint,
+ RedirectURL: "oob",
+ },
+ initialToken: &oauth2.Token{
+ AccessToken: d.Credential.AccessToken,
+ RefreshToken: d.Credential.RefreshToken,
+ Expiry: expiry,
+ },
+ }, nil
+ }
+ }
+ return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
+}
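+
+// A brief sketch (assumes `gcloud auth login` has already been run):
+//
+//    conf, err := google.NewSDKConfig("")
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    client := conf.Client(oauth2.NoContext)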
+
+// Client returns an HTTP client using Google Cloud SDK credentials to
+// authorize requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+ return &http.Client{
+ Transport: &oauth2.Transport{
+ Source: c.TokenSource(ctx),
+ },
+ }
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It returns the current access token stored in the credentials,
+// and refreshes it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+ return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string {
+ return c.conf.Scopes
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
+var sdkConfigPath = func() (string, error) {
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+ }
+ homeDir := guessUnixHomeDir()
+ if homeDir == "" {
+ return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+ }
+ return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+ usr, err := user.Current()
+ if err == nil {
+ return usr.HomeDir
+ }
+ return os.Getenv("HOME")
+}
diff --git a/vendor/src/golang.org/x/oauth2/internal/oauth2.go b/vendor/src/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 0000000000..fbe1028d64
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,76 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+ "bufio"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+ block, _ := pem.Decode(key)
+ if block != nil {
+ key = block.Bytes
+ }
+ parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+ if err != nil {
+ parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+ if err != nil {
+ return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err)
+ }
+ }
+ parsed, ok := parsedKey.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("private key is invalid")
+ }
+ return parsed, nil
+}
+
+// ParseINI parses a simple INI-style document into a map from section
+// name to key/value pairs; keys that appear before any section header
+// are stored under the empty section name.
+func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
+ result := map[string]map[string]string{
+ "": map[string]string{}, // root section
+ }
+ scanner := bufio.NewScanner(ini)
+ currentSection := ""
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if strings.HasPrefix(line, ";") {
+ // comment.
+ continue
+ }
+ if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+ currentSection = strings.TrimSpace(line[1 : len(line)-1])
+ result[currentSection] = map[string]string{}
+ continue
+ }
+ parts := strings.SplitN(line, "=", 2)
+ if len(parts) == 2 && parts[0] != "" {
+ result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning ini: %v", err)
+ }
+ return result, nil
+}
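+
+// A small sketch of the expected input shape:
+//
+//    ini, err := ParseINI(strings.NewReader("[core]\naccount = user@example.com\n"))
+//    // ini["core"]["account"] == "user@example.com"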
+
+// CondVal returns a single-element slice containing v, or nil if v is
+// empty. It is used to omit empty optional URL parameters.
+func CondVal(v string) []string {
+ if v == "" {
+ return nil
+ }
+ return []string{v}
+}
diff --git a/vendor/src/golang.org/x/oauth2/internal/token.go b/vendor/src/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 0000000000..39caf6c617
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,221 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time
+
+ // Raw optionally contains extra metadata from the server
+ // when updating a token.
+ Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+ Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+ if v := e.ExpiresIn; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ if v := e.Expires; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+ var n json.Number
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+ i, err := n.Int64()
+ if err != nil {
+ return err
+ }
+ *e = expirationTime(i)
+ return nil
+}
+
+var brokenAuthHeaderProviders = []string{
+ "https://accounts.google.com/",
+ "https://api.dropbox.com/",
+ "https://api.instagram.com/",
+ "https://api.netatmo.net/",
+ "https://api.odnoklassniki.ru/",
+ "https://api.pushbullet.com/",
+ "https://api.soundcloud.com/",
+ "https://api.twitch.tv/",
+ "https://app.box.com/",
+ "https://connect.stripe.com/",
+ "https://login.microsoftonline.com/",
+ "https://login.salesforce.com/",
+ "https://oauth.sandbox.trainingpeaks.com/",
+ "https://oauth.trainingpeaks.com/",
+ "https://oauth.vk.com/",
+ "https://slack.com/",
+ "https://test-sandbox.auth.corp.google.com",
+ "https://test.salesforce.com/",
+ "https://user.gini.net/",
+ "https://www.douban.com/",
+ "https://www.googleapis.com/",
+ "https://www.linkedin.com/",
+ "https://www.strava.com/oauth/",
+}
+
+// RegisterBrokenAuthHeaderProvider registers a token URL prefix whose
+// provider is known to reject HTTP Basic authentication on its token
+// endpoint, so that credentials are sent as form values instead.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+ brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts the client secret in the Authorization header.
+// - Dropbox accepts it either in the URL param or the Auth header, but not both.
+// - Google only accepts the URL param (not spec compliant?), not the Auth header.
+// - Stripe only accepts the client secret in the Auth header with the Bearer method, not Basic.
+func providerAuthHeaderWorks(tokenURL string) bool {
+ for _, s := range brokenAuthHeaderProviders {
+ if strings.HasPrefix(tokenURL, s) {
+ // Some sites fail to implement the OAuth2 spec fully.
+ return false
+ }
+ }
+
+ // Assume the provider implements the spec properly
+ // otherwise. We can add more exceptions as they're
+ // discovered. We will _not_ be adding configurable hooks
+ // to this package to let users select server bugs.
+ return true
+}
+
+// RetrieveToken posts the given form values to TokenURL using the HTTP
+// client derived from ctx and parses the response into a *Token.
+func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) {
+ hc, err := ContextClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ v.Set("client_id", ClientID)
+ bustedAuth := !providerAuthHeaderWorks(TokenURL)
+ if bustedAuth && ClientSecret != "" {
+ v.Set("client_secret", ClientSecret)
+ }
+ req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ if !bustedAuth {
+ req.SetBasicAuth(ClientID, ClientSecret)
+ }
+ r, err := hc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Body.Close()
+ body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ if code := r.StatusCode; code < 200 || code > 299 {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
+ }
+
+ var token *Token
+ content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+ switch content {
+ case "application/x-www-form-urlencoded", "text/plain":
+ vals, err := url.ParseQuery(string(body))
+ if err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: vals.Get("access_token"),
+ TokenType: vals.Get("token_type"),
+ RefreshToken: vals.Get("refresh_token"),
+ Raw: vals,
+ }
+ e := vals.Get("expires_in")
+ if e == "" {
+ // TODO(jbd): Facebook's OAuth2 implementation is broken and
+ // returns expires_in field in expires. Remove the fallback to expires,
+ // when Facebook fixes their implementation.
+ e = vals.Get("expires")
+ }
+ expires, _ := strconv.Atoi(e)
+ if expires != 0 {
+ token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+ }
+ default:
+ var tj tokenJSON
+ if err = json.Unmarshal(body, &tj); err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: tj.AccessToken,
+ TokenType: tj.TokenType,
+ RefreshToken: tj.RefreshToken,
+ Expiry: tj.expiry(),
+ Raw: make(map[string]interface{}),
+ }
+ json.Unmarshal(body, &token.Raw) // no error checks for optional fields
+ }
+ // Don't overwrite `RefreshToken` with an empty value
+ // if this was a token refreshing request.
+ if token.RefreshToken == "" {
+ token.RefreshToken = v.Get("refresh_token")
+ }
+ return token, nil
+}
diff --git a/vendor/src/golang.org/x/oauth2/internal/transport.go b/vendor/src/golang.org/x/oauth2/internal/transport.go
new file mode 100644
index 0000000000..f1f173e345
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/internal/transport.go
@@ -0,0 +1,69 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient ContextKey
+
+// ContextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It's immutable
+// because nobody else can create a ContextKey, being unexported.
+type ContextKey struct{}
+
+// ContextClientFunc is a func which tries to return an *http.Client
+// given a Context value. If it returns an error, the search stops
+// with that error. If it returns (nil, nil), the search continues
+// down the list of registered funcs.
+type ContextClientFunc func(context.Context) (*http.Client, error)
+
+var contextClientFuncs []ContextClientFunc
+
+func RegisterContextClientFunc(fn ContextClientFunc) {
+ contextClientFuncs = append(contextClientFuncs, fn)
+}
+
+func ContextClient(ctx context.Context) (*http.Client, error) {
+ if ctx != nil {
+ if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+ return hc, nil
+ }
+ }
+ for _, fn := range contextClientFuncs {
+ c, err := fn(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if c != nil {
+ return c, nil
+ }
+ }
+ return http.DefaultClient, nil
+}
+
+func ContextTransport(ctx context.Context) http.RoundTripper {
+ hc, err := ContextClient(ctx)
+ // This is a rare error case (somebody using nil on App Engine).
+ if err != nil {
+ return ErrorTransport{err}
+ }
+ return hc.Transport
+}
+
+// ErrorTransport returns the specified error on RoundTrip.
+// This RoundTripper should be used in rare error cases where
+// error handling can be postponed to response handling time.
+type ErrorTransport struct{ Err error }
+
+func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
+ return nil, t.Err
+}
diff --git a/vendor/src/golang.org/x/oauth2/jws/jws.go b/vendor/src/golang.org/x/oauth2/jws/jws.go
new file mode 100644
index 0000000000..8ca5978432
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/jws/jws.go
@@ -0,0 +1,172 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides encoding and decoding utilities for
+// signed JWS messages.
+package jws // import "golang.org/x/oauth2/jws"
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+ Iss string `json:"iss"` // email address of the client_id of the application making the access token request
+ Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+ Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
+ Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch)
+ Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch)
+ Typ string `json:"typ,omitempty"` // token type (Optional).
+
+ // Email for which the application is requesting delegated access (Optional).
+ Sub string `json:"sub,omitempty"`
+
+ // The old name of Sub. Client keeps setting Prn to be
+ // compliant with legacy OAuth 2.0 providers. (Optional)
+ Prn string `json:"prn,omitempty"`
+
+ // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+ // This array is marshalled using custom code (see (c *ClaimSet) encode()).
+ PrivateClaims map[string]interface{} `json:"-"`
+}
+
+func (c *ClaimSet) encode() (string, error) {
+ // Backdate the issued-at time to tolerate clocks that are not
+ // perfectly in sync. If the client machine's time is in the future
+ // according to Google's servers, an access token will not be issued.
+ now := time.Now().Add(-10 * time.Second)
+ if c.Iat == 0 {
+ c.Iat = now.Unix()
+ }
+ if c.Exp == 0 {
+ c.Exp = now.Add(time.Hour).Unix()
+ }
+ if c.Exp < c.Iat {
+ return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat)
+ }
+
+ b, err := json.Marshal(c)
+ if err != nil {
+ return "", err
+ }
+
+ if len(c.PrivateClaims) == 0 {
+ return base64Encode(b), nil
+ }
+
+ // Marshal private claim set and then append it to b.
+ prv, err := json.Marshal(c.PrivateClaims)
+ if err != nil {
+ return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+ }
+
+ // Concatenate public and private claim JSON objects.
+ if !bytes.HasSuffix(b, []byte{'}'}) {
+ return "", fmt.Errorf("jws: invalid JSON %s", b)
+ }
+ if !bytes.HasPrefix(prv, []byte{'{'}) {
+ return "", fmt.Errorf("jws: invalid JSON %s", prv)
+ }
+ b[len(b)-1] = ',' // Replace closing curly brace with a comma.
+ b = append(b, prv[1:]...) // Append private claims.
+ return base64Encode(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+ // The algorithm used for signature.
+ Algorithm string `json:"alg"`
+
+ // Represents the token type.
+ Typ string `json:"typ"`
+}
+
+func (h *Header) encode() (string, error) {
+ b, err := json.Marshal(h)
+ if err != nil {
+ return "", err
+ }
+ return base64Encode(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+ // A JWS payload has three dot-separated segments; the claim set is the second.
+ s := strings.Split(payload, ".")
+ if len(s) < 2 {
+ // TODO(jbd): Provide more context about the error.
+ return nil, errors.New("jws: invalid token received")
+ }
+ decoded, err := base64Decode(s[1])
+ if err != nil {
+ return nil, err
+ }
+ c := &ClaimSet{}
+ err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
+ return c, err
+}
+
+// Signer returns a signature for the given data.
+type Signer func(data []byte) (sig []byte, err error)
+
+// EncodeWithSigner encodes a header and claim set with the provided signer.
+func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) {
+ head, err := header.encode()
+ if err != nil {
+ return "", err
+ }
+ cs, err := c.encode()
+ if err != nil {
+ return "", err
+ }
+ ss := fmt.Sprintf("%s.%s", head, cs)
+ sig, err := sg([]byte(ss))
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%s.%s", ss, base64Encode(sig)), nil
+}
+
+// Encode encodes a signed JWS with provided header and claim set.
+// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key.
+func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) {
+ sg := func(data []byte) (sig []byte, err error) {
+ h := sha256.New()
+ h.Write([]byte(data))
+ return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
+ }
+ return EncodeWithSigner(header, c, sg)
+}
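+
+// A self-contained sketch using a freshly generated throwaway key
+// (claim values are placeholders):
+//
+//    key, _ := rsa.GenerateKey(rand.Reader, 2048)
+//    hdr := &jws.Header{Algorithm: "RS256", Typ: "JWT"}
+//    cs := &jws.ClaimSet{Iss: "me@example.com", Aud: "https://provider.example.com/token"}
+//    token, err := jws.Encode(hdr, cs, key)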
+
+// base64Encode returns a Base64url-encoded version of the input with any
+// trailing "=" padding stripped.
+func base64Encode(b []byte) string {
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// base64Decode decodes the Base64url-encoded string.
+func base64Decode(s string) ([]byte, error) {
+ // add back missing padding
+ switch len(s) % 4 {
+ case 1:
+ s += "==="
+ case 2:
+ s += "=="
+ case 3:
+ s += "="
+ }
+ return base64.URLEncoding.DecodeString(s)
+}
diff --git a/vendor/src/golang.org/x/oauth2/jwt/jwt.go b/vendor/src/golang.org/x/oauth2/jwt/jwt.go
new file mode 100644
index 0000000000..2ffad21a60
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/jwt/jwt.go
@@ -0,0 +1,153 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
+// known as "two-legged OAuth 2.0".
+//
+// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+ "golang.org/x/oauth2/jws"
+)
+
+var (
+ defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
+)
+
+// Config is the configuration for using JWT to fetch tokens,
+// commonly known as "two-legged OAuth 2.0".
+type Config struct {
+ // Email is the OAuth client identifier used when communicating with
+ // the configured OAuth provider.
+ Email string
+
+ // PrivateKey contains the contents of an RSA private key or the
+ // contents of a PEM file that contains a private key. The provided
+ // private key is used to sign JWT payloads.
+ // PEM containers with a passphrase are not supported.
+ // Use the following command to convert a PKCS 12 file into a PEM.
+ //
+ // $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+ //
+ PrivateKey []byte
+
+ // Subject is the optional user to impersonate.
+ Subject string
+
+ // Scopes optionally specifies a list of requested permission scopes.
+ Scopes []string
+
+ // TokenURL is the endpoint required to complete the 2-legged JWT flow.
+ TokenURL string
+
+ // Expires optionally specifies how long the token is valid for.
+ Expires time.Duration
+}
+
+// TokenSource returns a JWT TokenSource using the configuration
+// in c and the HTTP client from the provided context.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
+}
+
+// Client returns an HTTP client wrapping the context's
+// HTTP transport and adding Authorization headers with tokens
+// obtained from c.
+//
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+ return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
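+
+// A minimal sketch of the two-legged flow (email, key bytes, scope, and
+// token URL are placeholders):
+//
+//    conf := &jwt.Config{
+//        Email:      "service-account@example.com",
+//        PrivateKey: pemBytes, // contents of a PEM-encoded private key file
+//        Scopes:     []string{"https://www.googleapis.com/auth/bigquery"},
+//        TokenURL:   "https://provider.example.com/token",
+//    }
+//    client := conf.Client(oauth2.NoContext)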
+
+// jwtSource is a source that always does a signed JWT request for a token.
+// It should typically be wrapped with a reuseTokenSource.
+type jwtSource struct {
+ ctx context.Context
+ conf *Config
+}
+
+func (js jwtSource) Token() (*oauth2.Token, error) {
+ pk, err := internal.ParseKey(js.conf.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ hc := oauth2.NewClient(js.ctx, nil)
+ claimSet := &jws.ClaimSet{
+ Iss: js.conf.Email,
+ Scope: strings.Join(js.conf.Scopes, " "),
+ Aud: js.conf.TokenURL,
+ }
+ if subject := js.conf.Subject; subject != "" {
+ claimSet.Sub = subject
+ // prn is the old name of sub. Keep setting it
+ // to be compatible with legacy OAuth 2.0 providers.
+ claimSet.Prn = subject
+ }
+ if t := js.conf.Expires; t > 0 {
+ claimSet.Exp = time.Now().Add(t).Unix()
+ }
+ payload, err := jws.Encode(defaultHeader, claimSet, pk)
+ if err != nil {
+ return nil, err
+ }
+ v := url.Values{}
+ v.Set("grant_type", defaultGrantType)
+ v.Set("assertion", payload)
+ resp, err := hc.PostForm(js.conf.TokenURL, v)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ if c := resp.StatusCode; c < 200 || c > 299 {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
+ }
+ // tokenRes is the JSON response body.
+ var tokenRes struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ IDToken string `json:"id_token"`
+ ExpiresIn int64 `json:"expires_in"` // relative seconds from now
+ }
+ if err := json.Unmarshal(body, &tokenRes); err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ token := &oauth2.Token{
+ AccessToken: tokenRes.AccessToken,
+ TokenType: tokenRes.TokenType,
+ }
+ raw := make(map[string]interface{})
+ json.Unmarshal(body, &raw) // no error checks for optional fields
+ token = token.WithExtra(raw)
+
+ if secs := tokenRes.ExpiresIn; secs > 0 {
+ token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+ }
+ if v := tokenRes.IDToken; v != "" {
+ // decode returned id token to get expiry
+ claimSet, err := jws.Decode(v)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
+ }
+ token.Expiry = time.Unix(claimSet.Exp, 0)
+ }
+ return token, nil
+}
diff --git a/vendor/src/golang.org/x/oauth2/oauth2.go b/vendor/src/golang.org/x/oauth2/oauth2.go
new file mode 100644
index 0000000000..a68289607b
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/oauth2.go
@@ -0,0 +1,337 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package oauth2 provides support for making
+// OAuth2 authorized and authenticated HTTP requests.
+// It can additionally grant authorization with Bearer JWT.
+package oauth2 // import "golang.org/x/oauth2"
+
+import (
+ "bytes"
+ "errors"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+)
+
+// NoContext is the default context you should supply if not using
+// your own context.Context (see https://golang.org/x/net/context).
+var NoContext = context.TODO()
+
+// RegisterBrokenAuthHeaderProvider registers an OAuth2 server
+// identified by the tokenURL prefix as an OAuth2 implementation
+// which doesn't support the HTTP Basic authentication
+// scheme to authenticate with the authorization server.
+// Once a server is registered, credentials (client_id and client_secret)
+// will be passed as query parameters rather than being present
+// in the Authorization header.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+ internal.RegisterBrokenAuthHeaderProvider(tokenURL)
+}
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+type Config struct {
+ // ClientID is the application's ID.
+ ClientID string
+
+ // ClientSecret is the application's secret.
+ ClientSecret string
+
+ // Endpoint contains the resource server's token endpoint
+ // URLs. These are constants specific to each server and are
+ // often available via site-specific packages, such as
+ // google.Endpoint or github.Endpoint.
+ Endpoint Endpoint
+
+ // RedirectURL is the URL to redirect users to after they
+ // complete the authorization flow with the provider.
+ RedirectURL string
+
+ // Scopes optionally specifies a list of requested permission scopes.
+ Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+ // Token returns a token or an error.
+ // Token must be safe for concurrent use by multiple goroutines.
+ // The returned Token must not be modified.
+ Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+ AuthURL string
+ TokenURL string
+}
+
+var (
+ // AccessTypeOnline and AccessTypeOffline are options passed
+ // to the Options.AuthCodeURL method. They modify the
+ // "access_type" field that gets sent in the URL returned by
+ // AuthCodeURL.
+ //
+ // Online is the default if neither is specified. If your
+ // application needs to refresh access tokens when the user
+ // is not present at the browser, then use offline. This will
+ // result in your application obtaining a refresh token the
+ // first time your application exchanges an authorization
+ // code for a user.
+ AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
+ AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+ // ApprovalForce forces the users to view the consent dialog
+ // and confirm the permissions request at the URL returned
+ // from AuthCodeURL, even if they've already done so.
+ ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+ setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+ return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches
+// the state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+ var buf bytes.Buffer
+ buf.WriteString(c.Endpoint.AuthURL)
+ v := url.Values{
+ "response_type": {"code"},
+ "client_id": {c.ClientID},
+ "redirect_uri": internal.CondVal(c.RedirectURL),
+ "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
+ "state": internal.CondVal(state),
+ }
+ for _, opt := range opts {
+ opt.setValue(v)
+ }
+ if strings.Contains(c.Endpoint.AuthURL, "?") {
+ buf.WriteByte('&')
+ } else {
+ buf.WriteByte('?')
+ }
+ buf.WriteString(v.Encode())
+ return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+ return retrieveToken(ctx, c, url.Values{
+ "grant_type": {"password"},
+ "username": {username},
+ "password": {password},
+ "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
+ })
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The HTTP client to use is derived from the context.
+// If a client is not provided via the context, http.DefaultClient is used.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
+ return retrieveToken(ctx, c, url.Values{
+ "grant_type": {"authorization_code"},
+ "code": {code},
+ "redirect_uri": internal.CondVal(c.RedirectURL),
+ "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
+ })
+}
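+
+// A condensed sketch of the full 3-legged flow (client credentials,
+// redirect URL, and endpoint URLs are placeholders; `code` arrives on
+// the redirect callback):
+//
+//    conf := &oauth2.Config{
+//        ClientID:     "YOUR_CLIENT_ID",
+//        ClientSecret: "YOUR_CLIENT_SECRET",
+//        RedirectURL:  "https://example.com/callback",
+//        Scopes:       []string{"scope1"},
+//        Endpoint:     oauth2.Endpoint{AuthURL: "https://provider.example.com/auth", TokenURL: "https://provider.example.com/token"},
+//    }
+//    url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
+//    // Redirect the user to url; after validating the returned state,
+//    // exchange the authorization code for a token:
+//    tok, err := conf.Exchange(oauth2.NoContext, code)
+//    client := conf.Client(oauth2.NoContext, tok)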
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
+ return NewClient(ctx, c.TokenSource(ctx, t))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
+ tkr := &tokenRefresher{
+ ctx: ctx,
+ conf: c,
+ }
+ if t != nil {
+ tkr.refreshToken = t.RefreshToken
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: tkr,
+ }
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+ ctx context.Context // used to get HTTP requests
+ conf *Config
+ refreshToken string
+}
+
+// WARNING: Token is not safe for concurrent access, as it
+// updates the tokenRefresher's refreshToken field.
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+ if tf.refreshToken == "" {
+ return nil, errors.New("oauth2: token expired and refresh token is not set")
+ }
+
+ tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+ "grant_type": {"refresh_token"},
+ "refresh_token": {tf.refreshToken},
+ })
+
+ if err != nil {
+ return nil, err
+ }
+ if tf.refreshToken != tk.RefreshToken {
+ tf.refreshToken = tk.RefreshToken
+ }
+ return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+ new TokenSource // called when t is expired.
+
+ mu sync.Mutex // guards t
+ t *Token
+}
+
+// Token returns the current token if it's still valid, else
+// refreshes it via the wrapped TokenSource and returns the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.t.Valid() {
+ return s.t, nil
+ }
+ t, err := s.new.Token()
+ if err != nil {
+ return nil, err
+ }
+ s.t = t
+ return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+ return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+ t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+ return s.t, nil
+}
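+
+// A short sketch: wrapping a pre-issued token (the token value is a
+// placeholder) in a client that never refreshes it:
+//
+//    ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "pre-issued-token"})
+//    client := oauth2.NewClient(oauth2.NoContext, ts)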
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+ if src == nil {
+ c, err := internal.ContextClient(ctx)
+ if err != nil {
+ return &http.Client{Transport: internal.ErrorTransport{err}}
+ }
+ return c
+ }
+ return &http.Client{
+ Transport: &Transport{
+ Base: internal.ContextTransport(ctx),
+ Source: ReuseTokenSource(nil, src),
+ },
+ }
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+ // Don't wrap a reuseTokenSource in itself. That would work,
+ // but cause an unnecessary number of mutex operations.
+ // Just build the equivalent one.
+ if rt, ok := src.(*reuseTokenSource); ok {
+ if t == nil {
+ // Just use it directly.
+ return rt
+ }
+ src = rt.new
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: src,
+ }
+}
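+
+// newCachedClient is an illustrative sketch added by the editor, not part
+// of the upstream vendored file. It shows the intended pairing of a token
+// loaded from some cache (possibly nil on a first run) with
+// ReuseTokenSource and NewClient.
+func newCachedClient(ctx context.Context, conf *Config, cached *Token) *http.Client {
+ // conf.TokenSource refreshes via cached.RefreshToken when needed;
+ // wrapping it in ReuseTokenSource is always safe (see above) and
+ // serves cached directly while it remains valid.
+ ts := ReuseTokenSource(cached, conf.TokenSource(ctx, cached))
+ return NewClient(ctx, ts)
+}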
diff --git a/vendor/src/golang.org/x/oauth2/token.go b/vendor/src/golang.org/x/oauth2/token.go
new file mode 100644
index 0000000000..7a3167f15b
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/token.go
@@ -0,0 +1,158 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string `json:"access_token"`
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string `json:"token_type,omitempty"`
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string `json:"refresh_token,omitempty"`
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time `json:"expiry,omitempty"`
+
+ // raw optionally contains extra metadata from the server
+ // when updating a token.
+ raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+ if strings.EqualFold(t.TokenType, "bearer") {
+ return "Bearer"
+ }
+ if strings.EqualFold(t.TokenType, "mac") {
+ return "MAC"
+ }
+ if strings.EqualFold(t.TokenType, "basic") {
+ return "Basic"
+ }
+ if t.TokenType != "" {
+ return t.TokenType
+ }
+ return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+ r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+ t2 := new(Token)
+ *t2 = *t
+ t2.raw = extra
+ return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+ if raw, ok := t.raw.(map[string]interface{}); ok {
+ return raw[key]
+ }
+
+ vals, ok := t.raw.(url.Values)
+ if !ok {
+ return nil
+ }
+
+ v := vals.Get(key)
+ switch s := strings.TrimSpace(v); strings.Count(s, ".") {
+ case 0: // Contains no "."; try to parse as int
+ if i, err := strconv.ParseInt(s, 10, 64); err == nil {
+ return i
+ }
+ case 1: // Contains a single "."; try to parse as float
+ if f, err := strconv.ParseFloat(s, 64); err == nil {
+ return f
+ }
+ }
+
+ return v
+}
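+
+// extraExample is an illustrative sketch added by the editor, not part of
+// the upstream vendored file. It shows how Extra coerces url.Values
+// entries: a dot-free numeric string parses as int64, a single-dot
+// numeric string as float64, and anything else comes back as a string.
+func extraExample() (interface{}, interface{}, interface{}) {
+ t := &Token{raw: url.Values{
+ "expires_in": {"3600"}, // -> int64(3600)
+ "ratio": {"0.5"}, // -> float64(0.5)
+ "scope": {"email"}, // -> "email"
+ }}
+ return t.Extra("expires_in"), t.Extra("ratio"), t.Extra("scope")
+}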
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+ if t.Expiry.IsZero() {
+ return false
+ }
+ return t.Expiry.Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+ return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+ if t == nil {
+ return nil
+ }
+ return &Token{
+ AccessToken: t.AccessToken,
+ TokenType: t.TokenType,
+ RefreshToken: t.RefreshToken,
+ Expiry: t.Expiry,
+ raw: t.Raw,
+ }
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token,
+// which is returned along with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+ tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+ if err != nil {
+ return nil, err
+ }
+ return tokenFromInternal(tk), nil
+}
diff --git a/vendor/src/golang.org/x/oauth2/transport.go b/vendor/src/golang.org/x/oauth2/transport.go
new file mode 100644
index 0000000000..92ac7e2531
--- /dev/null
+++ b/vendor/src/golang.org/x/oauth2/transport.go
@@ -0,0 +1,132 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Sources.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+ // Source supplies the token to add to outgoing requests'
+ // Authorization headers.
+ Source TokenSource
+
+ // Base is the base RoundTripper used to make HTTP requests.
+ // If nil, http.DefaultTransport is used.
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or token is expired,
+// tries to refresh/fetch a new token.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ if t.Source == nil {
+ return nil, errors.New("oauth2: Transport's Source is nil")
+ }
+ token, err := t.Source.Token()
+ if err != nil {
+ return nil, err
+ }
+
+ req2 := cloneRequest(req) // per RoundTripper contract
+ token.SetAuthHeader(req2)
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ res.Body = &onEOFReader{
+ rc: res.Body,
+ fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *Transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+ return r2
+}
+
+type onEOFReader struct {
+ rc io.ReadCloser
+ fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *onEOFReader) Close() error {
+ err := r.rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *onEOFReader) runFunc() {
+ if fn := r.fn; fn != nil {
+ fn()
+ r.fn = nil
+ }
+}
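+
+// staticRoundTrip is an illustrative sketch added by the editor, not part
+// of the upstream vendored file. It wires Transport to a StaticTokenSource
+// so every outgoing request carries "Authorization: Bearer example-token".
+func staticRoundTrip(req *http.Request) (*http.Response, error) {
+ t := &Transport{
+ Source: StaticTokenSource(&Token{AccessToken: "example-token"}),
+ // Base is left nil, so http.DefaultTransport is used.
+ }
+ return t.RoundTrip(req)
+}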
diff --git a/vendor/src/google.golang.org/api/LICENSE b/vendor/src/google.golang.org/api/LICENSE
new file mode 100644
index 0000000000..263aa7a0c1
--- /dev/null
+++ b/vendor/src/google.golang.org/api/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/google.golang.org/api/gensupport/json.go b/vendor/src/google.golang.org/api/gensupport/json.go
new file mode 100644
index 0000000000..193def5938
--- /dev/null
+++ b/vendor/src/google.golang.org/api/gensupport/json.go
@@ -0,0 +1,177 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gensupport is an internal implementation detail used by code
+// generated by the google-api-go-generator tool.
+//
+// This package may be modified at any time without regard for backwards
+// compatibility. It should not be used directly by API users.
+package gensupport
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// MarshalJSON returns a JSON encoding of schema containing only selected fields.
+// A field is selected if it is not a nil pointer or nil interface, and:
+// * it has a non-empty value, or
+// * its field name is present in forceSendFields.
+// The JSON key for each selected field is taken from the field's json: struct tag.
+func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) {
+ if len(forceSendFields) == 0 {
+ return json.Marshal(schema)
+ }
+
+ mustInclude := make(map[string]struct{})
+ for _, f := range forceSendFields {
+ mustInclude[f] = struct{}{}
+ }
+
+ dataMap, err := schemaToMap(schema, mustInclude)
+ if err != nil {
+ return nil, err
+ }
+ return json.Marshal(dataMap)
+}
+
+func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) {
+ m := make(map[string]interface{})
+ s := reflect.ValueOf(schema)
+ st := s.Type()
+
+ for i := 0; i < s.NumField(); i++ {
+ jsonTag := st.Field(i).Tag.Get("json")
+ if jsonTag == "" {
+ continue
+ }
+ tag, err := parseJSONTag(jsonTag)
+ if err != nil {
+ return nil, err
+ }
+ if tag.ignore {
+ continue
+ }
+
+ v := s.Field(i)
+ f := st.Field(i)
+ if !includeField(v, f, mustInclude) {
+ continue
+ }
+
+ // nil maps are treated as empty maps.
+ if f.Type.Kind() == reflect.Map && v.IsNil() {
+ m[tag.apiName] = map[string]string{}
+ continue
+ }
+
+ // nil slices are treated as empty slices.
+ if f.Type.Kind() == reflect.Slice && v.IsNil() {
+ m[tag.apiName] = []bool{}
+ continue
+ }
+
+ if tag.stringFormat {
+ m[tag.apiName] = formatAsString(v, f.Type.Kind())
+ } else {
+ m[tag.apiName] = v.Interface()
+ }
+ }
+ return m, nil
+}
+
+// formatAsString returns a string representation of v, dereferencing it first if possible.
+func formatAsString(v reflect.Value, kind reflect.Kind) string {
+ if kind == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+
+ return fmt.Sprintf("%v", v.Interface())
+}
+
+// jsonTag represents a restricted version of the struct tag format used by encoding/json.
+// It is used to describe the JSON encoding of fields in a Schema struct.
+type jsonTag struct {
+ apiName string
+ stringFormat bool
+ ignore bool
+}
+
+// parseJSONTag parses a restricted version of the struct tag format used by encoding/json.
+// The format of the tag must match that generated by the Schema.writeSchemaStruct method
+// in the api generator.
+func parseJSONTag(val string) (jsonTag, error) {
+ if val == "-" {
+ return jsonTag{ignore: true}, nil
+ }
+
+ var tag jsonTag
+
+ i := strings.Index(val, ",")
+ if i == -1 || val[:i] == "" {
+ return tag, fmt.Errorf("malformed json tag: %s", val)
+ }
+
+ tag = jsonTag{
+ apiName: val[:i],
+ }
+
+ switch val[i+1:] {
+ case "omitempty":
+ case "omitempty,string":
+ tag.stringFormat = true
+ default:
+ return tag, fmt.Errorf("malformed json tag: %s", val)
+ }
+
+ return tag, nil
+}
+
+// includeField reports whether the struct field "f" with value "v" should be included in JSON output.
+func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool {
+ // The regular JSON encoding of a nil pointer is "null", which means "delete this field".
+ // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
+ // However, many fields are not pointers, so there would be no way to delete these fields.
+ // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields.
+ // Deletion will be handled by a separate mechanism.
+ if f.Type.Kind() == reflect.Ptr && v.IsNil() {
+ return false
+ }
+
+ // The "any" type is represented as an interface{}. If this interface
+ // is nil, there is no reasonable representation to send. We ignore
+ // these fields, for the same reasons as given above for pointers.
+ if f.Type.Kind() == reflect.Interface && v.IsNil() {
+ return false
+ }
+
+ _, ok := mustInclude[f.Name]
+ return ok || !isEmptyValue(v)
+}
+
+// isEmptyValue reports whether v is the empty value for its type. This
+// implementation is based on that of the encoding/json package, but its
+// correctness does not depend on it being identical. What's important is that
+// this function return false in situations where v should not be sent as part
+// of a PATCH operation.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
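+
+// forcedExample and marshalForced are an illustrative sketch added by the
+// editor, not part of the upstream vendored file. Count holds its zero
+// value, which omitempty would normally drop, but naming the Go field in
+// forceSendFields keeps it in the output.
+type forcedExample struct {
+ Name string `json:"name,omitempty"`
+ Count int `json:"count,omitempty"`
+}
+
+func marshalForced() ([]byte, error) {
+ v := forcedExample{Name: "a"}
+ return MarshalJSON(v, []string{"Count"}) // -> {"count":0,"name":"a"}
+}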
diff --git a/vendor/src/google.golang.org/api/gensupport/params.go b/vendor/src/google.golang.org/api/gensupport/params.go
new file mode 100644
index 0000000000..dfad3f414d
--- /dev/null
+++ b/vendor/src/google.golang.org/api/gensupport/params.go
@@ -0,0 +1,31 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gensupport
+
+import "net/url"
+
+// URLParams is a simplified replacement for url.Values
+// that safely builds up URL parameters for encoding.
+type URLParams map[string][]string
+
+// Set sets the key to value.
+// It replaces any existing values.
+func (u URLParams) Set(key, value string) {
+ u[key] = []string{value}
+}
+
+// SetMulti sets the key to an array of values.
+// It replaces any existing values.
+// Note that values must not be modified after calling SetMulti,
+// so the caller is responsible for making a copy if necessary.
+func (u URLParams) SetMulti(key string, values []string) {
+ u[key] = values
+}
+
+// Encode encodes the values into ``URL encoded'' form
+// ("bar=baz&foo=quux") sorted by key.
+func (u URLParams) Encode() string {
+ return url.Values(u).Encode()
+}
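+
+// encodeExample is an illustrative sketch added by the editor, not part of
+// the upstream vendored file. It builds a query string with URLParams.
+func encodeExample() string {
+ p := URLParams{}
+ p.Set("pageSize", "50")
+ p.SetMulti("fields", []string{"id", "name"})
+ // Encode sorts by key: "fields=id&fields=name&pageSize=50".
+ return p.Encode()
+}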
diff --git a/vendor/src/google.golang.org/api/googleapi/googleapi.go b/vendor/src/google.golang.org/api/googleapi/googleapi.go
new file mode 100644
index 0000000000..fbae951023
--- /dev/null
+++ b/vendor/src/google.golang.org/api/googleapi/googleapi.go
@@ -0,0 +1,588 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package googleapi contains the common code shared by all Google API
+// libraries.
+package googleapi // import "google.golang.org/api/googleapi"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
+ "google.golang.org/api/googleapi/internal/uritemplates"
+)
+
+// ContentTyper is an interface for Readers which know (or would like
+// to override) their Content-Type. If a media body doesn't implement
+// ContentTyper, the type is sniffed from the content using
+// http.DetectContentType.
+type ContentTyper interface {
+ ContentType() string
+}
+
+// A SizeReaderAt is a ReaderAt with a Size method.
+// An io.SectionReader implements SizeReaderAt.
+type SizeReaderAt interface {
+ io.ReaderAt
+ Size() int64
+}
+
+// ServerResponse is embedded in each Do response and
+// provides the HTTP status code and header sent by the server.
+type ServerResponse struct {
+ // HTTPStatusCode is the server's response status code.
+ // When using a resource method's Do call, this will always be in the 2xx range.
+ HTTPStatusCode int
+ // Header contains the response header fields from the server.
+ Header http.Header
+}
+
+const (
+ Version = "0.5"
+
+ // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
+ statusResumeIncomplete = 308
+
+ // UserAgent is the header string used to identify this package.
+ UserAgent = "google-api-go-client/" + Version
+
+ // uploadPause determines the delay between failed upload attempts
+ uploadPause = 1 * time.Second
+)
+
+// Error contains an error response from the server.
+type Error struct {
+ // Code is the HTTP response status code and will always be populated.
+ Code int `json:"code"`
+ // Message is the server response message and is only populated when
+ // explicitly referenced by the JSON server response.
+ Message string `json:"message"`
+ // Body is the raw response returned by the server.
+ // It is often but not always JSON, depending on how the request fails.
+ Body string
+ // Header contains the response header fields from the server.
+ Header http.Header
+
+ Errors []ErrorItem
+}
+
+// ErrorItem is a detailed error code & message from the Google API frontend.
+type ErrorItem struct {
+ // Reason is the typed error code. For example: "some_example".
+ Reason string `json:"reason"`
+ // Message is the human-readable description of the error.
+ Message string `json:"message"`
+}
+
+func (e *Error) Error() string {
+ if len(e.Errors) == 0 && e.Message == "" {
+ return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body)
+ }
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code)
+ if e.Message != "" {
+ fmt.Fprintf(&buf, "%s", e.Message)
+ }
+ if len(e.Errors) == 0 {
+ return strings.TrimSpace(buf.String())
+ }
+ if len(e.Errors) == 1 && e.Errors[0].Message == e.Message {
+ fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason)
+ return buf.String()
+ }
+ fmt.Fprintln(&buf, "\nMore details:")
+ for _, v := range e.Errors {
+ fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message)
+ }
+ return buf.String()
+}
+
+type errorReply struct {
+ Error *Error `json:"error"`
+}
+
+// CheckResponse returns an error (of type *Error) if the response
+// status code is not 2xx.
+func CheckResponse(res *http.Response) error {
+ if res.StatusCode >= 200 && res.StatusCode <= 299 {
+ return nil
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err == nil {
+ jerr := new(errorReply)
+ err = json.Unmarshal(slurp, jerr)
+ if err == nil && jerr.Error != nil {
+ if jerr.Error.Code == 0 {
+ jerr.Error.Code = res.StatusCode
+ }
+ jerr.Error.Body = string(slurp)
+ return jerr.Error
+ }
+ }
+ return &Error{
+ Code: res.StatusCode,
+ Body: string(slurp),
+ Header: res.Header,
+ }
+}
+
+// IsNotModified reports whether err is the result of the
+// server replying with http.StatusNotModified.
+// Such error values are sometimes returned by "Do" methods
+// on calls when If-None-Match is used.
+func IsNotModified(err error) bool {
+ if err == nil {
+ return false
+ }
+ ae, ok := err.(*Error)
+ return ok && ae.Code == http.StatusNotModified
+}
+
+// CheckMediaResponse returns an error (of type *Error) if the response
+// status code is not 2xx. Unlike CheckResponse it does not assume the
+// body is a JSON error document.
+func CheckMediaResponse(res *http.Response) error {
+ if res.StatusCode >= 200 && res.StatusCode <= 299 {
+ return nil
+ }
+ slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
+ res.Body.Close()
+ return &Error{
+ Code: res.StatusCode,
+ Body: string(slurp),
+ }
+}
+
+type MarshalStyle bool
+
+var WithDataWrapper = MarshalStyle(true)
+var WithoutDataWrapper = MarshalStyle(false)
+
+func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
+ buf := new(bytes.Buffer)
+ if wrap {
+ buf.Write([]byte(`{"data": `))
+ }
+ err := json.NewEncoder(buf).Encode(v)
+ if err != nil {
+ return nil, err
+ }
+ if wrap {
+ buf.Write([]byte(`}`))
+ }
+ return buf, nil
+}
+
+func getMediaType(media io.Reader) (io.Reader, string) {
+ if typer, ok := media.(ContentTyper); ok {
+ return media, typer.ContentType()
+ }
+
+ pr, pw := io.Pipe()
+ typ := "application/octet-stream"
+ buf, err := ioutil.ReadAll(io.LimitReader(media, 512))
+ if err != nil {
+ pw.CloseWithError(fmt.Errorf("error reading media: %v", err))
+ return pr, typ
+ }
+ typ = http.DetectContentType(buf)
+ mr := io.MultiReader(bytes.NewReader(buf), media)
+ go func() {
+ _, err = io.Copy(pw, mr)
+ if err != nil {
+ pw.CloseWithError(fmt.Errorf("error reading media: %v", err))
+ return
+ }
+ pw.Close()
+ }()
+ return pr, typ
+}
+
+// DetectMediaType detects and returns the content type of the provided media.
+// If the type cannot be determined, "application/octet-stream" is returned.
+func DetectMediaType(media io.ReaderAt) string {
+ if typer, ok := media.(ContentTyper); ok {
+ return typer.ContentType()
+ }
+
+ typ := "application/octet-stream"
+ buf := make([]byte, 1024)
+ n, err := media.ReadAt(buf, 0)
+ buf = buf[:n]
+ if err == nil || err == io.EOF {
+ typ = http.DetectContentType(buf)
+ }
+ return typ
+}
+
+type Lengther interface {
+ Len() int
+}
+
+// endingWithErrorReader reads from r until it returns an error. If the
+// final error from r is io.EOF and e is non-nil, e is used instead.
+type endingWithErrorReader struct {
+ r io.Reader
+ e error
+}
+
+func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
+ n, err = er.r.Read(p)
+ if err == io.EOF && er.e != nil {
+ err = er.e
+ }
+ return
+}
+
+func typeHeader(contentType string) textproto.MIMEHeader {
+ h := make(textproto.MIMEHeader)
+ h.Set("Content-Type", contentType)
+ return h
+}
+
+// countingWriter counts the number of bytes it receives to write, but
+// discards them.
+type countingWriter struct {
+ n *int64
+}
+
+func (w countingWriter) Write(p []byte) (int, error) {
+ *w.n += int64(len(p))
+ return len(p), nil
+}
+
+// ConditionallyIncludeMedia does nothing if media is nil.
+//
+// bodyp is an in/out parameter. It should initially point to the
+// reader of the application/json (or whatever) payload to send in the
+// API request. It's updated to point to the multipart body reader.
+//
+// ctypep is an in/out parameter. It should initially point to the
+// content type of the bodyp, usually "application/json". It's updated
+// to the "multipart/related" content type, with random boundary.
+//
+// The returned cancel function aborts the multipart body by closing its
+// underlying pipe with an error; ok reports whether media was included.
+func ConditionallyIncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) (cancel func(), ok bool) {
+ if media == nil {
+ return
+ }
+ // Get the media type, which might return a different reader instance.
+ var mediaType string
+ media, mediaType = getMediaType(media)
+
+ body, bodyType := *bodyp, *ctypep
+
+ pr, pw := io.Pipe()
+ mpw := multipart.NewWriter(pw)
+ *bodyp = pr
+ *ctypep = "multipart/related; boundary=" + mpw.Boundary()
+ go func() {
+ w, err := mpw.CreatePart(typeHeader(bodyType))
+ if err != nil {
+ mpw.Close()
+ pw.CloseWithError(fmt.Errorf("googleapi: body CreatePart failed: %v", err))
+ return
+ }
+ _, err = io.Copy(w, body)
+ if err != nil {
+ mpw.Close()
+ pw.CloseWithError(fmt.Errorf("googleapi: body Copy failed: %v", err))
+ return
+ }
+
+ w, err = mpw.CreatePart(typeHeader(mediaType))
+ if err != nil {
+ mpw.Close()
+ pw.CloseWithError(fmt.Errorf("googleapi: media CreatePart failed: %v", err))
+ return
+ }
+ _, err = io.Copy(w, media)
+ if err != nil {
+ mpw.Close()
+ pw.CloseWithError(fmt.Errorf("googleapi: media Copy failed: %v", err))
+ return
+ }
+ mpw.Close()
+ pw.Close()
+ }()
+ cancel = func() { pw.CloseWithError(errAborted) }
+ return cancel, true
+}
+
+var errAborted = errors.New("googleapi: upload aborted")
+
+// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
+// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
+// The remaining usable pieces of resumable uploads are exposed in each auto-generated API.
+type ProgressUpdater func(current, total int64)
+
+// ResumableUpload is used by the generated APIs to provide resumable uploads.
+// It is not used by developers directly.
+type ResumableUpload struct {
+ Client *http.Client
+ // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
+ URI string
+ UserAgent string // User-Agent for header of the request
+ // Media is the object being uploaded.
+ Media io.ReaderAt
+ // MediaType defines the media type, e.g. "image/jpeg".
+ MediaType string
+ // ContentLength is the full size of the object being uploaded.
+ ContentLength int64
+
+ mu sync.Mutex // guards progress
+ progress int64 // number of bytes uploaded so far
+
+ // Callback is an optional function that will be called upon every progress update.
+ Callback ProgressUpdater
+}
+
+var (
+ // rangeRE matches the transfer status response from the server. $1 is the last byte index uploaded.
+ rangeRE = regexp.MustCompile(`^bytes=0\-(\d+)$`)
+ // chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
+ // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
+ chunkSize int64 = 1 << 18
+)
+
+// Progress returns the number of bytes uploaded at this point.
+func (rx *ResumableUpload) Progress() int64 {
+ rx.mu.Lock()
+ defer rx.mu.Unlock()
+ return rx.progress
+}
+
+func (rx *ResumableUpload) transferStatus(ctx context.Context) (int64, *http.Response, error) {
+ req, _ := http.NewRequest("POST", rx.URI, nil)
+ req.ContentLength = 0
+ req.Header.Set("User-Agent", rx.UserAgent)
+ req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
+ res, err := ctxhttp.Do(ctx, rx.Client, req)
+ if err != nil || res.StatusCode != statusResumeIncomplete {
+ return 0, res, err
+ }
+ var start int64
+ if m := rangeRE.FindStringSubmatch(res.Header.Get("Range")); len(m) == 2 {
+ start, err = strconv.ParseInt(m[1], 10, 64)
+ if err != nil {
+ return 0, nil, fmt.Errorf("unable to parse range size %v", m[1])
+ }
+ start++ // Start at the next byte
+ }
+ return start, res, nil
+}
+
+type chunk struct {
+ body io.Reader
+ size int64
+ err error
+}
+
+func (rx *ResumableUpload) transferChunks(ctx context.Context) (*http.Response, error) {
+ start, res, err := rx.transferStatus(ctx)
+ if err != nil || res.StatusCode != statusResumeIncomplete {
+ if err == context.Canceled {
+ return &http.Response{StatusCode: http.StatusRequestTimeout}, err
+ }
+ return res, err
+ }
+
+ for {
+ select { // Check for cancellation
+ case <-ctx.Done():
+ res.StatusCode = http.StatusRequestTimeout
+ return res, ctx.Err()
+ default:
+ }
+ reqSize := rx.ContentLength - start
+ if reqSize > chunkSize {
+ reqSize = chunkSize
+ }
+ r := io.NewSectionReader(rx.Media, start, reqSize)
+ req, _ := http.NewRequest("POST", rx.URI, r)
+ req.ContentLength = reqSize
+ req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
+ req.Header.Set("Content-Type", rx.MediaType)
+ req.Header.Set("User-Agent", rx.UserAgent)
+ res, err = ctxhttp.Do(ctx, rx.Client, req)
+ start += reqSize
+ if err == nil && (res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK) {
+ rx.mu.Lock()
+ rx.progress = start // keep track of number of bytes sent so far
+ rx.mu.Unlock()
+ if rx.Callback != nil {
+ rx.Callback(start, rx.ContentLength)
+ }
+ }
+ if err != nil || res.StatusCode != statusResumeIncomplete {
+ break
+ }
+ }
+ return res, err
+}
+
+var sleep = time.Sleep // override in unit tests
+
+// Upload starts the process of a resumable upload with a cancellable context.
+// It retries indefinitely (with a pause of uploadPause between attempts) until cancelled.
+// It is called from the auto-generated API code and is not visible to the user.
+// rx is private to the auto-generated API code.
+func (rx *ResumableUpload) Upload(ctx context.Context) (*http.Response, error) {
+ var res *http.Response
+ var err error
+ for {
+ res, err = rx.transferChunks(ctx)
+ if err != nil || res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
+ return res, err
+ }
+ select { // Check for cancellation
+ case <-ctx.Done():
+ res.StatusCode = http.StatusRequestTimeout
+ return res, ctx.Err()
+ default:
+ }
+ sleep(uploadPause)
+ }
+ return res, err
+}
+
+func ResolveRelative(basestr, relstr string) string {
+ u, _ := url.Parse(basestr)
+ rel, _ := url.Parse(relstr)
+ u = u.ResolveReference(rel)
+ us := u.String()
+ us = strings.Replace(us, "%7B", "{", -1)
+ us = strings.Replace(us, "%7D", "}", -1)
+ return us
+}
+
+// has4860Fix is whether this Go environment contains the fix for
+// http://golang.org/issue/4860
+var has4860Fix bool
+
+// init initializes has4860Fix by checking the behavior of the net/http package.
+func init() {
+ r := http.Request{
+ URL: &url.URL{
+ Scheme: "http",
+ Opaque: "//opaque",
+ },
+ }
+ b := &bytes.Buffer{}
+ r.Write(b)
+ has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http"))
+}
+
+// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it
+// don't alter any hex-escaped characters in u.Path.
+func SetOpaque(u *url.URL) {
+ u.Opaque = "//" + u.Host + u.Path
+ if !has4860Fix {
+ u.Opaque = u.Scheme + ":" + u.Opaque
+ }
+}
+
+// Expand substitutes any {encoded} strings in the URL passed in using
+// the map supplied.
+//
+// This calls SetOpaque to avoid encoding of the parameters in the URL path.
+func Expand(u *url.URL, expansions map[string]string) {
+ expanded, err := uritemplates.Expand(u.Path, expansions)
+ if err == nil {
+ u.Path = expanded
+ SetOpaque(u)
+ }
+}
+
+// CloseBody is used to close res.Body.
+// Prior to calling Close, it also tries to Read a small amount to see an EOF.
+// Not seeing an EOF can prevent HTTP Transports from reusing connections.
+func CloseBody(res *http.Response) {
+ if res == nil || res.Body == nil {
+ return
+ }
+ // Justification for 3 byte reads: two for up to "\r\n" after
+ // a JSON/XML document, and then 1 to see EOF if we haven't yet.
+ // TODO(bradfitz): detect Go 1.3+ and skip these reads.
+ // See https://codereview.appspot.com/58240043
+ // and https://codereview.appspot.com/49570044
+ buf := make([]byte, 1)
+ for i := 0; i < 3; i++ {
+ _, err := res.Body.Read(buf)
+ if err != nil {
+ break
+ }
+ }
+ res.Body.Close()
+}
+
+// VariantType returns the type name of the given variant.
+// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned.
+// This is used to support "variant" APIs that can return one of a number of different types.
+func VariantType(t map[string]interface{}) string {
+ s, _ := t["type"].(string)
+ return s
+}
+
+// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'.
+// This is used to support "variant" APIs that can return one of a number of different types.
+// It reports whether the conversion was successful.
+func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
+ var buf bytes.Buffer
+ err := json.NewEncoder(&buf).Encode(v)
+ if err != nil {
+ return false
+ }
+ return json.Unmarshal(buf.Bytes(), dst) == nil
+}
+
+// A Field names a field to be retrieved with a partial response.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+//
+// Partial responses can dramatically reduce the amount of data that must be sent to your application.
+// In order to request partial responses, you can specify the full list of fields
+// that your application needs by adding the Fields option to your request.
+//
+// Field strings use camelCase with leading lower-case characters to identify fields within the response.
+//
+// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields,
+// you could request just those fields like this:
+//
+// svc.Events.List().Fields("nextPageToken", "items/id").Do()
+//
+// or if you were also interested in each Item's "Updated" field, you can combine them like this:
+//
+// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do()
+//
+// More information about field formatting can be found here:
+// https://developers.google.com/+/api/#fields-syntax
+//
+// Another way to find field names is through the Google API explorer:
+// https://developers.google.com/apis-explorer/#p/
+type Field string
+
+// CombineFields combines fields into a single string.
+func CombineFields(s []Field) string {
+ r := make([]string, len(s))
+ for i, v := range s {
+ r[i] = string(v)
+ }
+ return strings.Join(r, ",")
+}
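+
+// fieldsExample is an illustrative sketch added by the editor, not part of
+// the upstream vendored file. It joins Field values into the single
+// comma-separated string that the "fields" URL parameter expects.
+func fieldsExample() string {
+ return CombineFields([]Field{"nextPageToken", "items(id,updated)"})
+ // -> "nextPageToken,items(id,updated)"
+}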
diff --git a/vendor/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
new file mode 100644
index 0000000000..de9c88cb65
--- /dev/null
+++ b/vendor/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2013 Joshua Tacoma
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
new file mode 100644
index 0000000000..8a84813fe5
--- /dev/null
+++ b/vendor/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
@@ -0,0 +1,359 @@
+// Copyright 2013 Joshua Tacoma. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uritemplates is a level 4 implementation of RFC 6570 (URI
+// Template, http://tools.ietf.org/html/rfc6570).
+//
+// To use uritemplates, parse a template string and expand it with a value
+// map:
+//
+// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
+// values := make(map[string]interface{})
+// values["user"] = "jtacoma"
+// values["repo"] = "uritemplates"
+// expanded, _ := template.Expand(values)
+// fmt.Println(expanded)
+//
+package uritemplates
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
+ reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
+ validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
+ hex = []byte("0123456789ABCDEF")
+)
+
+func pctEncode(src []byte) []byte {
+ dst := make([]byte, len(src)*3)
+ for i, b := range src {
+ buf := dst[i*3 : i*3+3]
+ buf[0] = 0x25
+ buf[1] = hex[b/16]
+ buf[2] = hex[b%16]
+ }
+ return dst
+}
+
+func escape(s string, allowReserved bool) (escaped string) {
+ if allowReserved {
+ escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
+ } else {
+ escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
+ }
+ return escaped
+}
+
+// A UriTemplate is a parsed representation of a URI template.
+type UriTemplate struct {
+ raw string
+ parts []templatePart
+}
+
+// Parse parses a URI template string into a UriTemplate object.
+func Parse(rawtemplate string) (template *UriTemplate, err error) {
+ template = new(UriTemplate)
+ template.raw = rawtemplate
+ split := strings.Split(rawtemplate, "{")
+ template.parts = make([]templatePart, len(split)*2-1)
+ for i, s := range split {
+ if i == 0 {
+ if strings.Contains(s, "}") {
+ err = errors.New("unexpected }")
+ break
+ }
+ template.parts[i].raw = s
+ } else {
+ subsplit := strings.Split(s, "}")
+ if len(subsplit) != 2 {
+ err = errors.New("malformed template")
+ break
+ }
+ expression := subsplit[0]
+ template.parts[i*2-1], err = parseExpression(expression)
+ if err != nil {
+ break
+ }
+ template.parts[i*2].raw = subsplit[1]
+ }
+ }
+ if err != nil {
+ template = nil
+ }
+ return template, err
+}
+
+type templatePart struct {
+ raw string
+ terms []templateTerm
+ first string
+ sep string
+ named bool
+ ifemp string
+ allowReserved bool
+}
+
+type templateTerm struct {
+ name string
+ explode bool
+ truncate int
+}
+
+func parseExpression(expression string) (result templatePart, err error) {
+ switch expression[0] {
+ case '+':
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ case '.':
+ result.first = "."
+ result.sep = "."
+ expression = expression[1:]
+ case '/':
+ result.first = "/"
+ result.sep = "/"
+ expression = expression[1:]
+ case ';':
+ result.first = ";"
+ result.sep = ";"
+ result.named = true
+ expression = expression[1:]
+ case '?':
+ result.first = "?"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '&':
+ result.first = "&"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '#':
+ result.first = "#"
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ default:
+ result.sep = ","
+ }
+ rawterms := strings.Split(expression, ",")
+ result.terms = make([]templateTerm, len(rawterms))
+ for i, raw := range rawterms {
+ result.terms[i], err = parseTerm(raw)
+ if err != nil {
+ break
+ }
+ }
+ return result, err
+}
+
+func parseTerm(term string) (result templateTerm, err error) {
+ if strings.HasSuffix(term, "*") {
+ result.explode = true
+ term = term[:len(term)-1]
+ }
+ split := strings.Split(term, ":")
+ if len(split) == 1 {
+ result.name = term
+ } else if len(split) == 2 {
+ result.name = split[0]
+ var parsed int64
+ parsed, err = strconv.ParseInt(split[1], 10, 0)
+ result.truncate = int(parsed)
+ } else {
+ err = errors.New("multiple colons in same term")
+ }
+ if !validname.MatchString(result.name) {
+ err = errors.New("not a valid name: " + result.name)
+ }
+ if result.explode && result.truncate > 0 {
+ err = errors.New("both explode and prefix modifers on same term")
+ }
+ return result, err
+}
+
+// Expand expands a URI template with a set of values to produce a string.
+func (self *UriTemplate) Expand(value interface{}) (string, error) {
+ values, ismap := value.(map[string]interface{})
+ if !ismap {
+ m, ismap := struct2map(value)
+ if !ismap {
+ return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.")
+ }
+ return self.Expand(m)
+ }
+ var buf bytes.Buffer
+ for _, p := range self.parts {
+ err := p.expand(&buf, values)
+ if err != nil {
+ return "", err
+ }
+ }
+ return buf.String(), nil
+}
+
+func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {
+ if len(self.raw) > 0 {
+ buf.WriteString(self.raw)
+ return nil
+ }
+ var zeroLen = buf.Len()
+ buf.WriteString(self.first)
+ var firstLen = buf.Len()
+ for _, term := range self.terms {
+ value, exists := values[term.name]
+ if !exists {
+ continue
+ }
+ if buf.Len() != firstLen {
+ buf.WriteString(self.sep)
+ }
+ switch v := value.(type) {
+ case string:
+ self.expandString(buf, term, v)
+ case []interface{}:
+ self.expandArray(buf, term, v)
+ case map[string]interface{}:
+ if term.truncate > 0 {
+ return errors.New("cannot truncate a map expansion")
+ }
+ self.expandMap(buf, term, v)
+ default:
+ if m, ismap := struct2map(value); ismap {
+ if term.truncate > 0 {
+ return errors.New("cannot truncate a map expansion")
+ }
+ self.expandMap(buf, term, m)
+ } else {
+ str := fmt.Sprintf("%v", value)
+ self.expandString(buf, term, str)
+ }
+ }
+ }
+ if buf.Len() == firstLen {
+ original := buf.Bytes()[:zeroLen]
+ buf.Reset()
+ buf.Write(original)
+ }
+ return nil
+}
+
+func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
+ if self.named {
+ buf.WriteString(name)
+ if empty {
+ buf.WriteString(self.ifemp)
+ } else {
+ buf.WriteString("=")
+ }
+ }
+}
+
+func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ self.expandName(buf, t.name, len(s) == 0)
+ buf.WriteString(escape(s, self.allowReserved))
+}
+
+func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
+ if len(a) == 0 {
+ return
+ } else if !t.explode {
+ self.expandName(buf, t.name, false)
+ }
+ for i, value := range a {
+ if t.explode && i > 0 {
+ buf.WriteString(self.sep)
+ } else if i > 0 {
+ buf.WriteString(",")
+ }
+ var s string
+ switch v := value.(type) {
+ case string:
+ s = v
+ default:
+ s = fmt.Sprintf("%v", v)
+ }
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ if self.named && t.explode {
+ self.expandName(buf, t.name, len(s) == 0)
+ }
+ buf.WriteString(escape(s, self.allowReserved))
+ }
+}
+
+func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {
+ if len(m) == 0 {
+ return
+ }
+ if !t.explode {
+ self.expandName(buf, t.name, len(m) == 0)
+ }
+ var firstLen = buf.Len()
+ for k, value := range m {
+ if firstLen != buf.Len() {
+ if t.explode {
+ buf.WriteString(self.sep)
+ } else {
+ buf.WriteString(",")
+ }
+ }
+ var s string
+ switch v := value.(type) {
+ case string:
+ s = v
+ default:
+ s = fmt.Sprintf("%v", v)
+ }
+ if t.explode {
+ buf.WriteString(escape(k, self.allowReserved))
+ buf.WriteRune('=')
+ buf.WriteString(escape(s, self.allowReserved))
+ } else {
+ buf.WriteString(escape(k, self.allowReserved))
+ buf.WriteRune(',')
+ buf.WriteString(escape(s, self.allowReserved))
+ }
+ }
+}
+
+func struct2map(v interface{}) (map[string]interface{}, bool) {
+ value := reflect.ValueOf(v)
+ switch value.Type().Kind() {
+ case reflect.Ptr:
+ return struct2map(value.Elem().Interface())
+ case reflect.Struct:
+ m := make(map[string]interface{})
+ for i := 0; i < value.NumField(); i++ {
+ tag := value.Type().Field(i).Tag
+ var name string
+ if strings.Contains(string(tag), ":") {
+ name = tag.Get("uri")
+ } else {
+ name = strings.TrimSpace(string(tag))
+ }
+ if len(name) == 0 {
+ name = value.Type().Field(i).Name
+ }
+ m[name] = value.Field(i).Interface()
+ }
+ return m, true
+ }
+ return nil, false
+}
diff --git a/vendor/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go
new file mode 100644
index 0000000000..399ef46236
--- /dev/null
+++ b/vendor/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go
@@ -0,0 +1,13 @@
+package uritemplates
+
+func Expand(path string, expansions map[string]string) (string, error) {
+ template, err := Parse(path)
+ if err != nil {
+ return "", err
+ }
+ values := make(map[string]interface{})
+ for k, v := range expansions {
+ values[k] = v
+ }
+ return template.Expand(values)
+}
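+
+// expandExample is an illustrative sketch added by the editor, not part of
+// the upstream vendored file. It expands a path template with the
+// string-map helper above; simple {name} expressions percent-encode any
+// reserved characters in the supplied values.
+func expandExample() (string, error) {
+ return Expand("/v1beta3/projects/{project}/logs/{log}",
+ map[string]string{"project": "my-project", "log": "syslog"})
+ // -> "/v1beta3/projects/my-project/logs/syslog"
+}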
diff --git a/vendor/src/google.golang.org/api/googleapi/types.go b/vendor/src/google.golang.org/api/googleapi/types.go
new file mode 100644
index 0000000000..a02b4b0716
--- /dev/null
+++ b/vendor/src/google.golang.org/api/googleapi/types.go
@@ -0,0 +1,182 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package googleapi
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+// Int64s is a slice of int64s that marshal as quoted strings in JSON.
+type Int64s []int64
+
+func (q *Int64s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, int64(v))
+ }
+ return nil
+}
+
+// Int32s is a slice of int32s that marshal as quoted strings in JSON.
+type Int32s []int32
+
+func (q *Int32s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, int32(v))
+ }
+ return nil
+}
+
+// Uint64s is a slice of uint64s that marshal as quoted strings in JSON.
+type Uint64s []uint64
+
+func (q *Uint64s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, uint64(v))
+ }
+ return nil
+}
+
+// Uint32s is a slice of uint32s that marshal as quoted strings in JSON.
+type Uint32s []uint32
+
+func (q *Uint32s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, uint32(v))
+ }
+ return nil
+}
+
+// Float64s is a slice of float64s that marshal as quoted strings in JSON.
+type Float64s []float64
+
+func (q *Float64s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, float64(v))
+ }
+ return nil
+}
+
+func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) {
+ dst := make([]byte, 0, 2+n*10) // somewhat arbitrary
+ dst = append(dst, '[')
+ for i := 0; i < n; i++ {
+ if i > 0 {
+ dst = append(dst, ',')
+ }
+ dst = append(dst, '"')
+ dst = fn(dst, i)
+ dst = append(dst, '"')
+ }
+ dst = append(dst, ']')
+ return dst, nil
+}
+
+func (s Int64s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(s), func(dst []byte, i int) []byte {
+ return strconv.AppendInt(dst, s[i], 10)
+ })
+}
+
+func (s Int32s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(s), func(dst []byte, i int) []byte {
+ return strconv.AppendInt(dst, int64(s[i]), 10)
+ })
+}
+
+func (s Uint64s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(s), func(dst []byte, i int) []byte {
+ return strconv.AppendUint(dst, s[i], 10)
+ })
+}
+
+func (s Uint32s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(s), func(dst []byte, i int) []byte {
+ return strconv.AppendUint(dst, uint64(s[i]), 10)
+ })
+}
+
+func (s Float64s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(s), func(dst []byte, i int) []byte {
+ return strconv.AppendFloat(dst, s[i], 'g', -1, 64)
+ })
+}
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string { return &v }
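+
+// quotedInt64s is an illustrative sketch added by the editor, not part of
+// the upstream vendored file. Int64s marshals as quoted strings, so 64-bit
+// values survive JSON consumers that decode numbers into float64.
+func quotedInt64s() ([]byte, error) {
+ ids := Int64s{1, 9007199254740993} // 2^53+1 would lose precision as a float64
+ return json.Marshal(ids) // -> ["1","9007199254740993"]
+}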
diff --git a/vendor/src/google.golang.org/api/logging/v1beta3/logging-api.json b/vendor/src/google.golang.org/api/logging/v1beta3/logging-api.json
new file mode 100644
index 0000000000..48a141e712
--- /dev/null
+++ b/vendor/src/google.golang.org/api/logging/v1beta3/logging-api.json
@@ -0,0 +1,1692 @@
+{
+ "kind": "discovery#restDescription",
+ "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/JyzTrhH0rHlFw8M4zq31tTXViEA\"",
+ "discoveryVersion": "v1",
+ "id": "logging:v1beta3",
+ "name": "logging",
+ "version": "v1beta3",
+ "revision": "20151109",
+ "title": "Google Cloud Logging API",
+ "description": "The Google Cloud Logging API lets you write log entries and manage your logs, log sinks and logs-based metrics.",
+ "ownerDomain": "google.com",
+ "ownerName": "Google",
+ "icons": {
+ "x16": "http://www.google.com/images/icons/product/search-16.gif",
+ "x32": "http://www.google.com/images/icons/product/search-32.gif"
+ },
+ "documentationLink": "https://cloud.google.com/logging/docs/",
+ "protocol": "rest",
+ "baseUrl": "https://logging.googleapis.com/",
+ "basePath": "/",
+ "rootUrl": "https://logging.googleapis.com/",
+ "servicePath": "",
+ "batchPath": "batch",
+ "parameters": {
+ "access_token": {
+ "type": "string",
+ "description": "OAuth access token.",
+ "location": "query"
+ },
+ "alt": {
+ "type": "string",
+ "description": "Data format for response.",
+ "default": "json",
+ "enumDescriptions": [
+ "Responses with Content-Type of application/json",
+ "Media download with context-dependent Content-Type",
+ "Responses with Content-Type of application/x-protobuf"
+ ],
+ "location": "query"
+ },
+ "bearer_token": {
+ "type": "string",
+ "description": "OAuth bearer token.",
+ "location": "query"
+ },
+ "callback": {
+ "type": "string",
+ "description": "JSONP",
+ "location": "query"
+ },
+ "fields": {
+ "type": "string",
+ "description": "Selector specifying which fields to include in a partial response.",
+ "location": "query"
+ },
+ "key": {
+ "type": "string",
+ "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+ "location": "query"
+ },
+ "oauth_token": {
+ "type": "string",
+ "description": "OAuth 2.0 token for the current user.",
+ "location": "query"
+ },
+ "pp": {
+ "type": "boolean",
+ "description": "Pretty-print response.",
+ "default": "true",
+ "location": "query"
+ },
+ "prettyPrint": {
+ "type": "boolean",
+ "description": "Returns response with indentations and line breaks.",
+ "default": "true",
+ "location": "query"
+ },
+ "quotaUser": {
+ "type": "string",
+ "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.",
+ "location": "query"
+ },
+ "upload_protocol": {
+ "type": "string",
+ "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").",
+ "location": "query"
+ },
+ "uploadType": {
+ "type": "string",
+ "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").",
+ "location": "query"
+ },
+ "$.xgafv": {
+ "type": "string",
+ "description": "V1 error format.",
+ "enumDescriptions": [
+ "v1 error format",
+ "v2 error format"
+ ],
+ "location": "query"
+ }
+ },
+ "auth": {
+ "oauth2": {
+ "scopes": {
+ "https://www.googleapis.com/auth/cloud-platform": {
+ "description": "View and manage your data across Google Cloud Platform services"
+ },
+ "https://www.googleapis.com/auth/cloud-platform.read-only": {
+ "description": "View your data across Google Cloud Platform services"
+ },
+ "https://www.googleapis.com/auth/logging.admin": {
+ "description": "Administrate log data for your projects"
+ },
+ "https://www.googleapis.com/auth/logging.read": {
+ "description": "View log data for your projects"
+ },
+ "https://www.googleapis.com/auth/logging.write": {
+ "description": "Submit log data for your projects"
+ }
+ }
+ }
+ },
+ "schemas": {
+ "ListLogsResponse": {
+ "id": "ListLogsResponse",
+ "type": "object",
+ "description": "Result returned from ListLogs.",
+ "properties": {
+ "logs": {
+ "type": "array",
+ "description": "A list of log descriptions matching the criteria.",
+ "items": {
+ "$ref": "Log"
+ }
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "If there are more results, then `nextPageToken` is returned in the response. To get the next batch of logs, use the value of `nextPageToken` as `pageToken` in the next call of `ListLogs`. If `nextPageToken` is empty, then there are no more results."
+ }
+ }
+ },
+ "Log": {
+ "id": "Log",
+ "type": "object",
+ "description": "_Output only._ Describes a log, which is a named stream of log entries.",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The resource name of the log. Example: `\"/projects/my-gcp-project-id/logs/LOG_NAME\"`, where `LOG_NAME` is the URL-encoded given name of the log. The log includes those log entries whose `LogEntry.log` field contains this given name. To avoid name collisions, it is a best practice to prefix the given log name with the service name, but this is not required. Examples of log given names: `\"appengine.googleapis.com/request_log\"`, `\"apache-access\"`."
+ },
+ "displayName": {
+ "type": "string",
+ "description": "_Optional._ The common name of the log. Example: `\"request_log\"`."
+ },
+ "payloadType": {
+ "type": "string",
+ "description": "_Optional_. A URI representing the expected payload type for log entries."
+ }
+ }
+ },
+ "Empty": {
+ "id": "Empty",
+ "type": "object",
+ "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`."
+ },
+ "WriteLogEntriesRequest": {
+ "id": "WriteLogEntriesRequest",
+ "type": "object",
+ "description": "The parameters to WriteLogEntries.",
+ "properties": {
+ "commonLabels": {
+ "type": "object",
+ "description": "Metadata labels that apply to all log entries in this request, so that you don't have to repeat them in each log entry's `metadata.labels` field. If any of the log entries contains a (key, value) with the same key that is in `commonLabels`, then the entry's (key, value) overrides the one in `commonLabels`.",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "entries": {
+ "type": "array",
+ "description": "Log entries to insert.",
+ "items": {
+ "$ref": "LogEntry"
+ }
+ }
+ }
+ },
+ "LogEntry": {
+ "id": "LogEntry",
+ "type": "object",
+ "description": "An individual entry in a log.",
+ "properties": {
+ "metadata": {
+ "$ref": "LogEntryMetadata",
+ "description": "Information about the log entry."
+ },
+ "protoPayload": {
+ "type": "object",
+ "description": "The log entry payload, represented as a protocol buffer that is expressed as a JSON object. You can only pass `protoPayload` values that belong to a set of approved types.",
+ "additionalProperties": {
+ "type": "any",
+ "description": "Properties of the object. Contains field @ype with type URL."
+ }
+ },
+ "textPayload": {
+ "type": "string",
+ "description": "The log entry payload, represented as a Unicode string (UTF-8)."
+ },
+ "structPayload": {
+ "type": "object",
+ "description": "The log entry payload, represented as a structure that is expressed as a JSON object.",
+ "additionalProperties": {
+ "type": "any",
+ "description": "Properties of the object."
+ }
+ },
+ "insertId": {
+ "type": "string",
+ "description": "A unique ID for the log entry. If you provide this field, the logging service considers other log entries in the same log with the same ID as duplicates which can be removed."
+ },
+ "log": {
+ "type": "string",
+ "description": "The log to which this entry belongs. When a log entry is ingested, the value of this field is set by the logging system."
+ },
+ "httpRequest": {
+ "$ref": "HttpRequest",
+ "description": "Information about the HTTP request associated with this log entry, if applicable."
+ }
+ }
+ },
+ "LogEntryMetadata": {
+ "id": "LogEntryMetadata",
+ "type": "object",
+ "description": "Additional data that is associated with a log entry, set by the service creating the log entry.",
+ "properties": {
+ "timestamp": {
+ "type": "string",
+ "description": "The time the event described by the log entry occurred. Timestamps must be later than January 1, 1970."
+ },
+ "severity": {
+ "type": "string",
+ "description": "The severity of the log entry.",
+ "enum": [
+ "DEFAULT",
+ "DEBUG",
+ "INFO",
+ "NOTICE",
+ "WARNING",
+ "ERROR",
+ "CRITICAL",
+ "ALERT",
+ "EMERGENCY"
+ ]
+ },
+ "projectId": {
+ "type": "string",
+ "description": "The project ID of the Google Cloud Platform service that created the log entry."
+ },
+ "serviceName": {
+ "type": "string",
+ "description": "The API name of the Google Cloud Platform service that created the log entry. For example, `\"compute.googleapis.com\"`."
+ },
+ "region": {
+ "type": "string",
+ "description": "The region name of the Google Cloud Platform service that created the log entry. For example, `\"us-central1\"`."
+ },
+ "zone": {
+ "type": "string",
+ "description": "The zone of the Google Cloud Platform service that created the log entry. For example, `\"us-central1-a\"`."
+ },
+ "userId": {
+ "type": "string",
+ "description": "The fully-qualified email address of the authenticated user that performed or requested the action represented by the log entry. If the log entry does not apply to an action taken by an authenticated user, then the field should be empty."
+ },
+ "labels": {
+ "type": "object",
+ "description": "A set of (key, value) data that provides additional information about the log entry. If the log entry is from one of the Google Cloud Platform sources listed below, the indicated (key, value) information must be provided: Google App Engine, service_name `appengine.googleapis.com`: \"appengine.googleapis.com/module_id\", \"appengine.googleapis.com/version_id\", and one of: \"appengine.googleapis.com/replica_index\", \"appengine.googleapis.com/clone_id\", or else provide the following Compute Engine labels: Google Compute Engine, service_name `compute.googleapis.com`: \"compute.googleapis.com/resource_type\", \"instance\" \"compute.googleapis.com/resource_id\",",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "HttpRequest": {
+ "id": "HttpRequest",
+ "type": "object",
+ "description": "A common proto for logging HTTP requests.",
+ "properties": {
+ "requestMethod": {
+ "type": "string",
+ "description": "Request method, such as `GET`, `HEAD`, `PUT` or `POST`."
+ },
+ "requestUrl": {
+ "type": "string",
+ "description": "Contains the scheme (http|https), the host name, the path and the query portion of the URL that was requested."
+ },
+ "requestSize": {
+ "type": "string",
+ "description": "Size of the HTTP request message in bytes, including request headers and the request body.",
+ "format": "int64"
+ },
+ "status": {
+ "type": "integer",
+ "description": "A response code indicates the status of response, e.g., 200.",
+ "format": "int32"
+ },
+ "responseSize": {
+ "type": "string",
+ "description": "Size of the HTTP response message in bytes sent back to the client, including response headers and response body.",
+ "format": "int64"
+ },
+ "userAgent": {
+ "type": "string",
+ "description": "User agent sent by the client, e.g., \"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)\"."
+ },
+ "remoteIp": {
+ "type": "string",
+ "description": "IP address of the client who issues the HTTP request. Could be either IPv4 or IPv6."
+ },
+ "referer": {
+ "type": "string",
+ "description": "Referer (a.k.a. referrer) URL of request, as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html."
+ },
+ "cacheHit": {
+ "type": "boolean",
+ "description": "Whether or not an entity was served from cache (with or without validation)."
+ },
+ "validatedWithOriginServer": {
+ "type": "boolean",
+ "description": "Whether or not the response was validated with the origin server before being served from cache. This field is only meaningful if cache_hit is True."
+ }
+ }
+ },
+ "WriteLogEntriesResponse": {
+ "id": "WriteLogEntriesResponse",
+ "type": "object",
+ "description": "Result returned from WriteLogEntries. empty"
+ },
+ "ListLogServicesResponse": {
+ "id": "ListLogServicesResponse",
+ "type": "object",
+ "description": "Result returned from `ListLogServicesRequest`.",
+ "properties": {
+ "logServices": {
+ "type": "array",
+ "description": "A list of log services.",
+ "items": {
+ "$ref": "LogService"
+ }
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "If there are more results, then `nextPageToken` is returned in the response. To get the next batch of services, use the value of `nextPageToken` as `pageToken` in the next call of `ListLogServices`. If `nextPageToken` is empty, then there are no more results."
+ }
+ }
+ },
+ "LogService": {
+ "id": "LogService",
+ "type": "object",
+ "description": "_Output only._ Describes a service that writes log entries.",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The service's name. Example: `\"appengine.googleapis.com\"`. Log names beginning with this string are reserved for this service. This value can appear in the `LogEntry.metadata.serviceName` field of log entries associated with this log service."
+ },
+ "indexKeys": {
+ "type": "array",
+ "description": "A list of the names of the keys used to index and label individual log entries from this service. The first two keys are used as the primary and secondary index, respectively. Additional keys may be used to label the entries. For example, App Engine indexes its entries by module and by version, so its `indexKeys` field is the following: [ \"appengine.googleapis.com/module_id\", \"appengine.googleapis.com/version_id\" ]",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "ListLogServiceIndexesResponse": {
+ "id": "ListLogServiceIndexesResponse",
+ "type": "object",
+ "description": "Result returned from ListLogServiceIndexesRequest.",
+ "properties": {
+ "serviceIndexPrefixes": {
+ "type": "array",
+ "description": "A list of log service index values. Each index value has the form `\"/value1/value2/...\"`, where `value1` is a value in the primary index, `value2` is a value in the secondary index, and so forth.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "If there are more results, then `nextPageToken` is returned in the response. To get the next batch of indexes, use the value of `nextPageToken` as `pageToken` in the next call of `ListLogServiceIndexes`. If `nextPageToken` is empty, then there are no more results."
+ }
+ }
+ },
+ "ListLogSinksResponse": {
+ "id": "ListLogSinksResponse",
+ "type": "object",
+ "description": "Result returned from `ListLogSinks`.",
+ "properties": {
+ "sinks": {
+ "type": "array",
+ "description": "The requested log sinks. If a returned `LogSink` object has an empty `destination` field, the client can retrieve the complete `LogSink` object by calling `log.sinks.get`.",
+ "items": {
+ "$ref": "LogSink"
+ }
+ }
+ }
+ },
+ "LogSink": {
+ "id": "LogSink",
+ "type": "object",
+ "description": "Describes where log entries are written outside of Cloud Logging.",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The client-assigned name of this sink. For example, `\"my-syslog-sink\"`. The name must be unique among the sinks of a similar kind in the project."
+ },
+ "destination": {
+ "type": "string",
+ "description": "The resource name of the destination. Cloud Logging writes designated log entries to this destination. For example, `\"storage.googleapis.com/my-output-bucket\"`."
+ },
+ "filter": {
+ "type": "string",
+ "description": "An advanced logs filter. If present, only log entries matching the filter are written. Only project sinks use this field; log sinks and log service sinks must not include a filter."
+ },
+ "errors": {
+ "type": "array",
+ "description": "_Output only._ If any errors occur when invoking a sink method, then this field contains descriptions of the errors.",
+ "items": {
+ "$ref": "LogError"
+ }
+ }
+ }
+ },
+ "LogError": {
+ "id": "LogError",
+ "type": "object",
+ "description": "Describes a problem with a logging resource or operation.",
+ "properties": {
+ "resource": {
+ "type": "string",
+ "description": "A resource name associated with this error. For example, the name of a Cloud Storage bucket that has insufficient permissions to be a destination for log entries."
+ },
+ "status": {
+ "$ref": "Status",
+ "description": "The error description, including a classification code, an error message, and other details."
+ },
+ "timeNanos": {
+ "type": "string",
+ "description": "The time the error was observed, in nanoseconds since the Unix epoch.",
+ "format": "int64"
+ }
+ }
+ },
+ "Status": {
+ "id": "Status",
+ "type": "object",
+ "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). The error model is designed to be: - Simple to use and understand for most users - Flexible enough to meet unexpected needs # Overview The `Status` message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers *understand* and *resolve* the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package `google.rpc` which can be used for common error conditions. # Language mapping The `Status` message is the logical representation of the error model, but it is not necessarily the actual wire format. When the `Status` message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C. # Other uses The error model and the `Status` message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments. Example uses of this error model include: - Partial errors. If a service needs to return partial errors to the client, it may embed the `Status` in the normal response to indicate the partial errors. - Workflow errors. A typical workflow has multiple steps. Each step may have a `Status` message for error reporting purpose. - Batch operations. If a client uses batch request and batch response, the `Status` message should be used directly inside batch response, one for each error sub-response. - Asynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the `Status` message. - Logging. If some API errors are stored in logs, the message `Status` could be used directly after any stripping needed for security/privacy reasons.",
+ "properties": {
+ "code": {
+ "type": "integer",
+ "description": "The status code, which should be an enum value of google.rpc.Code.",
+ "format": "int32"
+ },
+ "message": {
+ "type": "string",
+ "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client."
+ },
+ "details": {
+ "type": "array",
+ "description": "A list of messages that carry the error details. There will be a common set of message types for APIs to use.",
+ "items": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "any",
+ "description": "Properties of the object. Contains field @ype with type URL."
+ }
+ }
+ }
+ }
+ },
+ "ListLogServiceSinksResponse": {
+ "id": "ListLogServiceSinksResponse",
+ "type": "object",
+ "description": "Result returned from `ListLogServiceSinks`.",
+ "properties": {
+ "sinks": {
+ "type": "array",
+ "description": "The requested log service sinks. If a returned `LogSink` object has an empty `destination` field, the client can retrieve the complete `LogSink` object by calling `logServices.sinks.get`.",
+ "items": {
+ "$ref": "LogSink"
+ }
+ }
+ }
+ },
+ "ListSinksResponse": {
+ "id": "ListSinksResponse",
+ "type": "object",
+ "description": "Result returned from `ListSinks`.",
+ "properties": {
+ "sinks": {
+ "type": "array",
+ "description": "The requested sinks. If a returned `LogSink` object has an empty `destination` field, the client can retrieve the complete `LogSink` object by calling `projects.sinks.get`.",
+ "items": {
+ "$ref": "LogSink"
+ }
+ }
+ }
+ },
+ "ListLogMetricsResponse": {
+ "id": "ListLogMetricsResponse",
+ "type": "object",
+ "description": "Result returned from ListLogMetrics.",
+ "properties": {
+ "metrics": {
+ "type": "array",
+ "description": "The list of metrics that was requested.",
+ "items": {
+ "$ref": "LogMetric"
+ }
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "If there are more results, then `nextPageToken` is returned in the response. To get the next batch of entries, use the value of `nextPageToken` as `pageToken` in the next call of `ListLogMetrics`. If `nextPageToken` is empty, then there are no more results."
+ }
+ }
+ },
+ "LogMetric": {
+ "id": "LogMetric",
+ "type": "object",
+ "description": "Describes a logs-based metric. The value of the metric is the number of log entries in your project that match a logs filter.",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The client-assigned name for this metric, such as `\"severe_errors\"`. Metric names are limited to 1000 characters and can include only the following characters: `A-Z`, `a-z`, `0-9`, and the special characters `_-.,+!*',()%/\\`. The slash character (`/`) denotes a hierarchy of name pieces, and it cannot be the first character of the name."
+ },
+ "description": {
+ "type": "string",
+ "description": "A description of this metric."
+ },
+ "filter": {
+ "type": "string",
+ "description": "An [advanced logs filter](/logging/docs/view/advanced_filters). Example: `\"log:syslog AND metadata.severity\u003e=ERROR\"`."
+ }
+ }
+ },
+ "RequestLog": {
+ "id": "RequestLog",
+ "type": "object",
+ "description": "Complete log information about a single request to an application.",
+ "properties": {
+ "appId": {
+ "type": "string",
+ "description": "Identifies the application that handled this request."
+ },
+ "moduleId": {
+ "type": "string",
+ "description": "Identifies the module of the application that handled this request."
+ },
+ "versionId": {
+ "type": "string",
+ "description": "Version of the application that handled this request."
+ },
+ "requestId": {
+ "type": "string",
+ "description": "Globally unique identifier for a request, based on request start time. Request IDs for requests which started later will compare greater as strings than those for requests which started earlier."
+ },
+ "ip": {
+ "type": "string",
+ "description": "Origin IP address."
+ },
+ "startTime": {
+ "type": "string",
+ "description": "Time at which request was known to have begun processing."
+ },
+ "endTime": {
+ "type": "string",
+ "description": "Time at which request was known to end processing."
+ },
+ "latency": {
+ "type": "string",
+ "description": "Latency of the request."
+ },
+ "megaCycles": {
+ "type": "string",
+ "description": "Number of CPU megacycles used to process request.",
+ "format": "int64"
+ },
+ "method": {
+ "type": "string",
+ "description": "Request method, such as `GET`, `HEAD`, `PUT`, `POST`, or `DELETE`."
+ },
+ "resource": {
+ "type": "string",
+ "description": "Contains the path and query portion of the URL that was requested. For example, if the URL was \"http://example.com/app?name=val\", the resource would be \"/app?name=val\". Any trailing fragment (separated by a '#' character) will not be included."
+ },
+ "httpVersion": {
+ "type": "string",
+ "description": "HTTP version of request."
+ },
+ "status": {
+ "type": "integer",
+ "description": "Response status of request.",
+ "format": "int32"
+ },
+ "responseSize": {
+ "type": "string",
+ "description": "Size in bytes sent back to client by request.",
+ "format": "int64"
+ },
+ "referrer": {
+ "type": "string",
+ "description": "Referrer URL of request."
+ },
+ "userAgent": {
+ "type": "string",
+ "description": "User agent used for making request."
+ },
+ "nickname": {
+ "type": "string",
+ "description": "A string that identifies a logged-in user who made this request, or empty if the user is not logged in. Most likely, this is the part of the user's email before the '@' sign. The field value is the same for different requests from the same user, but different users may have a similar name. This information is also available to the application via Users API. This field will be populated starting with App Engine 1.9.21."
+ },
+ "urlMapEntry": {
+ "type": "string",
+ "description": "File or class within URL mapping used for request. Useful for tracking down the source code which was responsible for managing request. Especially for multiply mapped handlers."
+ },
+ "host": {
+ "type": "string",
+ "description": "The Internet host and port number of the resource being requested."
+ },
+ "cost": {
+ "type": "number",
+ "description": "An indication of the relative cost of serving this request.",
+ "format": "double"
+ },
+ "taskQueueName": {
+ "type": "string",
+ "description": "Queue name of the request (for an offline request)."
+ },
+ "taskName": {
+ "type": "string",
+ "description": "Task name of the request (for an offline request)."
+ },
+ "wasLoadingRequest": {
+ "type": "boolean",
+ "description": "Was this request a loading request for this instance?"
+ },
+ "pendingTime": {
+ "type": "string",
+ "description": "Time this request spent in the pending request queue, if it was pending at all."
+ },
+ "instanceIndex": {
+ "type": "integer",
+ "description": "If the instance that processed this request was individually addressable (i.e. belongs to a manually scaled module), this is the index of the instance.",
+ "format": "int32"
+ },
+ "finished": {
+ "type": "boolean",
+ "description": "If true, represents a finished request. Otherwise, the request is active."
+ },
+ "instanceId": {
+ "type": "string",
+ "description": "An opaque identifier for the instance that handled the request."
+ },
+ "line": {
+ "type": "array",
+ "description": "List of log lines emitted by the application while serving this request, if requested.",
+ "items": {
+ "$ref": "LogLine"
+ }
+ },
+ "appEngineRelease": {
+ "type": "string",
+ "description": "App Engine release version string."
+ },
+ "traceId": {
+ "type": "string",
+ "description": "Cloud Trace identifier of the trace for this request."
+ },
+ "sourceReference": {
+ "type": "array",
+ "description": "Source code for the application that handled this request. There can be more than one source reference per deployed application if source code is distributed among multiple repositories.",
+ "items": {
+ "$ref": "SourceReference"
+ }
+ }
+ }
+ },
+ "LogLine": {
+ "id": "LogLine",
+ "type": "object",
+ "description": "Application log line emitted while processing a request.",
+ "properties": {
+ "time": {
+ "type": "string",
+ "description": "Time when log entry was made. May be inaccurate."
+ },
+ "severity": {
+ "type": "string",
+ "description": "Severity of log.",
+ "enum": [
+ "DEFAULT",
+ "DEBUG",
+ "INFO",
+ "NOTICE",
+ "WARNING",
+ "ERROR",
+ "CRITICAL",
+ "ALERT",
+ "EMERGENCY"
+ ]
+ },
+ "logMessage": {
+ "type": "string",
+ "description": "App provided log message."
+ },
+ "sourceLocation": {
+ "$ref": "SourceLocation",
+ "description": "Line of code that generated this log message."
+ }
+ }
+ },
+ "SourceLocation": {
+ "id": "SourceLocation",
+ "type": "object",
+ "description": "Specifies a location in a source file.",
+ "properties": {
+ "file": {
+ "type": "string",
+ "description": "Source file name. May or may not be a fully qualified name, depending on the runtime environment."
+ },
+ "line": {
+ "type": "string",
+ "description": "Line within the source file.",
+ "format": "int64"
+ },
+ "functionName": {
+ "type": "string",
+ "description": "Human-readable name of the function or method being invoked, with optional context such as the class or package name, for use in contexts such as the logs viewer where file:line number is less meaningful. This may vary by language, for example: in Java: qual.if.ied.Class.method in Go: dir/package.func in Python: function ..."
+ }
+ }
+ },
+ "SourceReference": {
+ "id": "SourceReference",
+ "type": "object",
+ "description": "A reference to a particular snapshot of the source tree used to build and deploy an application.",
+ "properties": {
+ "repository": {
+ "type": "string",
+ "description": "Optional. A URI string identifying the repository. Example: \"https://github.com/GoogleCloudPlatform/kubernetes.git\""
+ },
+ "revisionId": {
+ "type": "string",
+ "description": "The canonical (and persistent) identifier of the deployed revision. Example (git): \"0035781c50ec7aa23385dc841529ce8a4b70db1b\""
+ }
+ }
+ }
+ },
+ "resources": {
+ "projects": {
+ "resources": {
+ "logs": {
+ "methods": {
+ "list": {
+ "id": "logging.projects.logs.list",
+ "path": "v1beta3/projects/{projectsId}/logs",
+ "httpMethod": "GET",
+ "description": "Lists the logs in the project. Only logs that have entries are listed.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `projectName`. The resource name of the project whose logs are requested. If both `serviceName` and `serviceIndexPrefix` are empty, then all logs with entries in this project are listed.",
+ "required": true,
+ "location": "path"
+ },
+ "serviceName": {
+ "type": "string",
+ "description": "If not empty, this field must be a log service name such as `\"compute.googleapis.com\"`. Only logs associated with that that log service are listed.",
+ "location": "query"
+ },
+ "serviceIndexPrefix": {
+ "type": "string",
+ "description": "The purpose of this field is to restrict the listed logs to those with entries of a certain kind. If `serviceName` is the name of a log service, then this field may contain values for the log service's indexes. Only logs that have entries whose indexes include the values are listed. The format for this field is `\"/val1/val2.../valN\"`, where `val1` is a value for the first index, `val2` for the second index, etc. An empty value (a single slash) for an index matches all values, and you can omit values for later indexes entirely.",
+ "location": "query"
+ },
+ "pageSize": {
+ "type": "integer",
+ "description": "The maximum number of results to return.",
+ "format": "int32",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "An opaque token, returned as `nextPageToken` by a prior `ListLogs` operation. If `pageToken` is supplied, then the other fields of this request are ignored, and instead the previous `ListLogs` operation is continued.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "projectsId"
+ ],
+ "response": {
+ "$ref": "ListLogsResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read"
+ ]
+ },
+ "delete": {
+ "id": "logging.projects.logs.delete",
+ "path": "v1beta3/projects/{projectsId}/logs/{logsId}",
+ "httpMethod": "DELETE",
+ "description": "Deletes a log and all its log entries. The log will reappear if it receives new entries.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `logName`. The resource name of the log to be deleted.",
+ "required": true,
+ "location": "path"
+ },
+ "logsId": {
+ "type": "string",
+ "description": "Part of `logName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logsId"
+ ],
+ "response": {
+ "$ref": "Empty"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin"
+ ]
+ }
+ },
+ "resources": {
+ "entries": {
+ "methods": {
+ "write": {
+ "id": "logging.projects.logs.entries.write",
+ "path": "v1beta3/projects/{projectsId}/logs/{logsId}/entries:write",
+ "httpMethod": "POST",
+ "description": "Writes log entries to Cloud Logging. Each entry consists of a `LogEntry` object. You must fill in all the fields of the object, including one of the payload fields. You may supply a map, `commonLabels`, that holds default (key, value) data for the `entries[].metadata.labels` map in each entry, saving you the trouble of creating identical copies for each entry.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `logName`. The resource name of the log that will receive the log entries.",
+ "required": true,
+ "location": "path"
+ },
+ "logsId": {
+ "type": "string",
+ "description": "Part of `logName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logsId"
+ ],
+ "request": {
+ "$ref": "WriteLogEntriesRequest"
+ },
+ "response": {
+ "$ref": "WriteLogEntriesResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.write"
+ ]
+ }
+ }
+ },
+ "sinks": {
+ "methods": {
+ "list": {
+ "id": "logging.projects.logs.sinks.list",
+ "path": "v1beta3/projects/{projectsId}/logs/{logsId}/sinks",
+ "httpMethod": "GET",
+ "description": "Lists log sinks associated with a log.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `logName`. The log whose sinks are wanted. For example, `\"compute.google.com/syslog\"`.",
+ "required": true,
+ "location": "path"
+ },
+ "logsId": {
+ "type": "string",
+ "description": "Part of `logName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logsId"
+ ],
+ "response": {
+ "$ref": "ListLogSinksResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read"
+ ]
+ },
+ "get": {
+ "id": "logging.projects.logs.sinks.get",
+ "path": "v1beta3/projects/{projectsId}/logs/{logsId}/sinks/{sinksId}",
+ "httpMethod": "GET",
+ "description": "Gets a log sink.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `sinkName`. The resource name of the log sink to return.",
+ "required": true,
+ "location": "path"
+ },
+ "logsId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ },
+ "sinksId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logsId",
+ "sinksId"
+ ],
+ "response": {
+ "$ref": "LogSink"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read"
+ ]
+ },
+ "create": {
+ "id": "logging.projects.logs.sinks.create",
+ "path": "v1beta3/projects/{projectsId}/logs/{logsId}/sinks",
+ "httpMethod": "POST",
+ "description": "Creates a log sink. All log entries for a specified log are written to the destination.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `logName`. The resource name of the log to which to the sink is bound.",
+ "required": true,
+ "location": "path"
+ },
+ "logsId": {
+ "type": "string",
+ "description": "Part of `logName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logsId"
+ ],
+ "request": {
+ "$ref": "LogSink"
+ },
+ "response": {
+ "$ref": "LogSink"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin"
+ ]
+ },
+ "update": {
+ "id": "logging.projects.logs.sinks.update",
+ "path": "v1beta3/projects/{projectsId}/logs/{logsId}/sinks/{sinksId}",
+ "httpMethod": "PUT",
+ "description": "Updates a log sink. If the sink does not exist, it is created.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `sinkName`. The resource name of the sink to update.",
+ "required": true,
+ "location": "path"
+ },
+ "logsId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ },
+ "sinksId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logsId",
+ "sinksId"
+ ],
+ "request": {
+ "$ref": "LogSink"
+ },
+ "response": {
+ "$ref": "LogSink"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin"
+ ]
+ },
+ "delete": {
+ "id": "logging.projects.logs.sinks.delete",
+ "path": "v1beta3/projects/{projectsId}/logs/{logsId}/sinks/{sinksId}",
+ "httpMethod": "DELETE",
+ "description": "Deletes a log sink. After deletion, no new log entries are written to the destination.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `sinkName`. The resource name of the log sink to delete.",
+ "required": true,
+ "location": "path"
+ },
+ "logsId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ },
+ "sinksId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logsId",
+ "sinksId"
+ ],
+ "response": {
+ "$ref": "Empty"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "logServices": {
+ "methods": {
+ "list": {
+ "id": "logging.projects.logServices.list",
+ "path": "v1beta3/projects/{projectsId}/logServices",
+ "httpMethod": "GET",
+ "description": "Lists the log services that have log entries in this project.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `projectName`. The resource name of the project whose services are to be listed.",
+ "required": true,
+ "location": "path"
+ },
+ "pageSize": {
+ "type": "integer",
+ "description": "The maximum number of `LogService` objects to return in one operation.",
+ "format": "int32",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "An opaque token, returned as `nextPageToken` by a prior `ListLogServices` operation. If `pageToken` is supplied, then the other fields of this request are ignored, and instead the previous `ListLogServices` operation is continued.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "projectsId"
+ ],
+ "response": {
+ "$ref": "ListLogServicesResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read"
+ ]
+ }
+ },
+ "resources": {
+ "indexes": {
+ "methods": {
+ "list": {
+ "id": "logging.projects.logServices.indexes.list",
+ "path": "v1beta3/projects/{projectsId}/logServices/{logServicesId}/indexes",
+ "httpMethod": "GET",
+ "description": "Lists the current index values for a log service.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `serviceName`. The resource name of a log service whose service indexes are requested. Example: `\"projects/my-project-id/logServices/appengine.googleapis.com\"`.",
+ "required": true,
+ "location": "path"
+ },
+ "logServicesId": {
+ "type": "string",
+ "description": "Part of `serviceName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ },
+ "indexPrefix": {
+ "type": "string",
+ "description": "Restricts the index values returned to be those with a specified prefix for each index key. This field has the form `\"/prefix1/prefix2/...\"`, in order corresponding to the `LogService indexKeys`. Non-empty prefixes must begin with `/`. For example, App Engine's two keys are the module ID and the version ID. Following is the effect of using various values for `indexPrefix`: + `\"/Mod/\"` retrieves `/Mod/10` and `/Mod/11` but not `/ModA/10`. + `\"/Mod` retrieves `/Mod/10`, `/Mod/11` and `/ModA/10` but not `/XXX/33`. + `\"/Mod/1\"` retrieves `/Mod/10` and `/Mod/11` but not `/ModA/10`. + `\"/Mod/10/\"` retrieves `/Mod/10` only. + An empty prefix or `\"/\"` retrieves all values.",
+ "location": "query"
+ },
+ "depth": {
+ "type": "integer",
+ "description": "A non-negative integer that limits the number of levels of the index hierarchy that are returned. If `depth` is 1 (default), only the first index key value is returned. If `depth` is 2, both primary and secondary key values are returned. If `depth` is 0, the depth is the number of slash-separators in the `indexPrefix` field, not counting a slash appearing as the last character of the prefix. If the `indexPrefix` field is empty, the default depth is 1. It is an error for `depth` to be any positive value less than the number of components in `indexPrefix`.",
+ "format": "int32",
+ "location": "query"
+ },
+ "pageSize": {
+ "type": "integer",
+ "description": "The maximum number of log service index resources to return in one operation.",
+ "format": "int32",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "An opaque token, returned as `nextPageToken` by a prior `ListLogServiceIndexes` operation. If `pageToken` is supplied, then the other fields of this request are ignored, and instead the previous `ListLogServiceIndexes` operation is continued.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logServicesId"
+ ],
+ "response": {
+ "$ref": "ListLogServiceIndexesResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read"
+ ]
+ }
+ }
+ },
+ "sinks": {
+ "methods": {
+ "list": {
+ "id": "logging.projects.logServices.sinks.list",
+ "path": "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks",
+ "httpMethod": "GET",
+ "description": "Lists log service sinks associated with a log service.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `serviceName`. The log service whose sinks are wanted.",
+ "required": true,
+ "location": "path"
+ },
+ "logServicesId": {
+ "type": "string",
+ "description": "Part of `serviceName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logServicesId"
+ ],
+ "response": {
+ "$ref": "ListLogServiceSinksResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read"
+ ]
+ },
+ "get": {
+ "id": "logging.projects.logServices.sinks.get",
+ "path": "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks/{sinksId}",
+ "httpMethod": "GET",
+ "description": "Gets a log service sink.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `sinkName`. The resource name of the log service sink to return.",
+ "required": true,
+ "location": "path"
+ },
+ "logServicesId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ },
+ "sinksId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logServicesId",
+ "sinksId"
+ ],
+ "response": {
+ "$ref": "LogSink"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read"
+ ]
+ },
+ "create": {
+ "id": "logging.projects.logServices.sinks.create",
+ "path": "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks",
+ "httpMethod": "POST",
+ "description": "Creates a log service sink. All log entries from a specified log service are written to the destination.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `serviceName`. The resource name of the log service to which the sink is bound.",
+ "required": true,
+ "location": "path"
+ },
+ "logServicesId": {
+ "type": "string",
+ "description": "Part of `serviceName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logServicesId"
+ ],
+ "request": {
+ "$ref": "LogSink"
+ },
+ "response": {
+ "$ref": "LogSink"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin"
+ ]
+ },
+ "update": {
+ "id": "logging.projects.logServices.sinks.update",
+ "path": "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks/{sinksId}",
+ "httpMethod": "PUT",
+ "description": "Updates a log service sink. If the sink does not exist, it is created.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `sinkName`. The resource name of the log service sink to update.",
+ "required": true,
+ "location": "path"
+ },
+ "logServicesId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ },
+ "sinksId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logServicesId",
+ "sinksId"
+ ],
+ "request": {
+ "$ref": "LogSink"
+ },
+ "response": {
+ "$ref": "LogSink"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin"
+ ]
+ },
+ "delete": {
+ "id": "logging.projects.logServices.sinks.delete",
+ "path": "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks/{sinksId}",
+ "httpMethod": "DELETE",
+ "description": "Deletes a log service sink. After deletion, no new log entries are written to the destination.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `sinkName`. The resource name of the log service sink to delete.",
+ "required": true,
+ "location": "path"
+ },
+ "logServicesId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ },
+ "sinksId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "logServicesId",
+ "sinksId"
+ ],
+ "response": {
+ "$ref": "Empty"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "sinks": {
+ "methods": {
+ "list": {
+ "id": "logging.projects.sinks.list",
+ "path": "v1beta3/projects/{projectsId}/sinks",
+ "httpMethod": "GET",
+ "description": "Lists project sinks associated with a project.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `projectName`. The project whose sinks are wanted.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId"
+ ],
+ "response": {
+ "$ref": "ListSinksResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read"
+ ]
+ },
+ "get": {
+ "id": "logging.projects.sinks.get",
+ "path": "v1beta3/projects/{projectsId}/sinks/{sinksId}",
+ "httpMethod": "GET",
+ "description": "Gets a project sink.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `sinkName`. The resource name of the project sink to return.",
+ "required": true,
+ "location": "path"
+ },
+ "sinksId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "sinksId"
+ ],
+ "response": {
+ "$ref": "LogSink"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read"
+ ]
+ },
+ "create": {
+ "id": "logging.projects.sinks.create",
+ "path": "v1beta3/projects/{projectsId}/sinks",
+ "httpMethod": "POST",
+ "description": "Creates a project sink. A logs filter determines which log entries are written to the destination.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `projectName`. The resource name of the project to which the sink is bound.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId"
+ ],
+ "request": {
+ "$ref": "LogSink"
+ },
+ "response": {
+ "$ref": "LogSink"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin"
+ ]
+ },
+ "update": {
+ "id": "logging.projects.sinks.update",
+ "path": "v1beta3/projects/{projectsId}/sinks/{sinksId}",
+ "httpMethod": "PUT",
+ "description": "Updates a project sink. If the sink does not exist, it is created. The destination, filter, or both may be updated.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `sinkName`. The resource name of the project sink to update.",
+ "required": true,
+ "location": "path"
+ },
+ "sinksId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "sinksId"
+ ],
+ "request": {
+ "$ref": "LogSink"
+ },
+ "response": {
+ "$ref": "LogSink"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin"
+ ]
+ },
+ "delete": {
+ "id": "logging.projects.sinks.delete",
+ "path": "v1beta3/projects/{projectsId}/sinks/{sinksId}",
+ "httpMethod": "DELETE",
+ "description": "Deletes a project sink. After deletion, no new log entries are written to the destination.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `sinkName`. The resource name of the project sink to delete.",
+ "required": true,
+ "location": "path"
+ },
+ "sinksId": {
+ "type": "string",
+ "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "sinksId"
+ ],
+ "response": {
+ "$ref": "Empty"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin"
+ ]
+ }
+ }
+ },
+ "metrics": {
+ "methods": {
+ "list": {
+ "id": "logging.projects.metrics.list",
+ "path": "v1beta3/projects/{projectsId}/metrics",
+ "httpMethod": "GET",
+ "description": "Lists the logs-based metrics associated with a project.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `projectName`. The resource name for the project whose metrics are wanted.",
+ "required": true,
+ "location": "path"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "An opaque token, returned as `nextPageToken` by a prior `ListLogMetrics` operation. If `pageToken` is supplied, then the other fields of this request are ignored, and instead the previous `ListLogMetrics` operation is continued.",
+ "location": "query"
+ },
+ "pageSize": {
+ "type": "integer",
+ "description": "The maximum number of `LogMetric` objects to return in one operation.",
+ "format": "int32",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "projectsId"
+ ],
+ "response": {
+ "$ref": "ListLogMetricsResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read"
+ ]
+ },
+ "get": {
+ "id": "logging.projects.metrics.get",
+ "path": "v1beta3/projects/{projectsId}/metrics/{metricsId}",
+ "httpMethod": "GET",
+ "description": "Gets a logs-based metric.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `metricName`. The resource name of the desired metric.",
+ "required": true,
+ "location": "path"
+ },
+ "metricsId": {
+ "type": "string",
+ "description": "Part of `metricName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "metricsId"
+ ],
+ "response": {
+ "$ref": "LogMetric"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read"
+ ]
+ },
+ "create": {
+ "id": "logging.projects.metrics.create",
+ "path": "v1beta3/projects/{projectsId}/metrics",
+ "httpMethod": "POST",
+ "description": "Creates a logs-based metric.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `projectName`. The resource name of the project in which to create the metric.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId"
+ ],
+ "request": {
+ "$ref": "LogMetric"
+ },
+ "response": {
+ "$ref": "LogMetric"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.write"
+ ]
+ },
+ "update": {
+ "id": "logging.projects.metrics.update",
+ "path": "v1beta3/projects/{projectsId}/metrics/{metricsId}",
+ "httpMethod": "PUT",
+ "description": "Creates or updates a logs-based metric.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `metricName`. The resource name of the metric to update.",
+ "required": true,
+ "location": "path"
+ },
+ "metricsId": {
+ "type": "string",
+ "description": "Part of `metricName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "metricsId"
+ ],
+ "request": {
+ "$ref": "LogMetric"
+ },
+ "response": {
+ "$ref": "LogMetric"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.write"
+ ]
+ },
+ "delete": {
+ "id": "logging.projects.metrics.delete",
+ "path": "v1beta3/projects/{projectsId}/metrics/{metricsId}",
+ "httpMethod": "DELETE",
+ "description": "Deletes a logs-based metric.",
+ "parameters": {
+ "projectsId": {
+ "type": "string",
+ "description": "Part of `metricName`. The resource name of the metric to delete.",
+ "required": true,
+ "location": "path"
+ },
+ "metricsId": {
+ "type": "string",
+ "description": "Part of `metricName`. See documentation of `projectsId`.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectsId",
+ "metricsId"
+ ],
+ "response": {
+ "$ref": "Empty"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.write"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
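The discovery document above describes cursor-style pagination for every List method: pass `pageToken`, read `nextPageToken` from the response, and stop when it comes back empty. A minimal sketch of that loop against the generated Go client added below (listAllLogs, svc, and projectID are illustrative names; it assumes the usual PageToken setter that these generated clients expose on list calls):

	func listAllLogs(svc *logging.Service, projectID string) ([]*logging.Log, error) {
		var logs []*logging.Log
		tok := ""
		for {
			call := svc.Projects.Logs.List(projectID)
			if tok != "" {
				// Resume from the cursor returned by the previous page.
				call = call.PageToken(tok)
			}
			resp, err := call.Do()
			if err != nil {
				return nil, err
			}
			logs = append(logs, resp.Logs...)
			if resp.NextPageToken == "" {
				return logs, nil
			}
			tok = resp.NextPageToken
		}
	}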
diff --git a/vendor/src/google.golang.org/api/logging/v1beta3/logging-gen.go b/vendor/src/google.golang.org/api/logging/v1beta3/logging-gen.go
new file mode 100644
index 0000000000..362338c180
--- /dev/null
+++ b/vendor/src/google.golang.org/api/logging/v1beta3/logging-gen.go
@@ -0,0 +1,4787 @@
+// Package logging provides access to the Google Cloud Logging API.
+//
+// See https://cloud.google.com/logging/docs/
+//
+// Usage example:
+//
+// import "google.golang.org/api/logging/v1beta3"
+// ...
+// loggingService, err := logging.New(oauthHttpClient)
+package logging // import "google.golang.org/api/logging/v1beta3"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ context "golang.org/x/net/context"
+ ctxhttp "golang.org/x/net/context/ctxhttp"
+ gensupport "google.golang.org/api/gensupport"
+ googleapi "google.golang.org/api/googleapi"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+// Always reference these packages, just in case the auto-generated code
+// below doesn't.
+var _ = bytes.NewBuffer
+var _ = strconv.Itoa
+var _ = fmt.Sprintf
+var _ = json.NewDecoder
+var _ = io.Copy
+var _ = url.Parse
+var _ = gensupport.MarshalJSON
+var _ = googleapi.Version
+var _ = errors.New
+var _ = strings.Replace
+var _ = context.Canceled
+var _ = ctxhttp.Do
+
+const apiId = "logging:v1beta3"
+const apiName = "logging"
+const apiVersion = "v1beta3"
+const basePath = "https://logging.googleapis.com/"
+
+// OAuth2 scopes used by this API.
+const (
+ // View and manage your data across Google Cloud Platform services
+ CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
+
+ // View your data across Google Cloud Platform services
+ CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only"
+
+ // Administrate log data for your projects
+ LoggingAdminScope = "https://www.googleapis.com/auth/logging.admin"
+
+ // View log data for your projects
+ LoggingReadScope = "https://www.googleapis.com/auth/logging.read"
+
+ // Submit log data for your projects
+ LoggingWriteScope = "https://www.googleapis.com/auth/logging.write"
+)
+
+func New(client *http.Client) (*Service, error) {
+ if client == nil {
+ return nil, errors.New("client is nil")
+ }
+ s := &Service{client: client, BasePath: basePath}
+ s.Projects = NewProjectsService(s)
+ return s, nil
+}
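+
+// A minimal usage sketch (illustrative only; projectID and logName are
+// placeholder values, not part of this package): obtain an
+// OAuth2-authorized *http.Client, for example via
+// golang.org/x/oauth2/google (vendored in this same change), then
+// construct the service and write an entry:
+//
+//	client, err := google.DefaultClient(context.Background(), logging.LoggingWriteScope)
+//	if err != nil {
+//		return err
+//	}
+//	svc, err := logging.New(client)
+//	if err != nil {
+//		return err
+//	}
+//	req := &logging.WriteLogEntriesRequest{
+//		Entries: []*logging.LogEntry{{TextPayload: "hello"}},
+//	}
+//	_, err = svc.Projects.Logs.Entries.Write(projectID, logName, req).Do()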
+
+type Service struct {
+ client *http.Client
+ BasePath string // API endpoint base URL
+ UserAgent string // optional additional User-Agent fragment
+
+ Projects *ProjectsService
+}
+
+func (s *Service) userAgent() string {
+ if s.UserAgent == "" {
+ return googleapi.UserAgent
+ }
+ return googleapi.UserAgent + " " + s.UserAgent
+}
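+
+// Callers may set UserAgent to append an identifying fragment, e.g.
+// svc.UserAgent = "docker-gcplogs/1.0" (illustrative value); userAgent()
+// then joins it to googleapi.UserAgent when requests are built.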
+
+func NewProjectsService(s *Service) *ProjectsService {
+ rs := &ProjectsService{s: s}
+ rs.LogServices = NewProjectsLogServicesService(s)
+ rs.Logs = NewProjectsLogsService(s)
+ rs.Metrics = NewProjectsMetricsService(s)
+ rs.Sinks = NewProjectsSinksService(s)
+ return rs
+}
+
+type ProjectsService struct {
+ s *Service
+
+ LogServices *ProjectsLogServicesService
+
+ Logs *ProjectsLogsService
+
+ Metrics *ProjectsMetricsService
+
+ Sinks *ProjectsSinksService
+}
+
+func NewProjectsLogServicesService(s *Service) *ProjectsLogServicesService {
+ rs := &ProjectsLogServicesService{s: s}
+ rs.Indexes = NewProjectsLogServicesIndexesService(s)
+ rs.Sinks = NewProjectsLogServicesSinksService(s)
+ return rs
+}
+
+type ProjectsLogServicesService struct {
+ s *Service
+
+ Indexes *ProjectsLogServicesIndexesService
+
+ Sinks *ProjectsLogServicesSinksService
+}
+
+func NewProjectsLogServicesIndexesService(s *Service) *ProjectsLogServicesIndexesService {
+ rs := &ProjectsLogServicesIndexesService{s: s}
+ return rs
+}
+
+type ProjectsLogServicesIndexesService struct {
+ s *Service
+}
+
+func NewProjectsLogServicesSinksService(s *Service) *ProjectsLogServicesSinksService {
+ rs := &ProjectsLogServicesSinksService{s: s}
+ return rs
+}
+
+type ProjectsLogServicesSinksService struct {
+ s *Service
+}
+
+func NewProjectsLogsService(s *Service) *ProjectsLogsService {
+ rs := &ProjectsLogsService{s: s}
+ rs.Entries = NewProjectsLogsEntriesService(s)
+ rs.Sinks = NewProjectsLogsSinksService(s)
+ return rs
+}
+
+type ProjectsLogsService struct {
+ s *Service
+
+ Entries *ProjectsLogsEntriesService
+
+ Sinks *ProjectsLogsSinksService
+}
+
+func NewProjectsLogsEntriesService(s *Service) *ProjectsLogsEntriesService {
+ rs := &ProjectsLogsEntriesService{s: s}
+ return rs
+}
+
+type ProjectsLogsEntriesService struct {
+ s *Service
+}
+
+func NewProjectsLogsSinksService(s *Service) *ProjectsLogsSinksService {
+ rs := &ProjectsLogsSinksService{s: s}
+ return rs
+}
+
+type ProjectsLogsSinksService struct {
+ s *Service
+}
+
+func NewProjectsMetricsService(s *Service) *ProjectsMetricsService {
+ rs := &ProjectsMetricsService{s: s}
+ return rs
+}
+
+type ProjectsMetricsService struct {
+ s *Service
+}
+
+func NewProjectsSinksService(s *Service) *ProjectsSinksService {
+ rs := &ProjectsSinksService{s: s}
+ return rs
+}
+
+type ProjectsSinksService struct {
+ s *Service
+}
+
+// Empty: A generic empty message that you can re-use to avoid defining
+// duplicated empty messages in your APIs. A typical example is to use
+// it as the request or the response type of an API method. For
+// instance: service Foo { rpc Bar(google.protobuf.Empty) returns
+// (google.protobuf.Empty); } The JSON representation for `Empty` is
+// the empty JSON object `{}`.
+type Empty struct {
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+}
+
+// HttpRequest: A common proto for logging HTTP requests.
+type HttpRequest struct {
+ // CacheHit: Whether or not an entity was served from cache (with or
+ // without validation).
+ CacheHit bool `json:"cacheHit,omitempty"`
+
+ // Referer: Referer (a.k.a. referrer) URL of request, as defined in
+ // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html.
+ Referer string `json:"referer,omitempty"`
+
+ // RemoteIp: IP address of the client that issued the HTTP request.
+ // It could be either an IPv4 or an IPv6 address.
+ RemoteIp string `json:"remoteIp,omitempty"`
+
+ // RequestMethod: Request method, such as `GET`, `HEAD`, `PUT` or
+ // `POST`.
+ RequestMethod string `json:"requestMethod,omitempty"`
+
+ // RequestSize: Size of the HTTP request message in bytes, including
+ // request headers and the request body.
+ RequestSize int64 `json:"requestSize,omitempty,string"`
+
+ // RequestUrl: Contains the scheme (http|https), the host name, the path
+ // and the query portion of the URL that was requested.
+ RequestUrl string `json:"requestUrl,omitempty"`
+
+ // ResponseSize: Size of the HTTP response message in bytes sent back to
+ // the client, including response headers and response body.
+ ResponseSize int64 `json:"responseSize,omitempty,string"`
+
+ // Status: The response code indicating the status of the response.
+ // Example: 200.
+ Status int64 `json:"status,omitempty"`
+
+ // UserAgent: User agent sent by the client, e.g., "Mozilla/4.0
+ // (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)".
+ UserAgent string `json:"userAgent,omitempty"`
+
+ // ValidatedWithOriginServer: Whether or not the response was validated
+ // with the origin server before being served from cache. This field is
+ // only meaningful if cache_hit is True.
+ ValidatedWithOriginServer bool `json:"validatedWithOriginServer,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "CacheHit") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HttpRequest) MarshalJSON() ([]byte, error) {
+ type noMethod HttpRequest
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
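+
+// ForceSendFields appears on every request type in this file. A minimal
+// sketch of its effect on HttpRequest, assuming gensupport's documented
+// behavior: a false CacheHit is normally dropped by the omitempty tag,
+// but listing the field forces it onto the wire.
+//
+//    r := &logging.HttpRequest{CacheHit: false}
+//    b, _ := r.MarshalJSON() // {} (CacheHit omitted as an empty value)
+//    r.ForceSendFields = []string{"CacheHit"}
+//    b, _ = r.MarshalJSON() // {"cacheHit":false} (explicitly sent)
+//    _ = b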
+
+// ListLogMetricsResponse: Result returned from ListLogMetrics.
+type ListLogMetricsResponse struct {
+ // Metrics: The list of metrics that was requested.
+ Metrics []*LogMetric `json:"metrics,omitempty"`
+
+ // NextPageToken: If there are more results, then `nextPageToken` is
+ // returned in the response. To get the next batch of entries, use the
+ // value of `nextPageToken` as `pageToken` in the next call of
+ // `ListLogMetrics`. If `nextPageToken` is empty, then there are no more
+ // results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Metrics") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListLogMetricsResponse) MarshalJSON() ([]byte, error) {
+ type noMethod ListLogMetricsResponse
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ListLogServiceIndexesResponse: Result returned from
+// ListLogServiceIndexesRequest.
+type ListLogServiceIndexesResponse struct {
+ // NextPageToken: If there are more results, then `nextPageToken` is
+ // returned in the response. To get the next batch of indexes, use the
+ // value of `nextPageToken` as `pageToken` in the next call of
+ // `ListLogServiceIndexes`. If `nextPageToken` is empty, then there are
+ // no more results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // ServiceIndexPrefixes: A list of log service index values. Each index
+ // value has the form "/value1/value2/...", where `value1` is a value
+ // in the primary index, `value2` is a value in the secondary index, and
+ // so forth.
+ ServiceIndexPrefixes []string `json:"serviceIndexPrefixes,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "NextPageToken") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListLogServiceIndexesResponse) MarshalJSON() ([]byte, error) {
+ type noMethod ListLogServiceIndexesResponse
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ListLogServiceSinksResponse: Result returned from
+// `ListLogServiceSinks`.
+type ListLogServiceSinksResponse struct {
+ // Sinks: The requested log service sinks. If a returned `LogSink`
+ // object has an empty `destination` field, the client can retrieve the
+ // complete `LogSink` object by calling `logServices.sinks.get`.
+ Sinks []*LogSink `json:"sinks,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Sinks") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListLogServiceSinksResponse) MarshalJSON() ([]byte, error) {
+ type noMethod ListLogServiceSinksResponse
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ListLogServicesResponse: Result returned from
+// `ListLogServicesRequest`.
+type ListLogServicesResponse struct {
+ // LogServices: A list of log services.
+ LogServices []*LogService `json:"logServices,omitempty"`
+
+ // NextPageToken: If there are more results, then `nextPageToken` is
+ // returned in the response. To get the next batch of services, use the
+ // value of `nextPageToken` as `pageToken` in the next call of
+ // `ListLogServices`. If `nextPageToken` is empty, then there are no
+ // more results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "LogServices") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListLogServicesResponse) MarshalJSON() ([]byte, error) {
+ type noMethod ListLogServicesResponse
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ListLogSinksResponse: Result returned from `ListLogSinks`.
+type ListLogSinksResponse struct {
+ // Sinks: The requested log sinks. If a returned `LogSink` object has an
+ // empty `destination` field, the client can retrieve the complete
+ // `LogSink` object by calling `log.sinks.get`.
+ Sinks []*LogSink `json:"sinks,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Sinks") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListLogSinksResponse) MarshalJSON() ([]byte, error) {
+ type noMethod ListLogSinksResponse
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ListLogsResponse: Result returned from ListLogs.
+type ListLogsResponse struct {
+ // Logs: A list of log descriptions matching the criteria.
+ Logs []*Log `json:"logs,omitempty"`
+
+ // NextPageToken: If there are more results, then `nextPageToken` is
+ // returned in the response. To get the next batch of logs, use the
+ // value of `nextPageToken` as `pageToken` in the next call of
+ // `ListLogs`. If `nextPageToken` is empty, then there are no more
+ // results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Logs") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListLogsResponse) MarshalJSON() ([]byte, error) {
+ type noMethod ListLogsResponse
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ListSinksResponse: Result returned from `ListSinks`.
+type ListSinksResponse struct {
+ // Sinks: The requested sinks. If a returned `LogSink` object has an
+ // empty `destination` field, the client can retrieve the complete
+ // `LogSink` object by calling `projects.sinks.get`.
+ Sinks []*LogSink `json:"sinks,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Sinks") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListSinksResponse) MarshalJSON() ([]byte, error) {
+ type noMethod ListSinksResponse
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Log: _Output only._ Describes a log, which is a named stream of log
+// entries.
+type Log struct {
+ // DisplayName: _Optional._ The common name of the log. Example:
+ // "request_log".
+ DisplayName string `json:"displayName,omitempty"`
+
+ // Name: The resource name of the log. Example:
+ // "/projects/my-gcp-project-id/logs/LOG_NAME", where `LOG_NAME` is
+ // the URL-encoded given name of the log. The log includes those log
+ // entries whose `LogEntry.log` field contains this given name. To avoid
+ // name collisions, it is a best practice to prefix the given log name
+ // with the service name, but this is not required. Examples of log
+ // given names: "appengine.googleapis.com/request_log",
+ // "apache-access".
+ Name string `json:"name,omitempty"`
+
+ // PayloadType: _Optional._ A URI representing the expected payload type
+ // for log entries.
+ PayloadType string `json:"payloadType,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "DisplayName") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Log) MarshalJSON() ([]byte, error) {
+ type noMethod Log
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// LogEntry: An individual entry in a log.
+type LogEntry struct {
+ // HttpRequest: Information about the HTTP request associated with this
+ // log entry, if applicable.
+ HttpRequest *HttpRequest `json:"httpRequest,omitempty"`
+
+ // InsertId: A unique ID for the log entry. If you provide this field,
+ // the logging service considers other log entries in the same log with
+ // the same ID as duplicates which can be removed.
+ InsertId string `json:"insertId,omitempty"`
+
+ // Log: The log to which this entry belongs. When a log entry is
+ // ingested, the value of this field is set by the logging system.
+ Log string `json:"log,omitempty"`
+
+ // Metadata: Information about the log entry.
+ Metadata *LogEntryMetadata `json:"metadata,omitempty"`
+
+ // ProtoPayload: The log entry payload, represented as a protocol buffer
+ // that is expressed as a JSON object. You can only pass `protoPayload`
+ // values that belong to a set of approved types.
+ ProtoPayload LogEntryProtoPayload `json:"protoPayload,omitempty"`
+
+ // StructPayload: The log entry payload, represented as a structure that
+ // is expressed as a JSON object.
+ StructPayload LogEntryStructPayload `json:"structPayload,omitempty"`
+
+ // TextPayload: The log entry payload, represented as a Unicode string
+ // (UTF-8).
+ TextPayload string `json:"textPayload,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "HttpRequest") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogEntry) MarshalJSON() ([]byte, error) {
+ type noMethod LogEntry
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type LogEntryProtoPayload interface{}
+
+type LogEntryStructPayload interface{}
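+
+// LogEntryProtoPayload and LogEntryStructPayload are open interface types:
+// any JSON-marshalable value may be supplied. A hedged sketch of building
+// an entry with a structured payload (field values are hypothetical; the
+// timestamp format assumes RFC 3339):
+//
+//    entry := &logging.LogEntry{
+//        Metadata: &logging.LogEntryMetadata{
+//            ServiceName: "compute.googleapis.com",
+//            Severity:    "INFO",
+//            Timestamp:   time.Now().UTC().Format(time.RFC3339),
+//        },
+//        StructPayload: map[string]interface{}{"event": "started"},
+//    }
+//    _ = entry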
+
+// LogEntryMetadata: Additional data that is associated with a log
+// entry, set by the service creating the log entry.
+type LogEntryMetadata struct {
+ // Labels: A set of (key, value) data that provides additional
+ // information about the log entry. If the log entry is from one of the
+ // Google Cloud Platform sources listed below, the indicated (key,
+ // value) information must be provided:
+ //
+ // Google App Engine, service_name `appengine.googleapis.com`:
+ // "appengine.googleapis.com/module_id",
+ // "appengine.googleapis.com/version_id", and one of
+ // "appengine.googleapis.com/replica_index" or
+ // "appengine.googleapis.com/clone_id".
+ //
+ // Google Compute Engine, service_name `compute.googleapis.com`:
+ // "compute.googleapis.com/resource_type" (with the value "instance")
+ // and "compute.googleapis.com/resource_id".
+ Labels map[string]string `json:"labels,omitempty"`
+
+ // ProjectId: The project ID of the Google Cloud Platform service that
+ // created the log entry.
+ ProjectId string `json:"projectId,omitempty"`
+
+ // Region: The region name of the Google Cloud Platform service that
+ // created the log entry. For example, "us-central1".
+ Region string `json:"region,omitempty"`
+
+ // ServiceName: The API name of the Google Cloud Platform service that
+ // created the log entry. For example, "compute.googleapis.com".
+ ServiceName string `json:"serviceName,omitempty"`
+
+ // Severity: The severity of the log entry.
+ //
+ // Possible values:
+ // "DEFAULT"
+ // "DEBUG"
+ // "INFO"
+ // "NOTICE"
+ // "WARNING"
+ // "ERROR"
+ // "CRITICAL"
+ // "ALERT"
+ // "EMERGENCY"
+ Severity string `json:"severity,omitempty"`
+
+ // Timestamp: The time the event described by the log entry occurred.
+ // Timestamps must be later than January 1, 1970.
+ Timestamp string `json:"timestamp,omitempty"`
+
+ // UserId: The fully-qualified email address of the authenticated user
+ // that performed or requested the action represented by the log entry.
+ // If the log entry does not apply to an action taken by an
+ // authenticated user, then the field should be empty.
+ UserId string `json:"userId,omitempty"`
+
+ // Zone: The zone of the Google Cloud Platform service that created the
+ // log entry. For example, "us-central1-a".
+ Zone string `json:"zone,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Labels") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogEntryMetadata) MarshalJSON() ([]byte, error) {
+ type noMethod LogEntryMetadata
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// LogError: Describes a problem with a logging resource or operation.
+type LogError struct {
+ // Resource: A resource name associated with this error. For example,
+ // the name of a Cloud Storage bucket that has insufficient permissions
+ // to be a destination for log entries.
+ Resource string `json:"resource,omitempty"`
+
+ // Status: The error description, including a classification code, an
+ // error message, and other details.
+ Status *Status `json:"status,omitempty"`
+
+ // TimeNanos: The time the error was observed, in nanoseconds since the
+ // Unix epoch.
+ TimeNanos int64 `json:"timeNanos,omitempty,string"`
+
+ // ForceSendFields is a list of field names (e.g. "Resource") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogError) MarshalJSON() ([]byte, error) {
+ type noMethod LogError
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// LogLine: Application log line emitted while processing a request.
+type LogLine struct {
+ // LogMessage: App-provided log message.
+ LogMessage string `json:"logMessage,omitempty"`
+
+ // Severity: Severity of log.
+ //
+ // Possible values:
+ // "DEFAULT"
+ // "DEBUG"
+ // "INFO"
+ // "NOTICE"
+ // "WARNING"
+ // "ERROR"
+ // "CRITICAL"
+ // "ALERT"
+ // "EMERGENCY"
+ Severity string `json:"severity,omitempty"`
+
+ // SourceLocation: Line of code that generated this log message.
+ SourceLocation *SourceLocation `json:"sourceLocation,omitempty"`
+
+ // Time: Time when log entry was made. May be inaccurate.
+ Time string `json:"time,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "LogMessage") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogLine) MarshalJSON() ([]byte, error) {
+ type noMethod LogLine
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// LogMetric: Describes a logs-based metric. The value of the metric is
+// the number of log entries in your project that match a logs filter.
+type LogMetric struct {
+ // Description: A description of this metric.
+ Description string `json:"description,omitempty"`
+
+ // Filter: An [advanced logs
+ // filter](/logging/docs/view/advanced_filters). Example: "log:syslog
+ // AND metadata.severity>=ERROR".
+ Filter string `json:"filter,omitempty"`
+
+ // Name: The client-assigned name for this metric, such as
+ // "severe_errors". Metric names are limited to 1000 characters and
+ // can include only the following characters: `A-Z`, `a-z`, `0-9`, and
+ // the special characters `_-.,+!*',()%/\`. The slash character (`/`)
+ // denotes a hierarchy of name pieces, and it cannot be the first
+ // character of the name.
+ Name string `json:"name,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Description") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogMetric) MarshalJSON() ([]byte, error) {
+ type noMethod LogMetric
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// LogService: _Output only._ Describes a service that writes log
+// entries.
+type LogService struct {
+ // IndexKeys: A list of the names of the keys used to index and label
+ // individual log entries from this service. The first two keys are used
+ // as the primary and secondary index, respectively. Additional keys may
+ // be used to label the entries. For example, App Engine indexes its
+ // entries by module and by version, so its `indexKeys` field is the
+ // following: [ "appengine.googleapis.com/module_id",
+ // "appengine.googleapis.com/version_id" ]
+ IndexKeys []string `json:"indexKeys,omitempty"`
+
+ // Name: The service's name. Example: "appengine.googleapis.com". Log
+ // names beginning with this string are reserved for this service. This
+ // value can appear in the `LogEntry.metadata.serviceName` field of log
+ // entries associated with this log service.
+ Name string `json:"name,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "IndexKeys") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogService) MarshalJSON() ([]byte, error) {
+ type noMethod LogService
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// LogSink: Describes where log entries are written outside of Cloud
+// Logging.
+type LogSink struct {
+ // Destination: The resource name of the destination. Cloud Logging
+ // writes designated log entries to this destination. For example,
+ // "storage.googleapis.com/my-output-bucket".
+ Destination string `json:"destination,omitempty"`
+
+ // Errors: _Output only._ If any errors occur when invoking a sink
+ // method, then this field contains descriptions of the errors.
+ Errors []*LogError `json:"errors,omitempty"`
+
+ // Filter: An advanced logs filter. If present, only log entries
+ // matching the filter are written. Only project sinks use this field;
+ // log sinks and log service sinks must not include a filter.
+ Filter string `json:"filter,omitempty"`
+
+ // Name: The client-assigned name of this sink. For example,
+ // "my-syslog-sink". The name must be unique among the sinks of a
+ // similar kind in the project.
+ Name string `json:"name,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Destination") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogSink) MarshalJSON() ([]byte, error) {
+ type noMethod LogSink
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// RequestLog: Complete log information about a single request to an
+// application.
+type RequestLog struct {
+ // AppEngineRelease: App Engine release version string.
+ AppEngineRelease string `json:"appEngineRelease,omitempty"`
+
+ // AppId: Identifies the application that handled this request.
+ AppId string `json:"appId,omitempty"`
+
+ // Cost: An indication of the relative cost of serving this request.
+ Cost float64 `json:"cost,omitempty"`
+
+ // EndTime: Time at which request was known to end processing.
+ EndTime string `json:"endTime,omitempty"`
+
+ // Finished: If true, represents a finished request. Otherwise, the
+ // request is active.
+ Finished bool `json:"finished,omitempty"`
+
+ // Host: The Internet host and port number of the resource being
+ // requested.
+ Host string `json:"host,omitempty"`
+
+ // HttpVersion: HTTP version of request.
+ HttpVersion string `json:"httpVersion,omitempty"`
+
+ // InstanceId: An opaque identifier for the instance that handled the
+ // request.
+ InstanceId string `json:"instanceId,omitempty"`
+
+ // InstanceIndex: If the instance that processed this request was
+ // individually addressable (i.e. belongs to a manually scaled module),
+ // this is the index of the instance.
+ InstanceIndex int64 `json:"instanceIndex,omitempty"`
+
+ // Ip: Origin IP address.
+ Ip string `json:"ip,omitempty"`
+
+ // Latency: Latency of the request.
+ Latency string `json:"latency,omitempty"`
+
+ // Line: List of log lines emitted by the application while serving this
+ // request, if requested.
+ Line []*LogLine `json:"line,omitempty"`
+
+ // MegaCycles: Number of CPU megacycles used to process request.
+ MegaCycles int64 `json:"megaCycles,omitempty,string"`
+
+ // Method: Request method, such as `GET`, `HEAD`, `PUT`, `POST`, or
+ // `DELETE`.
+ Method string `json:"method,omitempty"`
+
+ // ModuleId: Identifies the module of the application that handled this
+ // request.
+ ModuleId string `json:"moduleId,omitempty"`
+
+ // Nickname: A string that identifies a logged-in user who made this
+ // request, or empty if the user is not logged in. Most likely, this is
+ // the part of the user's email before the '@' sign. The field value is
+ // the same for different requests from the same user, but different
+ // users may have a similar name. This information is also available to
+ // the application via the Users API. This field will be populated
+ // starting with App Engine 1.9.21.
+ Nickname string `json:"nickname,omitempty"`
+
+ // PendingTime: Time this request spent in the pending request queue, if
+ // it was pending at all.
+ PendingTime string `json:"pendingTime,omitempty"`
+
+ // Referrer: Referrer URL of request.
+ Referrer string `json:"referrer,omitempty"`
+
+ // RequestId: Globally unique identifier for a request, based on request
+ // start time. Request IDs for requests which started later will compare
+ // greater as strings than those for requests which started earlier.
+ RequestId string `json:"requestId,omitempty"`
+
+ // Resource: Contains the path and query portion of the URL that was
+ // requested. For example, if the URL was
+ // "http://example.com/app?name=val", the resource would be
+ // "/app?name=val". Any trailing fragment (separated by a '#' character)
+ // will not be included.
+ Resource string `json:"resource,omitempty"`
+
+ // ResponseSize: Size in bytes sent back to the client by this request.
+ ResponseSize int64 `json:"responseSize,omitempty,string"`
+
+ // SourceReference: Source code for the application that handled this
+ // request. There can be more than one source reference per deployed
+ // application if source code is distributed among multiple
+ // repositories.
+ SourceReference []*SourceReference `json:"sourceReference,omitempty"`
+
+ // StartTime: Time at which request was known to have begun processing.
+ StartTime string `json:"startTime,omitempty"`
+
+ // Status: Response status of request.
+ Status int64 `json:"status,omitempty"`
+
+ // TaskName: Task name of the request (for an offline request).
+ TaskName string `json:"taskName,omitempty"`
+
+ // TaskQueueName: Queue name of the request (for an offline request).
+ TaskQueueName string `json:"taskQueueName,omitempty"`
+
+ // TraceId: Cloud Trace identifier of the trace for this request.
+ TraceId string `json:"traceId,omitempty"`
+
+ // UrlMapEntry: File or class within the URL mapping used for the
+ // request. Useful for tracking down the source code responsible for
+ // managing the request, especially for multiply-mapped handlers.
+ UrlMapEntry string `json:"urlMapEntry,omitempty"`
+
+ // UserAgent: User agent used for making request.
+ UserAgent string `json:"userAgent,omitempty"`
+
+ // VersionId: Version of the application that handled this request.
+ VersionId string `json:"versionId,omitempty"`
+
+ // WasLoadingRequest: Was this request a loading request for this
+ // instance?
+ WasLoadingRequest bool `json:"wasLoadingRequest,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "AppEngineRelease") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *RequestLog) MarshalJSON() ([]byte, error) {
+ type noMethod RequestLog
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// SourceLocation: Specifies a location in a source file.
+type SourceLocation struct {
+ // File: Source file name. May or may not be a fully qualified name,
+ // depending on the runtime environment.
+ File string `json:"file,omitempty"`
+
+ // FunctionName: Human-readable name of the function or method being
+ // invoked, with optional context such as the class or package name, for
+ // use in contexts such as the logs viewer where a file:line number is
+ // less meaningful. The format may vary by language, for example:
+ // in Java: "qual.if.ied.Class.method"; in Go: "dir/package.func";
+ // in Python: "function" ...
+ FunctionName string `json:"functionName,omitempty"`
+
+ // Line: Line within the source file.
+ Line int64 `json:"line,omitempty,string"`
+
+ // ForceSendFields is a list of field names (e.g. "File") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SourceLocation) MarshalJSON() ([]byte, error) {
+ type noMethod SourceLocation
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// SourceReference: A reference to a particular snapshot of the source
+// tree used to build and deploy an application.
+type SourceReference struct {
+ // Repository: Optional. A URI string identifying the repository.
+ // Example: "https://github.com/GoogleCloudPlatform/kubernetes.git"
+ Repository string `json:"repository,omitempty"`
+
+ // RevisionId: The canonical (and persistent) identifier of the deployed
+ // revision. Example (git): "0035781c50ec7aa23385dc841529ce8a4b70db1b"
+ RevisionId string `json:"revisionId,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Repository") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SourceReference) MarshalJSON() ([]byte, error) {
+ type noMethod SourceReference
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Status: The `Status` type defines a logical error model that is
+// suitable for different programming environments, including REST APIs
+// and RPC APIs. It is used by [gRPC](https://github.com/grpc). The
+// error model is designed to be:
+//
+// - Simple to use and understand for most users
+// - Flexible enough to meet unexpected needs
+//
+// # Overview
+//
+// The `Status` message contains three pieces of data: error code, error
+// message, and error details. The error code should be an enum value of
+// google.rpc.Code, but it may accept additional error codes if needed.
+// The error message should be a developer-facing English message that
+// helps developers *understand* and *resolve* the error. If a localized
+// user-facing error message is needed, put the localized message in the
+// error details or localize it in the client. The optional error
+// details may contain arbitrary information about the error. There is a
+// predefined set of error detail types in the package `google.rpc`
+// which can be used for common error conditions.
+//
+// # Language mapping
+//
+// The `Status` message is the logical representation of the error
+// model, but it is not necessarily the actual wire format. When the
+// `Status` message is exposed in different client libraries and
+// different wire protocols, it can be mapped differently. For example,
+// it will likely be mapped to some exceptions in Java, but more likely
+// mapped to some error codes in C.
+//
+// # Other uses
+//
+// The error model and the `Status` message can be used in a variety of
+// environments, either with or without APIs, to provide a consistent
+// developer experience across different environments. Example uses of
+// this error model include:
+//
+// - Partial errors. If a service needs to return partial errors to the
+// client, it may embed the `Status` in the normal response to indicate
+// the partial errors.
+//
+// - Workflow errors. A typical workflow has multiple steps. Each step
+// may have a `Status` message for error reporting purposes.
+//
+// - Batch operations. If a client uses a batch request and batch
+// response, the `Status` message should be used directly inside the
+// batch response, one for each error sub-response.
+//
+// - Asynchronous operations. If an API call embeds asynchronous
+// operation results in its response, the status of those operations
+// should be represented directly using the `Status` message.
+//
+// - Logging. If some API errors are stored in logs, the message
+// `Status` could be used directly after any stripping needed for
+// security/privacy reasons.
+type Status struct {
+ // Code: The status code, which should be an enum value of
+ // google.rpc.Code.
+ Code int64 `json:"code,omitempty"`
+
+ // Details: A list of messages that carry the error details. There will
+ // be a common set of message types for APIs to use.
+ Details []StatusDetails `json:"details,omitempty"`
+
+ // Message: A developer-facing error message, which should be in
+ // English. Any user-facing error message should be localized and sent
+ // in the google.rpc.Status.details field, or localized by the client.
+ Message string `json:"message,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Code") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Status) MarshalJSON() ([]byte, error) {
+ type noMethod Status
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type StatusDetails interface{}
+
+// WriteLogEntriesRequest: The parameters to WriteLogEntries.
+type WriteLogEntriesRequest struct {
+ // CommonLabels: Metadata labels that apply to all log entries in this
+ // request, so that you don't have to repeat them in each log entry's
+ // `metadata.labels` field. If any of the log entries contains a (key,
+ // value) with the same key that is in `commonLabels`, then the entry's
+ // (key, value) overrides the one in `commonLabels`.
+ CommonLabels map[string]string `json:"commonLabels,omitempty"`
+
+ // Entries: Log entries to insert.
+ Entries []*LogEntry `json:"entries,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "CommonLabels") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *WriteLogEntriesRequest) MarshalJSON() ([]byte, error) {
+ type noMethod WriteLogEntriesRequest
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
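+
+// A hedged sketch of assembling a WriteLogEntriesRequest. CommonLabels is
+// factored out of every entry, and a per-entry label with the same key
+// overrides it; the Write call that consumes this request is generated
+// further down in this file. Label values are hypothetical:
+//
+//    req := &logging.WriteLogEntriesRequest{
+//        CommonLabels: map[string]string{
+//            "compute.googleapis.com/resource_type": "instance",
+//            "compute.googleapis.com/resource_id":   "my-instance-id",
+//        },
+//        Entries: []*logging.LogEntry{entry}, // entry as in the LogEntry sketch above
+//    }
+//    _ = req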
+
+// WriteLogEntriesResponse: Result returned from WriteLogEntries. The
+// message is empty.
+type WriteLogEntriesResponse struct {
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+}
+
+// method id "logging.projects.logServices.list":
+
+type ProjectsLogServicesListCall struct {
+ s *Service
+ projectsId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Lists the log services that have log entries in this project.
+func (r *ProjectsLogServicesService) List(projectsId string) *ProjectsLogServicesListCall {
+ c := &ProjectsLogServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ return c
+}
+
+// PageSize sets the optional parameter "pageSize": The maximum number
+// of `LogService` objects to return in one operation.
+func (c *ProjectsLogServicesListCall) PageSize(pageSize int64) *ProjectsLogServicesListCall {
+ c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": An opaque token,
+// returned as `nextPageToken` by a prior `ListLogServices` operation.
+// If `pageToken` is supplied, then the other fields of this request are
+// ignored, and instead the previous `ListLogServices` operation is
+// continued.
+func (c *ProjectsLogServicesListCall) PageToken(pageToken string) *ProjectsLogServicesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogServicesListCall) QuotaUser(quotaUser string) *ProjectsLogServicesListCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogServicesListCall) Fields(s ...googleapi.Field) *ProjectsLogServicesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsLogServicesListCall) IfNoneMatch(entityTag string) *ProjectsLogServicesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
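+
+// A hedged sketch of conditional fetching with IfNoneMatch: replay the
+// ETag saved from an earlier response (the etag variable is assumed to
+// hold ServerResponse.Header.Get("Etag") from that call) and treat a 304
+// as "unchanged":
+//
+//    resp, err := svc.Projects.LogServices.List("my-project-id").
+//        IfNoneMatch(etag).Do()
+//    if googleapi.IsNotModified(err) {
+//        // the cached copy is still current
+//    } else if err != nil {
+//        // a real failure
+//    }
+//    _ = resp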
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogServicesListCall) Context(ctx context.Context) *ProjectsLogServicesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogServicesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logServices")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logServices.list" call.
+// Exactly one of *ListLogServicesResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ListLogServicesResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsLogServicesListCall) Do() (*ListLogServicesResponse, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ListLogServicesResponse{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists the log services that have log entries in this project.",
+ // "httpMethod": "GET",
+ // "id": "logging.projects.logServices.list",
+ // "parameterOrder": [
+ // "projectsId"
+ // ],
+ // "parameters": {
+ // "pageSize": {
+ // "description": "The maximum number of `LogService` objects to return in one operation.",
+ // "format": "int32",
+ // "location": "query",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "An opaque token, returned as `nextPageToken` by a prior `ListLogServices` operation. If `pageToken` is supplied, then the other fields of this request are ignored, and instead the previous `ListLogServices` operation is continued.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `projectName`. The resource name of the project whose services are to be listed.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logServices",
+ // "response": {
+ // "$ref": "ListLogServicesResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.read"
+ // ]
+ // }
+
+}
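+
+// A hedged sketch of paging through ListLogServices using the
+// pageToken/nextPageToken protocol documented above (project ID is
+// hypothetical):
+//
+//    call := svc.Projects.LogServices.List("my-project-id").PageSize(100)
+//    for {
+//        resp, err := call.Do()
+//        if err != nil {
+//            break // handle the error in real code
+//        }
+//        for _, ls := range resp.LogServices {
+//            _ = ls.Name // process each log service
+//        }
+//        if resp.NextPageToken == "" {
+//            break
+//        }
+//        call.PageToken(resp.NextPageToken)
+//    }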
+
+// method id "logging.projects.logServices.indexes.list":
+
+type ProjectsLogServicesIndexesListCall struct {
+ s *Service
+ projectsId string
+ logServicesId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Lists the current index values for a log service.
+func (r *ProjectsLogServicesIndexesService) List(projectsId string, logServicesId string) *ProjectsLogServicesIndexesListCall {
+ c := &ProjectsLogServicesIndexesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logServicesId = logServicesId
+ return c
+}
+
+// Depth sets the optional parameter "depth": A non-negative integer
+// that limits the number of levels of the index hierarchy that are
+// returned. If `depth` is 1 (default), only the first index key value
+// is returned. If `depth` is 2, both primary and secondary key values
+// are returned. If `depth` is 0, the depth is the number of
+// slash-separators in the `indexPrefix` field, not counting a slash
+// appearing as the last character of the prefix. If the `indexPrefix`
+// field is empty, the default depth is 1. It is an error for `depth` to
+// be any positive value less than the number of components in
+// `indexPrefix`.
+func (c *ProjectsLogServicesIndexesListCall) Depth(depth int64) *ProjectsLogServicesIndexesListCall {
+ c.urlParams_.Set("depth", fmt.Sprint(depth))
+ return c
+}
+
+// IndexPrefix sets the optional parameter "indexPrefix": Restricts the
+// index values returned to be those with a specified prefix for each
+// index key. This field has the form "/prefix1/prefix2/...", in order
+// corresponding to the `LogService indexKeys`. Non-empty prefixes must
+// begin with `/`. For example, App Engine's two keys are the module ID
+// and the version ID. The effect of using various values for
+// `indexPrefix` is as follows:
+//
+// - "/Mod/" retrieves `/Mod/10` and `/Mod/11` but not `/ModA/10`.
+// - "/Mod" retrieves `/Mod/10`, `/Mod/11` and `/ModA/10` but not
+// `/XXX/33`.
+// - "/Mod/1" retrieves `/Mod/10` and `/Mod/11` but not `/ModA/10`.
+// - "/Mod/10/" retrieves `/Mod/10` only.
+// - An empty prefix or "/" retrieves all values.
+func (c *ProjectsLogServicesIndexesListCall) IndexPrefix(indexPrefix string) *ProjectsLogServicesIndexesListCall {
+ c.urlParams_.Set("indexPrefix", indexPrefix)
+ return c
+}
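+
+// A hedged sketch combining Depth and IndexPrefix for App Engine's
+// two-level (module, version) index, per the rules above (identifiers
+// are hypothetical):
+//
+//    call := svc.Projects.LogServices.Indexes.
+//        List("my-project-id", "appengine.googleapis.com").
+//        IndexPrefix("/default/"). // only the "default" module
+//        Depth(2)                  // return module and version components
+//    _ = call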
+
+// PageSize sets the optional parameter "pageSize": The maximum number
+// of log service index resources to return in one operation.
+func (c *ProjectsLogServicesIndexesListCall) PageSize(pageSize int64) *ProjectsLogServicesIndexesListCall {
+ c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": An opaque token,
+// returned as `nextPageToken` by a prior `ListLogServiceIndexes`
+// operation. If `pageToken` is supplied, then the other fields of this
+// request are ignored, and instead the previous `ListLogServiceIndexes`
+// operation is continued.
+func (c *ProjectsLogServicesIndexesListCall) PageToken(pageToken string) *ProjectsLogServicesIndexesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogServicesIndexesListCall) QuotaUser(quotaUser string) *ProjectsLogServicesIndexesListCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogServicesIndexesListCall) Fields(s ...googleapi.Field) *ProjectsLogServicesIndexesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsLogServicesIndexesListCall) IfNoneMatch(entityTag string) *ProjectsLogServicesIndexesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogServicesIndexesListCall) Context(ctx context.Context) *ProjectsLogServicesIndexesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogServicesIndexesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logServices/{logServicesId}/indexes")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logServicesId": c.logServicesId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logServices.indexes.list" call.
+// Exactly one of *ListLogServiceIndexesResponse or error will be
+// non-nil. Any non-2xx status code is an error. Response headers are in
+// either *ListLogServiceIndexesResponse.ServerResponse.Header or (if a
+// response was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsLogServicesIndexesListCall) Do() (*ListLogServiceIndexesResponse, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ListLogServiceIndexesResponse{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists the current index values for a log service.",
+ // "httpMethod": "GET",
+ // "id": "logging.projects.logServices.indexes.list",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logServicesId"
+ // ],
+ // "parameters": {
+ // "depth": {
+ // "description": "A non-negative integer that limits the number of levels of the index hierarchy that are returned. If `depth` is 1 (default), only the first index key value is returned. If `depth` is 2, both primary and secondary key values are returned. If `depth` is 0, the depth is the number of slash-separators in the `indexPrefix` field, not counting a slash appearing as the last character of the prefix. If the `indexPrefix` field is empty, the default depth is 1. It is an error for `depth` to be any positive value less than the number of components in `indexPrefix`.",
+ // "format": "int32",
+ // "location": "query",
+ // "type": "integer"
+ // },
+ // "indexPrefix": {
+ // "description": "Restricts the index values returned to be those with a specified prefix for each index key. This field has the form `\"/prefix1/prefix2/...\"`, in order corresponding to the `LogService indexKeys`. Non-empty prefixes must begin with `/`. For example, App Engine's two keys are the module ID and the version ID. Following is the effect of using various values for `indexPrefix`: + `\"/Mod/\"` retrieves `/Mod/10` and `/Mod/11` but not `/ModA/10`. + `\"/Mod` retrieves `/Mod/10`, `/Mod/11` and `/ModA/10` but not `/XXX/33`. + `\"/Mod/1\"` retrieves `/Mod/10` and `/Mod/11` but not `/ModA/10`. + `\"/Mod/10/\"` retrieves `/Mod/10` only. + An empty prefix or `\"/\"` retrieves all values.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "logServicesId": {
+ // "description": "Part of `serviceName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "pageSize": {
+ // "description": "The maximum number of log service index resources to return in one operation.",
+ // "format": "int32",
+ // "location": "query",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "An opaque token, returned as `nextPageToken` by a prior `ListLogServiceIndexes` operation. If `pageToken` is supplied, then the other fields of this request are ignored, and instead the previous `ListLogServiceIndexes` operation is continued.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `serviceName`. The resource name of a log service whose service indexes are requested. Example: `\"projects/my-project-id/logServices/appengine.googleapis.com\"`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logServices/{logServicesId}/indexes",
+ // "response": {
+ // "$ref": "ListLogServiceIndexesResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.read"
+ // ]
+ // }
+
+}
+
+// method id "logging.projects.logServices.sinks.create":
+
+type ProjectsLogServicesSinksCreateCall struct {
+ s *Service
+ projectsId string
+ logServicesId string
+ logsink *LogSink
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Create: Creates a log service sink. All log entries from a specified
+// log service are written to the destination.
+func (r *ProjectsLogServicesSinksService) Create(projectsId string, logServicesId string, logsink *LogSink) *ProjectsLogServicesSinksCreateCall {
+ c := &ProjectsLogServicesSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logServicesId = logServicesId
+ c.logsink = logsink
+ return c
+}
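+
+// A hedged sketch of creating a log service sink that exports all entries
+// from a log service to a Cloud Storage bucket (names are hypothetical;
+// per the LogSink docs above, service sinks must not set a filter):
+//
+//    sink, err := svc.Projects.LogServices.Sinks.Create(
+//        "my-project-id",
+//        "appengine.googleapis.com",
+//        &logging.LogSink{
+//            Name:        "my-bucket-sink",
+//            Destination: "storage.googleapis.com/my-output-bucket",
+//        },
+//    ).Do()
+//    if err != nil {
+//        // handle creation failure
+//    }
+//    _ = sink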
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogServicesSinksCreateCall) QuotaUser(quotaUser string) *ProjectsLogServicesSinksCreateCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogServicesSinksCreateCall) Fields(s ...googleapi.Field) *ProjectsLogServicesSinksCreateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogServicesSinksCreateCall) Context(ctx context.Context) *ProjectsLogServicesSinksCreateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogServicesSinksCreateCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logServicesId": c.logServicesId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logServices.sinks.create" call.
+// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *LogSink.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsLogServicesSinksCreateCall) Do() (*LogSink, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &LogSink{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a log service sink. All log entries from a specified log service are written to the destination.",
+ // "httpMethod": "POST",
+ // "id": "logging.projects.logServices.sinks.create",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logServicesId"
+ // ],
+ // "parameters": {
+ // "logServicesId": {
+ // "description": "Part of `serviceName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `serviceName`. The resource name of the log service to which the sink is bound.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks",
+ // "request": {
+ // "$ref": "LogSink"
+ // },
+ // "response": {
+ // "$ref": "LogSink"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin"
+ // ]
+ // }
+
+}
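+
+// A minimal usage sketch for the create call above (editor's note, not part
+// of the generated surface). Create and Do are taken verbatim from this
+// file; the service-field chain (Projects.LogServices.Sinks), the New
+// constructor, and the LogSink field names are assumptions following the
+// standard google-api-go-generator layout.
+//
+//	svc, err := New(oauthClient) // oauthClient: an authorized *http.Client
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	sink := &LogSink{
+//		Name:        "my-sink",                              // hypothetical
+//		Destination: "storage.googleapis.com/my-log-bucket", // hypothetical
+//	}
+//	created, err := svc.Projects.LogServices.Sinks.
+//		Create("my-project-id", "appengine.googleapis.com", sink).Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("created sink:", created.Name)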
+
+// method id "logging.projects.logServices.sinks.delete":
+
+type ProjectsLogServicesSinksDeleteCall struct {
+ s *Service
+ projectsId string
+ logServicesId string
+ sinksId string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Delete: Deletes a log service sink. After deletion, no new log
+// entries are written to the destination.
+func (r *ProjectsLogServicesSinksService) Delete(projectsId string, logServicesId string, sinksId string) *ProjectsLogServicesSinksDeleteCall {
+ c := &ProjectsLogServicesSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logServicesId = logServicesId
+ c.sinksId = sinksId
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogServicesSinksDeleteCall) QuotaUser(quotaUser string) *ProjectsLogServicesSinksDeleteCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogServicesSinksDeleteCall) Fields(s ...googleapi.Field) *ProjectsLogServicesSinksDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogServicesSinksDeleteCall) Context(ctx context.Context) *ProjectsLogServicesSinksDeleteCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogServicesSinksDeleteCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks/{sinksId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logServicesId": c.logServicesId,
+ "sinksId": c.sinksId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logServices.sinks.delete" call.
+// Exactly one of *Empty or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Empty.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsLogServicesSinksDeleteCall) Do() (*Empty, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Empty{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes a log service sink. After deletion, no new log entries are written to the destination.",
+ // "httpMethod": "DELETE",
+ // "id": "logging.projects.logServices.sinks.delete",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logServicesId",
+ // "sinksId"
+ // ],
+ // "parameters": {
+ // "logServicesId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `sinkName`. The resource name of the log service sink to delete.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "sinksId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks/{sinksId}",
+ // "response": {
+ // "$ref": "Empty"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin"
+ // ]
+ // }
+
+}
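+
+// A sketch of deleting a sink under a deadline (editor's note). Context and
+// Do are verbatim from this file; svc and the field chain are the assumed
+// generated layout from the create sketch above.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	if _, err := svc.Projects.LogServices.Sinks.
+//		Delete("my-project-id", "appengine.googleapis.com", "my-sink").
+//		Context(ctx).Do(); err != nil {
+//		log.Fatal(err)
+//	}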
+
+// method id "logging.projects.logServices.sinks.get":
+
+type ProjectsLogServicesSinksGetCall struct {
+ s *Service
+ projectsId string
+ logServicesId string
+ sinksId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Gets a log service sink.
+func (r *ProjectsLogServicesSinksService) Get(projectsId string, logServicesId string, sinksId string) *ProjectsLogServicesSinksGetCall {
+ c := &ProjectsLogServicesSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logServicesId = logServicesId
+ c.sinksId = sinksId
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogServicesSinksGetCall) QuotaUser(quotaUser string) *ProjectsLogServicesSinksGetCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogServicesSinksGetCall) Fields(s ...googleapi.Field) *ProjectsLogServicesSinksGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsLogServicesSinksGetCall) IfNoneMatch(entityTag string) *ProjectsLogServicesSinksGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogServicesSinksGetCall) Context(ctx context.Context) *ProjectsLogServicesSinksGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogServicesSinksGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks/{sinksId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logServicesId": c.logServicesId,
+ "sinksId": c.sinksId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logServices.sinks.get" call.
+// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *LogSink.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsLogServicesSinksGetCall) Do() (*LogSink, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &LogSink{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Gets a log service sink.",
+ // "httpMethod": "GET",
+ // "id": "logging.projects.logServices.sinks.get",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logServicesId",
+ // "sinksId"
+ // ],
+ // "parameters": {
+ // "logServicesId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `sinkName`. The resource name of the log service sink to return.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "sinksId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks/{sinksId}",
+ // "response": {
+ // "$ref": "LogSink"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.read"
+ // ]
+ // }
+
+}
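+
+// A conditional-read sketch using IfNoneMatch, as the doc comment above
+// suggests (editor's note). googleapi.IsNotModified is the documented 304
+// check; lastETag and the identifiers are hypothetical.
+//
+//	sink, err := svc.Projects.LogServices.Sinks.
+//		Get("my-project-id", "appengine.googleapis.com", "my-sink").
+//		IfNoneMatch(lastETag).Do()
+//	switch {
+//	case googleapi.IsNotModified(err):
+//		// Unchanged since lastETag; keep the cached copy.
+//	case err != nil:
+//		log.Fatal(err)
+//	default:
+//		lastETag = sink.ServerResponse.Header.Get("Etag")
+//	}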
+
+// method id "logging.projects.logServices.sinks.list":
+
+type ProjectsLogServicesSinksListCall struct {
+ s *Service
+ projectsId string
+ logServicesId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Lists log service sinks associated with a log service.
+func (r *ProjectsLogServicesSinksService) List(projectsId string, logServicesId string) *ProjectsLogServicesSinksListCall {
+ c := &ProjectsLogServicesSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logServicesId = logServicesId
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogServicesSinksListCall) QuotaUser(quotaUser string) *ProjectsLogServicesSinksListCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogServicesSinksListCall) Fields(s ...googleapi.Field) *ProjectsLogServicesSinksListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsLogServicesSinksListCall) IfNoneMatch(entityTag string) *ProjectsLogServicesSinksListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogServicesSinksListCall) Context(ctx context.Context) *ProjectsLogServicesSinksListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogServicesSinksListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logServicesId": c.logServicesId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logServices.sinks.list" call.
+// Exactly one of *ListLogServiceSinksResponse or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *ListLogServiceSinksResponse.ServerResponse.Header or (if a response
+// was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsLogServicesSinksListCall) Do() (*ListLogServiceSinksResponse, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ListLogServiceSinksResponse{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists log service sinks associated with a log service.",
+ // "httpMethod": "GET",
+ // "id": "logging.projects.logServices.sinks.list",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logServicesId"
+ // ],
+ // "parameters": {
+ // "logServicesId": {
+ // "description": "Part of `serviceName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `serviceName`. The log service whose sinks are wanted.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks",
+ // "response": {
+ // "$ref": "ListLogServiceSinksResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.read"
+ // ]
+ // }
+
+}
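+
+// A list sketch that also exercises Fields for a partial response (editor's
+// note). The field mask string and the response/LogSink field names (Sinks,
+// Name, Destination) are assumptions in the generator's naming convention.
+//
+//	resp, err := svc.Projects.LogServices.Sinks.
+//		List("my-project-id", "appengine.googleapis.com").
+//		Fields("sinks(name,destination)").
+//		Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, s := range resp.Sinks {
+//		fmt.Println(s.Name, "->", s.Destination)
+//	}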
+
+// method id "logging.projects.logServices.sinks.update":
+
+type ProjectsLogServicesSinksUpdateCall struct {
+ s *Service
+ projectsId string
+ logServicesId string
+ sinksId string
+ logsink *LogSink
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Update: Updates a log service sink. If the sink does not exist, it is
+// created.
+func (r *ProjectsLogServicesSinksService) Update(projectsId string, logServicesId string, sinksId string, logsink *LogSink) *ProjectsLogServicesSinksUpdateCall {
+ c := &ProjectsLogServicesSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logServicesId = logServicesId
+ c.sinksId = sinksId
+ c.logsink = logsink
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogServicesSinksUpdateCall) QuotaUser(quotaUser string) *ProjectsLogServicesSinksUpdateCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogServicesSinksUpdateCall) Fields(s ...googleapi.Field) *ProjectsLogServicesSinksUpdateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogServicesSinksUpdateCall) Context(ctx context.Context) *ProjectsLogServicesSinksUpdateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogServicesSinksUpdateCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks/{sinksId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PUT", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logServicesId": c.logServicesId,
+ "sinksId": c.sinksId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logServices.sinks.update" call.
+// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *LogSink.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsLogServicesSinksUpdateCall) Do() (*LogSink, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &LogSink{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates a log service sink. If the sink does not exist, it is created.",
+ // "httpMethod": "PUT",
+ // "id": "logging.projects.logServices.sinks.update",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logServicesId",
+ // "sinksId"
+ // ],
+ // "parameters": {
+ // "logServicesId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `sinkName`. The resource name of the log service sink to update.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "sinksId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks/{sinksId}",
+ // "request": {
+ // "$ref": "LogSink"
+ // },
+ // "response": {
+ // "$ref": "LogSink"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin"
+ // ]
+ // }
+
+}
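+
+// An upsert sketch (editor's note): per the Update doc above, the same call
+// creates the sink when it does not yet exist. Identifiers and LogSink
+// fields are hypothetical.
+//
+//	sink := &LogSink{
+//		Name:        "my-sink",
+//		Destination: "storage.googleapis.com/my-log-bucket",
+//	}
+//	if _, err := svc.Projects.LogServices.Sinks.
+//		Update("my-project-id", "appengine.googleapis.com", "my-sink", sink).
+//		Do(); err != nil {
+//		log.Fatal(err)
+//	}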
+
+// method id "logging.projects.logs.delete":
+
+type ProjectsLogsDeleteCall struct {
+ s *Service
+ projectsId string
+ logsId string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Delete: Deletes a log and all its log entries. The log will reappear
+// if it receives new entries.
+func (r *ProjectsLogsService) Delete(projectsId string, logsId string) *ProjectsLogsDeleteCall {
+ c := &ProjectsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logsId = logsId
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogsDeleteCall) QuotaUser(quotaUser string) *ProjectsLogsDeleteCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLogsDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogsDeleteCall) Context(ctx context.Context) *ProjectsLogsDeleteCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogsDeleteCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logs/{logsId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logsId": c.logsId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logs.delete" call.
+// Exactly one of *Empty or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Empty.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsLogsDeleteCall) Do() (*Empty, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Empty{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes a log and all its log entries. The log will reappear if it receives new entries.",
+ // "httpMethod": "DELETE",
+ // "id": "logging.projects.logs.delete",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logsId"
+ // ],
+ // "parameters": {
+ // "logsId": {
+ // "description": "Part of `logName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `logName`. The resource name of the log to be deleted.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logs/{logsId}",
+ // "response": {
+ // "$ref": "Empty"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin"
+ // ]
+ // }
+
+}
+
+// method id "logging.projects.logs.list":
+
+type ProjectsLogsListCall struct {
+ s *Service
+ projectsId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Lists the logs in the project. Only logs that have entries are
+// listed.
+func (r *ProjectsLogsService) List(projectsId string) *ProjectsLogsListCall {
+ c := &ProjectsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ return c
+}
+
+// PageSize sets the optional parameter "pageSize": The maximum number
+// of results to return.
+func (c *ProjectsLogsListCall) PageSize(pageSize int64) *ProjectsLogsListCall {
+ c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": An opaque token,
+// returned as `nextPageToken` by a prior `ListLogs` operation. If
+// `pageToken` is supplied, then the other fields of this request are
+// ignored, and instead the previous `ListLogs` operation is continued.
+func (c *ProjectsLogsListCall) PageToken(pageToken string) *ProjectsLogsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogsListCall) QuotaUser(quotaUser string) *ProjectsLogsListCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// ServiceIndexPrefix sets the optional parameter "serviceIndexPrefix":
+// The purpose of this field is to restrict the listed logs to those
+// with entries of a certain kind. If `serviceName` is the name of a log
+// service, then this field may contain values for the log service's
+// indexes. Only logs that have entries whose indexes include the values
+// are listed. The format for this field is "/val1/val2.../valN",
+// where `val1` is a value for the first index, `val2` for the second
+// index, etc. An empty value (a single slash) for an index matches all
+// values, and you can omit values for later indexes entirely.
+func (c *ProjectsLogsListCall) ServiceIndexPrefix(serviceIndexPrefix string) *ProjectsLogsListCall {
+ c.urlParams_.Set("serviceIndexPrefix", serviceIndexPrefix)
+ return c
+}
+
+// ServiceName sets the optional parameter "serviceName": If not empty,
+// this field must be a log service name such as
+// "compute.googleapis.com". Only logs associated with that that log
+// service are listed.
+func (c *ProjectsLogsListCall) ServiceName(serviceName string) *ProjectsLogsListCall {
+ c.urlParams_.Set("serviceName", serviceName)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogsListCall) Fields(s ...googleapi.Field) *ProjectsLogsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsLogsListCall) IfNoneMatch(entityTag string) *ProjectsLogsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogsListCall) Context(ctx context.Context) *ProjectsLogsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogsListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logs")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logs.list" call.
+// Exactly one of *ListLogsResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ListLogsResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsLogsListCall) Do() (*ListLogsResponse, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ListLogsResponse{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists the logs in the project. Only logs that have entries are listed.",
+ // "httpMethod": "GET",
+ // "id": "logging.projects.logs.list",
+ // "parameterOrder": [
+ // "projectsId"
+ // ],
+ // "parameters": {
+ // "pageSize": {
+ // "description": "The maximum number of results to return.",
+ // "format": "int32",
+ // "location": "query",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "An opaque token, returned as `nextPageToken` by a prior `ListLogs` operation. If `pageToken` is supplied, then the other fields of this request are ignored, and instead the previous `ListLogs` operation is continued.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `projectName`. The resource name of the project whose logs are requested. If both `serviceName` and `serviceIndexPrefix` are empty, then all logs with entries in this project are listed.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "serviceIndexPrefix": {
+ // "description": "The purpose of this field is to restrict the listed logs to those with entries of a certain kind. If `serviceName` is the name of a log service, then this field may contain values for the log service's indexes. Only logs that have entries whose indexes include the values are listed. The format for this field is `\"/val1/val2.../valN\"`, where `val1` is a value for the first index, `val2` for the second index, etc. An empty value (a single slash) for an index matches all values, and you can omit values for later indexes entirely.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "serviceName": {
+ //       "description": "If not empty, this field must be a log service name such as `\"compute.googleapis.com\"`. Only logs associated with that log service are listed.",
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logs",
+ // "response": {
+ // "$ref": "ListLogsResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.read"
+ // ]
+ // }
+
+}
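+
+// A pagination sketch for the logs.list call above, driven by the pageToken
+// semantics documented in PageToken (editor's note). The response fields
+// Logs and NextPageToken are assumptions matching the nextPageToken wire
+// name; ServiceName filtering is used exactly as documented above.
+//
+//	token := ""
+//	for {
+//		call := svc.Projects.Logs.List("my-project-id").
+//			ServiceName("compute.googleapis.com").
+//			PageSize(100)
+//		if token != "" {
+//			call = call.PageToken(token)
+//		}
+//		resp, err := call.Do()
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, l := range resp.Logs {
+//			fmt.Println(l.Name) // Log.Name is assumed
+//		}
+//		if resp.NextPageToken == "" {
+//			break
+//		}
+//		token = resp.NextPageToken
+//	}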
+
+// method id "logging.projects.logs.entries.write":
+
+type ProjectsLogsEntriesWriteCall struct {
+ s *Service
+ projectsId string
+ logsId string
+ writelogentriesrequest *WriteLogEntriesRequest
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Write: Writes log entries to Cloud Logging. Each entry consists of a
+// `LogEntry` object. You must fill in all the fields of the object,
+// including one of the payload fields. You may supply a map,
+// `commonLabels`, that holds default (key, value) data for the
+// `entries[].metadata.labels` map in each entry, saving you the trouble
+// of creating identical copies for each entry.
+func (r *ProjectsLogsEntriesService) Write(projectsId string, logsId string, writelogentriesrequest *WriteLogEntriesRequest) *ProjectsLogsEntriesWriteCall {
+ c := &ProjectsLogsEntriesWriteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logsId = logsId
+ c.writelogentriesrequest = writelogentriesrequest
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogsEntriesWriteCall) QuotaUser(quotaUser string) *ProjectsLogsEntriesWriteCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogsEntriesWriteCall) Fields(s ...googleapi.Field) *ProjectsLogsEntriesWriteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogsEntriesWriteCall) Context(ctx context.Context) *ProjectsLogsEntriesWriteCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogsEntriesWriteCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.writelogentriesrequest)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logs/{logsId}/entries:write")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logsId": c.logsId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logs.entries.write" call.
+// Exactly one of *WriteLogEntriesResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *WriteLogEntriesResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsLogsEntriesWriteCall) Do() (*WriteLogEntriesResponse, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &WriteLogEntriesResponse{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Writes log entries to Cloud Logging. Each entry consists of a `LogEntry` object. You must fill in all the fields of the object, including one of the payload fields. You may supply a map, `commonLabels`, that holds default (key, value) data for the `entries[].metadata.labels` map in each entry, saving you the trouble of creating identical copies for each entry.",
+ // "httpMethod": "POST",
+ // "id": "logging.projects.logs.entries.write",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logsId"
+ // ],
+ // "parameters": {
+ // "logsId": {
+ // "description": "Part of `logName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `logName`. The resource name of the log that will receive the log entries.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logs/{logsId}/entries:write",
+ // "request": {
+ // "$ref": "WriteLogEntriesRequest"
+ // },
+ // "response": {
+ // "$ref": "WriteLogEntriesResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.write"
+ // ]
+ // }
+
+}
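+
+// A write sketch matching the Write doc above: commonLabels supplies
+// defaults for entries[].metadata.labels (editor's note). The Go field
+// names (CommonLabels, Entries, Metadata, TextPayload) are assumptions
+// derived from the wire names; Write and Do are verbatim from this file.
+//
+//	req := &WriteLogEntriesRequest{
+//		CommonLabels: map[string]string{
+//			"compute.googleapis.com/resource_type": "instance", // hypothetical
+//		},
+//		Entries: []*LogEntry{
+//			{
+//				Metadata:    &LogEntryMetadata{Severity: "INFO"}, // fields assumed
+//				TextPayload: "hello from the logging client",
+//			},
+//		},
+//	}
+//	if _, err := svc.Projects.Logs.Entries.
+//		Write("my-project-id", "my-log-id", req).Do(); err != nil {
+//		log.Fatal(err)
+//	}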
+
+// method id "logging.projects.logs.sinks.create":
+
+type ProjectsLogsSinksCreateCall struct {
+ s *Service
+ projectsId string
+ logsId string
+ logsink *LogSink
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Create: Creates a log sink. All log entries for a specified log are
+// written to the destination.
+func (r *ProjectsLogsSinksService) Create(projectsId string, logsId string, logsink *LogSink) *ProjectsLogsSinksCreateCall {
+ c := &ProjectsLogsSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logsId = logsId
+ c.logsink = logsink
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogsSinksCreateCall) QuotaUser(quotaUser string) *ProjectsLogsSinksCreateCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogsSinksCreateCall) Fields(s ...googleapi.Field) *ProjectsLogsSinksCreateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogsSinksCreateCall) Context(ctx context.Context) *ProjectsLogsSinksCreateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogsSinksCreateCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logs/{logsId}/sinks")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logsId": c.logsId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logs.sinks.create" call.
+// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *LogSink.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsLogsSinksCreateCall) Do() (*LogSink, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &LogSink{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a log sink. All log entries for a specified log are written to the destination.",
+ // "httpMethod": "POST",
+ // "id": "logging.projects.logs.sinks.create",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logsId"
+ // ],
+ // "parameters": {
+ // "logsId": {
+ // "description": "Part of `logName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ //       "description": "Part of `logName`. The resource name of the log to which the sink is bound.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logs/{logsId}/sinks",
+ // "request": {
+ // "$ref": "LogSink"
+ // },
+ // "response": {
+ // "$ref": "LogSink"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin"
+ // ]
+ // }
+
+}
+
+// method id "logging.projects.logs.sinks.delete":
+
+type ProjectsLogsSinksDeleteCall struct {
+ s *Service
+ projectsId string
+ logsId string
+ sinksId string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Delete: Deletes a log sink. After deletion, no new log entries are
+// written to the destination.
+func (r *ProjectsLogsSinksService) Delete(projectsId string, logsId string, sinksId string) *ProjectsLogsSinksDeleteCall {
+ c := &ProjectsLogsSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logsId = logsId
+ c.sinksId = sinksId
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogsSinksDeleteCall) QuotaUser(quotaUser string) *ProjectsLogsSinksDeleteCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogsSinksDeleteCall) Fields(s ...googleapi.Field) *ProjectsLogsSinksDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogsSinksDeleteCall) Context(ctx context.Context) *ProjectsLogsSinksDeleteCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogsSinksDeleteCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logs/{logsId}/sinks/{sinksId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logsId": c.logsId,
+ "sinksId": c.sinksId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logs.sinks.delete" call.
+// Exactly one of *Empty or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Empty.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsLogsSinksDeleteCall) Do() (*Empty, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Empty{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes a log sink. After deletion, no new log entries are written to the destination.",
+ // "httpMethod": "DELETE",
+ // "id": "logging.projects.logs.sinks.delete",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logsId",
+ // "sinksId"
+ // ],
+ // "parameters": {
+ // "logsId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `sinkName`. The resource name of the log sink to delete.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "sinksId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logs/{logsId}/sinks/{sinksId}",
+ // "response": {
+ // "$ref": "Empty"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin"
+ // ]
+ // }
+
+}
+
+// method id "logging.projects.logs.sinks.get":
+
+type ProjectsLogsSinksGetCall struct {
+ s *Service
+ projectsId string
+ logsId string
+ sinksId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Gets a log sink.
+func (r *ProjectsLogsSinksService) Get(projectsId string, logsId string, sinksId string) *ProjectsLogsSinksGetCall {
+ c := &ProjectsLogsSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logsId = logsId
+ c.sinksId = sinksId
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogsSinksGetCall) QuotaUser(quotaUser string) *ProjectsLogsSinksGetCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogsSinksGetCall) Fields(s ...googleapi.Field) *ProjectsLogsSinksGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsLogsSinksGetCall) IfNoneMatch(entityTag string) *ProjectsLogsSinksGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogsSinksGetCall) Context(ctx context.Context) *ProjectsLogsSinksGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogsSinksGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logs/{logsId}/sinks/{sinksId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logsId": c.logsId,
+ "sinksId": c.sinksId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logs.sinks.get" call.
+// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *LogSink.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsLogsSinksGetCall) Do() (*LogSink, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &LogSink{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Gets a log sink.",
+ // "httpMethod": "GET",
+ // "id": "logging.projects.logs.sinks.get",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logsId",
+ // "sinksId"
+ // ],
+ // "parameters": {
+ // "logsId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `sinkName`. The resource name of the log sink to return.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "sinksId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logs/{logsId}/sinks/{sinksId}",
+ // "response": {
+ // "$ref": "LogSink"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.read"
+ // ]
+ // }
+
+}
+
+// method id "logging.projects.logs.sinks.list":
+
+type ProjectsLogsSinksListCall struct {
+ s *Service
+ projectsId string
+ logsId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Lists log sinks associated with a log.
+func (r *ProjectsLogsSinksService) List(projectsId string, logsId string) *ProjectsLogsSinksListCall {
+ c := &ProjectsLogsSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logsId = logsId
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogsSinksListCall) QuotaUser(quotaUser string) *ProjectsLogsSinksListCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogsSinksListCall) Fields(s ...googleapi.Field) *ProjectsLogsSinksListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsLogsSinksListCall) IfNoneMatch(entityTag string) *ProjectsLogsSinksListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogsSinksListCall) Context(ctx context.Context) *ProjectsLogsSinksListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogsSinksListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logs/{logsId}/sinks")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logsId": c.logsId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logs.sinks.list" call.
+// Exactly one of *ListLogSinksResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ListLogSinksResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsLogsSinksListCall) Do() (*ListLogSinksResponse, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ListLogSinksResponse{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists log sinks associated with a log.",
+ // "httpMethod": "GET",
+ // "id": "logging.projects.logs.sinks.list",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logsId"
+ // ],
+ // "parameters": {
+ // "logsId": {
+ // "description": "Part of `logName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `logName`. The log whose sinks are wanted. For example, `\"compute.google.com/syslog\"`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logs/{logsId}/sinks",
+ // "response": {
+ // "$ref": "ListLogSinksResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.read"
+ // ]
+ // }
+
+}
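+
+// Example (editorial sketch, not part of the generated client): listing the
+// sinks attached to a log with this builder. It assumes svc is a *Service
+// obtained from New(client), that the project and log IDs are placeholders,
+// and that ListLogSinksResponse exposes a Sinks slice as declared earlier in
+// this file; error handling is abbreviated.
+//
+//    resp, err := svc.Projects.Logs.Sinks.List("my-project", "syslog").Do()
+//    if err != nil {
+//        // handle the error
+//    }
+//    for _, sink := range resp.Sinks {
+//        fmt.Println(sink.Name)
+//    }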
+
+// method id "logging.projects.logs.sinks.update":
+
+type ProjectsLogsSinksUpdateCall struct {
+ s *Service
+ projectsId string
+ logsId string
+ sinksId string
+ logsink *LogSink
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Update: Updates a log sink. If the sink does not exist, it is
+// created.
+func (r *ProjectsLogsSinksService) Update(projectsId string, logsId string, sinksId string, logsink *LogSink) *ProjectsLogsSinksUpdateCall {
+ c := &ProjectsLogsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logsId = logsId
+ c.sinksId = sinksId
+ c.logsink = logsink
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsLogsSinksUpdateCall) QuotaUser(quotaUser string) *ProjectsLogsSinksUpdateCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLogsSinksUpdateCall) Fields(s ...googleapi.Field) *ProjectsLogsSinksUpdateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLogsSinksUpdateCall) Context(ctx context.Context) *ProjectsLogsSinksUpdateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsLogsSinksUpdateCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logs/{logsId}/sinks/{sinksId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PUT", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "logsId": c.logsId,
+ "sinksId": c.sinksId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.logs.sinks.update" call.
+// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *LogSink.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsLogsSinksUpdateCall) Do() (*LogSink, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &LogSink{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates a log sink. If the sink does not exist, it is created.",
+ // "httpMethod": "PUT",
+ // "id": "logging.projects.logs.sinks.update",
+ // "parameterOrder": [
+ // "projectsId",
+ // "logsId",
+ // "sinksId"
+ // ],
+ // "parameters": {
+ // "logsId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `sinkName`. The resource name of the sink to update.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "sinksId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/logs/{logsId}/sinks/{sinksId}",
+ // "request": {
+ // "$ref": "LogSink"
+ // },
+ // "response": {
+ // "$ref": "LogSink"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin"
+ // ]
+ // }
+
+}
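+
+// Example (editorial sketch): upserting a sink through this PUT-based Update
+// call, which creates the sink when it does not exist. The IDs and the
+// destination are placeholder values; the LogSink fields follow the schema
+// declared earlier in this file.
+//
+//    sink := &LogSink{
+//        Name:        "my-sink",
+//        Destination: "storage.googleapis.com/my-bucket",
+//    }
+//    updated, err := svc.Projects.Logs.Sinks.Update("my-project", "syslog", "my-sink", sink).Do()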
+
+// method id "logging.projects.metrics.create":
+
+type ProjectsMetricsCreateCall struct {
+ s *Service
+ projectsId string
+ logmetric *LogMetric
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Create: Creates a logs-based metric.
+func (r *ProjectsMetricsService) Create(projectsId string, logmetric *LogMetric) *ProjectsMetricsCreateCall {
+ c := &ProjectsMetricsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logmetric = logmetric
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsMetricsCreateCall) QuotaUser(quotaUser string) *ProjectsMetricsCreateCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsMetricsCreateCall) Fields(s ...googleapi.Field) *ProjectsMetricsCreateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsMetricsCreateCall) Context(ctx context.Context) *ProjectsMetricsCreateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsMetricsCreateCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmetric)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/metrics")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.metrics.create" call.
+// Exactly one of *LogMetric or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *LogMetric.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *ProjectsMetricsCreateCall) Do() (*LogMetric, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &LogMetric{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a logs-based metric.",
+ // "httpMethod": "POST",
+ // "id": "logging.projects.metrics.create",
+ // "parameterOrder": [
+ // "projectsId"
+ // ],
+ // "parameters": {
+ // "projectsId": {
+ // "description": "Part of `projectName`. The resource name of the project in which to create the metric.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/metrics",
+ // "request": {
+ // "$ref": "LogMetric"
+ // },
+ // "response": {
+ // "$ref": "LogMetric"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.write"
+ // ]
+ // }
+
+}
+
+// method id "logging.projects.metrics.delete":
+
+type ProjectsMetricsDeleteCall struct {
+ s *Service
+ projectsId string
+ metricsId string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Delete: Deletes a logs-based metric.
+func (r *ProjectsMetricsService) Delete(projectsId string, metricsId string) *ProjectsMetricsDeleteCall {
+ c := &ProjectsMetricsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.metricsId = metricsId
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsMetricsDeleteCall) QuotaUser(quotaUser string) *ProjectsMetricsDeleteCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsMetricsDeleteCall) Fields(s ...googleapi.Field) *ProjectsMetricsDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsMetricsDeleteCall) Context(ctx context.Context) *ProjectsMetricsDeleteCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsMetricsDeleteCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/metrics/{metricsId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "metricsId": c.metricsId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.metrics.delete" call.
+// Exactly one of *Empty or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Empty.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsMetricsDeleteCall) Do() (*Empty, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Empty{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes a logs-based metric.",
+ // "httpMethod": "DELETE",
+ // "id": "logging.projects.metrics.delete",
+ // "parameterOrder": [
+ // "projectsId",
+ // "metricsId"
+ // ],
+ // "parameters": {
+ // "metricsId": {
+ // "description": "Part of `metricName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `metricName`. The resource name of the metric to delete.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/metrics/{metricsId}",
+ // "response": {
+ // "$ref": "Empty"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.write"
+ // ]
+ // }
+
+}
+
+// method id "logging.projects.metrics.get":
+
+type ProjectsMetricsGetCall struct {
+ s *Service
+ projectsId string
+ metricsId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Gets a logs-based metric.
+func (r *ProjectsMetricsService) Get(projectsId string, metricsId string) *ProjectsMetricsGetCall {
+ c := &ProjectsMetricsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.metricsId = metricsId
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsMetricsGetCall) QuotaUser(quotaUser string) *ProjectsMetricsGetCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsMetricsGetCall) Fields(s ...googleapi.Field) *ProjectsMetricsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsMetricsGetCall) IfNoneMatch(entityTag string) *ProjectsMetricsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsMetricsGetCall) Context(ctx context.Context) *ProjectsMetricsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsMetricsGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/metrics/{metricsId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "metricsId": c.metricsId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.metrics.get" call.
+// Exactly one of *LogMetric or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *LogMetric.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *ProjectsMetricsGetCall) Do() (*LogMetric, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &LogMetric{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Gets a logs-based metric.",
+ // "httpMethod": "GET",
+ // "id": "logging.projects.metrics.get",
+ // "parameterOrder": [
+ // "projectsId",
+ // "metricsId"
+ // ],
+ // "parameters": {
+ // "metricsId": {
+ // "description": "Part of `metricName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `metricName`. The resource name of the desired metric.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/metrics/{metricsId}",
+ // "response": {
+ // "$ref": "LogMetric"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.read"
+ // ]
+ // }
+
+}
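+
+// Example (editorial sketch): a conditional fetch. lastETag stands for an
+// ETag saved from a previous response; when it still matches, Do returns an
+// error that googleapi.IsNotModified reports, mirroring an HTTP 304.
+//
+//    m, err := svc.Projects.Metrics.Get("my-project", "my-metric").
+//        IfNoneMatch(lastETag).
+//        Do()
+//    if googleapi.IsNotModified(err) {
+//        // the cached copy is still current; keep using it
+//    }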
+
+// method id "logging.projects.metrics.list":
+
+type ProjectsMetricsListCall struct {
+ s *Service
+ projectsId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Lists the logs-based metrics associated with a project.
+func (r *ProjectsMetricsService) List(projectsId string) *ProjectsMetricsListCall {
+ c := &ProjectsMetricsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ return c
+}
+
+// PageSize sets the optional parameter "pageSize": The maximum number
+// of `LogMetric` objects to return in one operation.
+func (c *ProjectsMetricsListCall) PageSize(pageSize int64) *ProjectsMetricsListCall {
+ c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": An opaque token,
+// returned as `nextPageToken` by a prior `ListLogMetrics` operation. If
+// `pageToken` is supplied, then the other fields of this request are
+// ignored, and instead the previous `ListLogMetrics` operation is
+// continued.
+func (c *ProjectsMetricsListCall) PageToken(pageToken string) *ProjectsMetricsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsMetricsListCall) QuotaUser(quotaUser string) *ProjectsMetricsListCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsMetricsListCall) Fields(s ...googleapi.Field) *ProjectsMetricsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsMetricsListCall) IfNoneMatch(entityTag string) *ProjectsMetricsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsMetricsListCall) Context(ctx context.Context) *ProjectsMetricsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsMetricsListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/metrics")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.metrics.list" call.
+// Exactly one of *ListLogMetricsResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ListLogMetricsResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsMetricsListCall) Do() (*ListLogMetricsResponse, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ListLogMetricsResponse{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists the logs-based metrics associated with a project.",
+ // "httpMethod": "GET",
+ // "id": "logging.projects.metrics.list",
+ // "parameterOrder": [
+ // "projectsId"
+ // ],
+ // "parameters": {
+ // "pageSize": {
+ // "description": "The maximum number of `LogMetric` objects to return in one operation.",
+ // "format": "int32",
+ // "location": "query",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "An opaque token, returned as `nextPageToken` by a prior `ListLogMetrics` operation. If `pageToken` is supplied, then the other fields of this request are ignored, and instead the previous `ListLogMetrics` operation is continued.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `projectName`. The resource name for the project whose metrics are wanted.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/metrics",
+ // "response": {
+ // "$ref": "ListLogMetricsResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.read"
+ // ]
+ // }
+
+}
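+
+// Example (editorial sketch): paging through metrics by feeding each
+// NextPageToken back into the same builder, assuming ListLogMetricsResponse
+// carries Metrics and NextPageToken fields as declared earlier in this file.
+//
+//    call := svc.Projects.Metrics.List("my-project").PageSize(100)
+//    for {
+//        resp, err := call.Do()
+//        if err != nil {
+//            // handle the error
+//        }
+//        // ... consume resp.Metrics ...
+//        if resp.NextPageToken == "" {
+//            break
+//        }
+//        call.PageToken(resp.NextPageToken)
+//    }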
+
+// method id "logging.projects.metrics.update":
+
+type ProjectsMetricsUpdateCall struct {
+ s *Service
+ projectsId string
+ metricsId string
+ logmetric *LogMetric
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Update: Creates or updates a logs-based metric.
+func (r *ProjectsMetricsService) Update(projectsId string, metricsId string, logmetric *LogMetric) *ProjectsMetricsUpdateCall {
+ c := &ProjectsMetricsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.metricsId = metricsId
+ c.logmetric = logmetric
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsMetricsUpdateCall) QuotaUser(quotaUser string) *ProjectsMetricsUpdateCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsMetricsUpdateCall) Fields(s ...googleapi.Field) *ProjectsMetricsUpdateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsMetricsUpdateCall) Context(ctx context.Context) *ProjectsMetricsUpdateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsMetricsUpdateCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmetric)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/metrics/{metricsId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PUT", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "metricsId": c.metricsId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.metrics.update" call.
+// Exactly one of *LogMetric or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *LogMetric.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *ProjectsMetricsUpdateCall) Do() (*LogMetric, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &LogMetric{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates or updates a logs-based metric.",
+ // "httpMethod": "PUT",
+ // "id": "logging.projects.metrics.update",
+ // "parameterOrder": [
+ // "projectsId",
+ // "metricsId"
+ // ],
+ // "parameters": {
+ // "metricsId": {
+ // "description": "Part of `metricName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectsId": {
+ // "description": "Part of `metricName`. The resource name of the metric to update.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/metrics/{metricsId}",
+ // "request": {
+ // "$ref": "LogMetric"
+ // },
+ // "response": {
+ // "$ref": "LogMetric"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.write"
+ // ]
+ // }
+
+}
+
+// method id "logging.projects.sinks.create":
+
+type ProjectsSinksCreateCall struct {
+ s *Service
+ projectsId string
+ logsink *LogSink
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Create: Creates a project sink. A logs filter determines which log
+// entries are written to the destination.
+func (r *ProjectsSinksService) Create(projectsId string, logsink *LogSink) *ProjectsSinksCreateCall {
+ c := &ProjectsSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.logsink = logsink
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsSinksCreateCall) QuotaUser(quotaUser string) *ProjectsSinksCreateCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsSinksCreateCall) Fields(s ...googleapi.Field) *ProjectsSinksCreateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsSinksCreateCall) Context(ctx context.Context) *ProjectsSinksCreateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsSinksCreateCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/sinks")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.sinks.create" call.
+// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *LogSink.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsSinksCreateCall) Do() (*LogSink, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &LogSink{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a project sink. A logs filter determines which log entries are written to the destination.",
+ // "httpMethod": "POST",
+ // "id": "logging.projects.sinks.create",
+ // "parameterOrder": [
+ // "projectsId"
+ // ],
+ // "parameters": {
+ // "projectsId": {
+ // "description": "Part of `projectName`. The resource name of the project to which the sink is bound.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/sinks",
+ // "request": {
+ // "$ref": "LogSink"
+ // },
+ // "response": {
+ // "$ref": "LogSink"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin"
+ // ]
+ // }
+
+}
+
+// method id "logging.projects.sinks.delete":
+
+type ProjectsSinksDeleteCall struct {
+ s *Service
+ projectsId string
+ sinksId string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Delete: Deletes a project sink. After deletion, no new log entries
+// are written to the destination.
+func (r *ProjectsSinksService) Delete(projectsId string, sinksId string) *ProjectsSinksDeleteCall {
+ c := &ProjectsSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.sinksId = sinksId
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsSinksDeleteCall) QuotaUser(quotaUser string) *ProjectsSinksDeleteCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsSinksDeleteCall) Fields(s ...googleapi.Field) *ProjectsSinksDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsSinksDeleteCall) Context(ctx context.Context) *ProjectsSinksDeleteCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsSinksDeleteCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/sinks/{sinksId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "sinksId": c.sinksId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.sinks.delete" call.
+// Exactly one of *Empty or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Empty.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsSinksDeleteCall) Do() (*Empty, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Empty{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes a project sink. After deletion, no new log entries are written to the destination.",
+ // "httpMethod": "DELETE",
+ // "id": "logging.projects.sinks.delete",
+ // "parameterOrder": [
+ // "projectsId",
+ // "sinksId"
+ // ],
+ // "parameters": {
+ // "projectsId": {
+ // "description": "Part of `sinkName`. The resource name of the project sink to delete.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "sinksId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/sinks/{sinksId}",
+ // "response": {
+ // "$ref": "Empty"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin"
+ // ]
+ // }
+
+}
+
+// method id "logging.projects.sinks.get":
+
+type ProjectsSinksGetCall struct {
+ s *Service
+ projectsId string
+ sinksId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Gets a project sink.
+func (r *ProjectsSinksService) Get(projectsId string, sinksId string) *ProjectsSinksGetCall {
+ c := &ProjectsSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.sinksId = sinksId
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsSinksGetCall) QuotaUser(quotaUser string) *ProjectsSinksGetCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsSinksGetCall) Fields(s ...googleapi.Field) *ProjectsSinksGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsSinksGetCall) IfNoneMatch(entityTag string) *ProjectsSinksGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsSinksGetCall) Context(ctx context.Context) *ProjectsSinksGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsSinksGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/sinks/{sinksId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "sinksId": c.sinksId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.sinks.get" call.
+// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *LogSink.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsSinksGetCall) Do() (*LogSink, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &LogSink{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Gets a project sink.",
+ // "httpMethod": "GET",
+ // "id": "logging.projects.sinks.get",
+ // "parameterOrder": [
+ // "projectsId",
+ // "sinksId"
+ // ],
+ // "parameters": {
+ // "projectsId": {
+ // "description": "Part of `sinkName`. The resource name of the project sink to return.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "sinksId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/sinks/{sinksId}",
+ // "response": {
+ // "$ref": "LogSink"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.read"
+ // ]
+ // }
+
+}
+
+// method id "logging.projects.sinks.list":
+
+type ProjectsSinksListCall struct {
+ s *Service
+ projectsId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Lists project sinks associated with a project.
+func (r *ProjectsSinksService) List(projectsId string) *ProjectsSinksListCall {
+ c := &ProjectsSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsSinksListCall) QuotaUser(quotaUser string) *ProjectsSinksListCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsSinksListCall) Fields(s ...googleapi.Field) *ProjectsSinksListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsSinksListCall) IfNoneMatch(entityTag string) *ProjectsSinksListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsSinksListCall) Context(ctx context.Context) *ProjectsSinksListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsSinksListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/sinks")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.sinks.list" call.
+// Exactly one of *ListSinksResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ListSinksResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsSinksListCall) Do() (*ListSinksResponse, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ListSinksResponse{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists project sinks associated with a project.",
+ // "httpMethod": "GET",
+ // "id": "logging.projects.sinks.list",
+ // "parameterOrder": [
+ // "projectsId"
+ // ],
+ // "parameters": {
+ // "projectsId": {
+ // "description": "Part of `projectName`. The project whose sinks are wanted.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/sinks",
+ // "response": {
+ // "$ref": "ListSinksResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/logging.admin",
+ // "https://www.googleapis.com/auth/logging.read"
+ // ]
+ // }
+
+}
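+
+// Example (editorial sketch): bounding a call with a context so that, as the
+// Context method documents, the pending HTTP request is aborted on timeout.
+// Assumes the golang.org/x/net/context and time packages are imported.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//    defer cancel()
+//    resp, err := svc.Projects.Sinks.List("my-project").Context(ctx).Do()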
+
+// method id "logging.projects.sinks.update":
+
+type ProjectsSinksUpdateCall struct {
+ s *Service
+ projectsId string
+ sinksId string
+ logsink *LogSink
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Update: Updates a project sink. If the sink does not exist, it is
+// created. The destination, filter, or both may be updated.
+func (r *ProjectsSinksService) Update(projectsId string, sinksId string, logsink *LogSink) *ProjectsSinksUpdateCall {
+ c := &ProjectsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectsId = projectsId
+ c.sinksId = sinksId
+ c.logsink = logsink
+ return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+func (c *ProjectsSinksUpdateCall) QuotaUser(quotaUser string) *ProjectsSinksUpdateCall {
+ c.urlParams_.Set("quotaUser", quotaUser)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsSinksUpdateCall) Fields(s ...googleapi.Field) *ProjectsSinksUpdateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsSinksUpdateCall) Context(ctx context.Context) *ProjectsSinksUpdateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ProjectsSinksUpdateCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/sinks/{sinksId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PUT", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectsId": c.projectsId,
+ "sinksId": c.sinksId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "logging.projects.sinks.update" call.
+// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *LogSink.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ProjectsSinksUpdateCall) Do() (*LogSink, error) {
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &LogSink{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates a project sink. If the sink does not exist, it is created. The destination, filter, or both may be updated.",
+ // "httpMethod": "PUT",
+ // "id": "logging.projects.sinks.update",
+ // "parameterOrder": [
+ // "projectsId",
+ // "sinksId"
+ // ],
+ // "parameters": {
+ // "projectsId": {
+ // "description": "Part of `sinkName`. The resource name of the project sink to update.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "sinksId": {
+ // "description": "Part of `sinkName`. See documentation of `projectsId`.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "v1beta3/projects/{projectsId}/sinks/{sinksId}",
+ // "request": {
+ // "$ref": "LogSink"
+ // },
+ // "response": {
+ // "$ref": "LogSink"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/logging.admin"
+ // ]
+ // }
+
+}
diff --git a/vendor/src/google.golang.org/cloud/.travis.yml b/vendor/src/google.golang.org/cloud/.travis.yml
new file mode 100644
index 0000000000..c037df0de0
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/.travis.yml
@@ -0,0 +1,11 @@
+sudo: false
+language: go
+go:
+- 1.4
+- 1.5
+install:
+- go get -v google.golang.org/cloud/...
+script:
+- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
+- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
+ go test -v -tags=integration google.golang.org/cloud/...
diff --git a/vendor/src/google.golang.org/cloud/AUTHORS b/vendor/src/google.golang.org/cloud/AUTHORS
new file mode 100644
index 0000000000..3da443dc9f
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/AUTHORS
@@ -0,0 +1,12 @@
+# This is the official list of cloud authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as:
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+Google Inc.
+Palm Stone Games, Inc.
+Péter Szilágyi <peterke@gmail.com>
+Tyler Treat <ttreat31@gmail.com>
diff --git a/vendor/src/google.golang.org/cloud/CONTRIBUTING.md b/vendor/src/google.golang.org/cloud/CONTRIBUTING.md
new file mode 100644
index 0000000000..9a1cab2878
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/CONTRIBUTING.md
@@ -0,0 +1,114 @@
+# Contributing
+
+1. Sign one of the contributor license agreements below.
+1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
+1. Get the cloud package by running `go get -d google.golang.org/cloud`.
+ 1. If you have already checked out the source, make sure that the remote git
+ origin is https://code.googlesource.com/gocloud:
+
+ git remote set-url origin https://code.googlesource.com/gocloud
+1. Make changes and create a change by running `git codereview change <name>`,
+provide a commit message, and use `git codereview mail` to create a Gerrit CL.
+1. Keep amending the change and mailing it as you receive feedback.
+
+## Integration Tests
+
+In addition to the unit tests, you may run the integration test suite.
+
+To run the integration tests, you must create and configure a project in the
+Google Developers Console. Once you have created a project, set the
+following environment variables (an example follows the list) so the tests
+can run against the actual APIs.
+
+- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
+- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
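+
+For example, with illustrative values:
+
+``` sh
+$ export GCLOUD_TESTS_GOLANG_PROJECT_ID=bamboo-shift-455
+$ export GCLOUD_TESTS_GOLANG_KEY=$HOME/keys/key.json
+```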
+
+Create a storage bucket with the same name as the project id set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**.
+The storage integration test will create and delete some objects in this bucket.
+
+Install the [gcloud command-line tool][gcloudcli] on your machine and use it
+to create the indexes used by the datastore integration tests, as defined in
+`datastore/testdata/index.yaml`:
+
+From the project's root directory:
+
+``` sh
+# Install the app component
+$ gcloud components update app
+
+# Set the default project in your env
+$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
+
+# Authenticate the gcloud tool with your account
+$ gcloud auth login
+
+# Create the indexes
+$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
+
+```
+
+You can run the integration tests by running:
+
+``` sh
+$ go test -v -tags=integration google.golang.org/cloud/...
+```
+
+## Contributor License Agreements
+
+Before we can accept your pull requests you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+- **If you work for a company that wants to allow you to contribute your work**,
+then you'll need to sign a [corporate CLA][corpcla].
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
+
+## Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information,
+such as physical or electronic addresses,
+without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+
+This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
+available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+
+[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
+[indvcla]: https://developers.google.com/open-source/cla/individual
+[corpcla]: https://developers.google.com/open-source/cla/corporate
diff --git a/vendor/src/google.golang.org/cloud/CONTRIBUTORS b/vendor/src/google.golang.org/cloud/CONTRIBUTORS
new file mode 100644
index 0000000000..475ac6a667
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/CONTRIBUTORS
@@ -0,0 +1,24 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+# Name <email address>
+
+# Keep the list alphabetically sorted.
+
+Andrew Gerrand <adg@golang.org>
+Brad Fitzpatrick <bradfitz@golang.org>
+Burcu Dogan <jbd@google.com>
+Dave Day <djd@golang.org>
+David Symonds <dsymonds@golang.org>
+Glenn Lewis <gmlewis@google.com>
+Johan Euphrosine <proppy@google.com>
+Luna Duclos <luna.duclos@palmstonegames.com>
+Michael McGreevy <mcgreevy@golang.org>
+Péter Szilágyi <peterke@gmail.com>
+Tyler Treat <ttreat31@gmail.com>
diff --git a/vendor/src/google.golang.org/cloud/LICENSE b/vendor/src/google.golang.org/cloud/LICENSE
new file mode 100644
index 0000000000..a4c5efd822
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2014 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/google.golang.org/cloud/README.md b/vendor/src/google.golang.org/cloud/README.md
new file mode 100644
index 0000000000..10d3995d58
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/README.md
@@ -0,0 +1,135 @@
+# Google Cloud for Go
+
+[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang)
+
+**NOTE:** These packages are experimental, and may occasionally make
+backwards-incompatible changes.
+
+**NOTE:** This GitHub repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
+
+Go packages for Google Cloud Platform services. Supported APIs include:
+
+ * Google Cloud Datastore
+ * Google Cloud Storage
+ * Google Cloud Pub/Sub
+ * Google Cloud Container Engine
+
+``` go
+import "google.golang.org/cloud"
+```
+
+Documentation and examples are available at
+[https://godoc.org/google.golang.org/cloud](https://godoc.org/google.golang.org/cloud).
+
+## Authorization
+
+Authorization, throughout the package, is delegated to the
+golang.org/x/oauth2 package. Refer to the
+[godoc documentation](https://godoc.org/golang.org/x/oauth2)
+for examples of using oauth2 with the Cloud package.
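+
+For example, a minimal sketch, assuming Application Default Credentials
+are available (e.g. via `gcloud auth login`) and using a placeholder
+project ID:
+
+```go
+// Obtain an authorized *http.Client via Application Default Credentials.
+client, err := google.DefaultClient(context.Background(),
+	"https://www.googleapis.com/auth/devstorage.read_only")
+if err != nil {
+	log.Fatal(err)
+}
+// Create a context carrying the project ID and the authorized client.
+ctx := cloud.NewContext("your-project-id", client)
+```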
+
+## Google Cloud Datastore
+
+[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully
+managed, schemaless database for storing non-relational data. Cloud Datastore
+automatically scales with your users and supports ACID transactions, high availability
+of reads and writes, strong consistency for reads and ancestor queries, and eventual
+consistency for all other queries.
+
+Follow the [activation instructions][cloud-datastore-activation] to use the Google
+Cloud Datastore API with your project.
+
+[https://godoc.org/google.golang.org/cloud/datastore](https://godoc.org/google.golang.org/cloud/datastore)
+
+
+```go
+type Post struct {
+ Title string
+ Body string `datastore:",noindex"`
+ PublishedAt time.Time
+}
+keys := []*datastore.Key{
+ datastore.NewKey(ctx, "Post", "post1", 0, nil),
+ datastore.NewKey(ctx, "Post", "post2", 0, nil),
+}
+posts := []*Post{
+ {Title: "Post 1", Body: "...", PublishedAt: time.Now()},
+ {Title: "Post 2", Body: "...", PublishedAt: time.Now()},
+}
+if _, err := datastore.PutMulti(ctx, keys, posts); err != nil {
+ log.Println(err)
+}
+```
+
+## Google Cloud Storage
+
+[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store
+data on Google infrastructure with very high reliability, performance and availability,
+and can be used to distribute large data objects to users via direct download.
+
+[https://godoc.org/google.golang.org/cloud/storage](https://godoc.org/google.golang.org/cloud/storage)
+
+
+```go
+// Read object1 from the bucket.
+rc, err := storage.NewReader(ctx, "bucket", "object1")
+if err != nil {
+ log.Fatal(err)
+}
+slurp, err := ioutil.ReadAll(rc)
+rc.Close()
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+## Google Cloud Pub/Sub (Alpha)
+
+> Google Cloud Pub/Sub is in **Alpha status**. As a result, it might change in
+> backward-incompatible ways and is not recommended for production use. It is not
+> subject to any SLA or deprecation policy.
+
+[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect
+your services with reliable, many-to-many, asynchronous messaging hosted on Google's
+infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation
+for building your own robust, global services.
+
+[https://godoc.org/google.golang.org/cloud/pubsub](https://godoc.org/google.golang.org/cloud/pubsub)
+
+
+```go
+// Publish "hello world" on topic1.
+msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{
+ Data: []byte("hello world"),
+})
+if err != nil {
+ log.Println(err)
+}
+// Pull messages via subscription1.
+msgs, err := pubsub.Pull(ctx, "subscription1", 1)
+if err != nil {
+ log.Println(err)
+}
+```
+
+## Contributing
+
+Contributions are welcome. Please see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md)
+document for details. We use Gerrit for code reviews; please don't open pull
+requests against this repo, as new pull requests will be closed automatically.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
+
+[cloud-datastore]: https://cloud.google.com/datastore/
+[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
+[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
+
+[cloud-pubsub]: https://cloud.google.com/pubsub/
+[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
+
+[cloud-storage]: https://cloud.google.com/storage/
+[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview
+[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
diff --git a/vendor/src/google.golang.org/cloud/cloud.go b/vendor/src/google.golang.org/cloud/cloud.go
new file mode 100644
index 0000000000..96d36baf2c
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/cloud.go
@@ -0,0 +1,49 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cloud contains types related to the Google Cloud Platform
+// APIs and common functions.
+package cloud // import "google.golang.org/cloud"
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+ "google.golang.org/cloud/internal"
+)
+
+// NewContext returns a new context that uses the provided http.Client.
+// The provided http.Client is responsible for authorizing and
+// authenticating the requests made to the Google Cloud APIs.
+// It mutates the client's original Transport to append the cloud
+// package's user-agent to the outgoing requests.
+// You can obtain the project ID from the Google Developers Console,
+// https://console.developers.google.com.
+func NewContext(projID string, c *http.Client) context.Context {
+ if c == nil {
+ panic("invalid nil *http.Client passed to NewContext")
+ }
+ return WithContext(context.Background(), projID, c)
+}
+
+// WithContext returns a new context in the same way NewContext does,
+// but initializes the new context with the specified parent.
+func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
+ // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
+ // Do User-Agent some other way.
+ if _, ok := c.Transport.(*internal.Transport); !ok {
+ c.Transport = &internal.Transport{Base: c.Transport}
+ }
+ return internal.WithContext(parent, projID, c)
+}
diff --git a/vendor/src/google.golang.org/cloud/compute/metadata/metadata.go b/vendor/src/google.golang.org/cloud/compute/metadata/metadata.go
new file mode 100644
index 0000000000..972972dd76
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/compute/metadata/metadata.go
@@ -0,0 +1,327 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metadata provides access to Google Compute Engine (GCE)
+// metadata and API service accounts.
+//
+// This package is a wrapper around the GCE metadata service,
+// as documented at https://developers.google.com/compute/docs/metadata.
+package metadata // import "google.golang.org/cloud/compute/metadata"
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/cloud/internal"
+)
+
+type cachedValue struct {
+ k string
+ trim bool
+ mu sync.Mutex
+ v string
+}
+
+var (
+ projID = &cachedValue{k: "project/project-id", trim: true}
+ projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+ instID = &cachedValue{k: "instance/id", trim: true}
+)
+
+var metaClient = &http.Client{
+ Transport: &internal.Transport{
+ Base: &http.Transport{
+ Dial: (&net.Dialer{
+ Timeout: 750 * time.Millisecond,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ ResponseHeaderTimeout: 750 * time.Millisecond,
+ },
+ },
+}
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+ return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func Get(suffix string) (string, error) {
+ val, _, err := getETag(suffix)
+ return val, err
+}
+
+// getETag returns a value from the metadata service as well as the associated
+// ETag. This func is otherwise equivalent to Get.
+func getETag(suffix string) (value, etag string, err error) {
+ // Using a fixed IP makes it very difficult to spoof the metadata service in
+ // a container, which is an important use-case for local testing of cloud
+ // deployments. To enable spoofing of the metadata service, the environment
+ // variable GCE_METADATA_HOST is first inspected to decide where metadata
+ // requests shall go.
+ host := os.Getenv("GCE_METADATA_HOST")
+ if host == "" {
+ // Using 169.254.169.254 instead of "metadata" here because Go
+ // binaries built with the "netgo" tag and without cgo won't
+ // know the search suffix for "metadata" is
+ // ".google.internal", and this IP address is documented as
+ // being stable anyway.
+ host = "169.254.169.254"
+ }
+ url := "http://" + host + "/computeMetadata/v1/" + suffix
+ req, _ := http.NewRequest("GET", url, nil)
+ req.Header.Set("Metadata-Flavor", "Google")
+ res, err := metaClient.Do(req)
+ if err != nil {
+ return "", "", err
+ }
+ defer res.Body.Close()
+ if res.StatusCode == http.StatusNotFound {
+ return "", "", NotDefinedError(suffix)
+ }
+ if res.StatusCode != 200 {
+ return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
+ }
+ all, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return "", "", err
+ }
+ return string(all), res.Header.Get("Etag"), nil
+}
+
+func getTrimmed(suffix string) (s string, err error) {
+ s, err = Get(suffix)
+ s = strings.TrimSpace(s)
+ return
+}
+
+func (c *cachedValue) get() (v string, err error) {
+ defer c.mu.Unlock()
+ c.mu.Lock()
+ if c.v != "" {
+ return c.v, nil
+ }
+ if c.trim {
+ v, err = getTrimmed(c.k)
+ } else {
+ v, err = Get(c.k)
+ }
+ if err == nil {
+ c.v = v
+ }
+ return
+}
+
+var onGCE struct {
+ sync.Mutex
+ set bool
+ v bool
+}
+
+// OnGCE reports whether this process is running on Google Compute Engine.
+func OnGCE() bool {
+ defer onGCE.Unlock()
+ onGCE.Lock()
+ if onGCE.set {
+ return onGCE.v
+ }
+ onGCE.set = true
+
+ // We use the DNS name of the metadata service here instead of the IP address
+ // because we expect that to fail faster in the not-on-GCE case.
+ res, err := metaClient.Get("http://metadata.google.internal")
+ if err != nil {
+ return false
+ }
+ onGCE.v = res.Header.Get("Metadata-Flavor") == "Google"
+ return onGCE.v
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+ const failedSubscribeSleep = time.Second * 5
+
+ // First check to see if the metadata value exists at all.
+ val, lastETag, err := getETag(suffix)
+ if err != nil {
+ return err
+ }
+
+ if err := fn(val, true); err != nil {
+ return err
+ }
+
+ ok := true
+ suffix += "?wait_for_change=true&last_etag="
+ for {
+ val, etag, err := getETag(suffix + url.QueryEscape(lastETag))
+ if err != nil {
+ if _, deleted := err.(NotDefinedError); !deleted {
+ time.Sleep(failedSubscribeSleep)
+ continue // Retry on other errors.
+ }
+ ok = false
+ }
+ lastETag = etag
+
+ if err := fn(val, ok); err != nil || !ok {
+ return err
+ }
+ }
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+ return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+ return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+ return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+ var s []string
+ j, err := Get("instance/tags")
+ if err != nil {
+ return nil, err
+ }
+ if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+ return nil, err
+ }
+ return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+ return instID.get()
+}
+
+// InstanceName returns the current VM's instance name string.
+func InstanceName() (string, error) {
+ host, err := Hostname()
+ if err != nil {
+ return "", err
+ }
+ return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+ zone, err := getTrimmed("instance/zone")
+ // zone is of the form "projects/<projNum>/zones/<zoneName>".
+ if err != nil {
+ return "", err
+ }
+ return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+ j, err := Get(suffix)
+ if err != nil {
+ return nil, err
+ }
+ s := strings.Split(strings.TrimSpace(j), "\n")
+ for i := range s {
+ s[i] = strings.TrimSpace(s[i])
+ }
+ return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func InstanceAttributeValue(attr string) (string, error) {
+ return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func ProjectAttributeValue(attr string) (string, error) {
+ return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) {
+ if serviceAccount == "" {
+ serviceAccount = "default"
+ }
+ return lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
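+
+// exampleUsage is a hedged, illustrative sketch of using this package;
+// it assumes the process is running on a GCE instance and is not part
+// of the package's public API.
+func exampleUsage() {
+	if !OnGCE() {
+		return // the metadata service is unreachable off GCE
+	}
+	if id, err := ProjectID(); err == nil {
+		fmt.Println("project:", id)
+	}
+	if zone, err := Zone(); err == nil {
+		fmt.Println("zone:", zone)
+	}
+}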
diff --git a/vendor/src/google.golang.org/cloud/internal/cloud.go b/vendor/src/google.golang.org/cloud/internal/cloud.go
new file mode 100644
index 0000000000..8b0db1b5da
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/internal/cloud.go
@@ -0,0 +1,128 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal provides support for the cloud packages.
+//
+// Users should not import this package directly.
+package internal
+
+import (
+ "fmt"
+ "net/http"
+ "sync"
+
+ "golang.org/x/net/context"
+)
+
+type contextKey struct{}
+
+func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
+ if c == nil {
+ panic("nil *http.Client passed to WithContext")
+ }
+ if projID == "" {
+ panic("empty project ID passed to WithContext")
+ }
+ return context.WithValue(parent, contextKey{}, &cloudContext{
+ ProjectID: projID,
+ HTTPClient: c,
+ })
+}
+
+const userAgent = "gcloud-golang/0.1"
+
+type cloudContext struct {
+ ProjectID string
+ HTTPClient *http.Client
+
+ mu sync.Mutex // guards svc
+ svc map[string]interface{} // e.g. "storage" => *rawStorage.Service
+}
+
+// Service returns the result of the fill function if it's never been
+// called before for the given name (which is assumed to be an API
+// service name, like "datastore"). If it has already been cached, the fill
+// func is not run.
+// It's safe for concurrent use by multiple goroutines.
+func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} {
+ return cc(ctx).service(name, fill)
+}
+
+func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.svc == nil {
+ c.svc = make(map[string]interface{})
+ } else if v, ok := c.svc[name]; ok {
+ return v
+ }
+ v := fill(c.HTTPClient)
+ c.svc[name] = v
+ return v
+}
+
+// Transport is an http.RoundTripper that appends
+// Google Cloud client's user-agent to the original
+// request's user-agent header.
+type Transport struct {
+ // Base represents the actual http.RoundTripper
+ // the requests will be delegated to.
+ Base http.RoundTripper
+}
+
+// RoundTrip appends a user-agent to the existing user-agent
+// header and delegates the request to the base http.RoundTripper.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ req = cloneRequest(req)
+ ua := req.Header.Get("User-Agent")
+ if ua == "" {
+ ua = userAgent
+ } else {
+ ua = fmt.Sprintf("%s %s", ua, userAgent)
+ }
+ req.Header.Set("User-Agent", ua)
+ return t.Base.RoundTrip(req)
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header)
+ for k, s := range r.Header {
+ r2.Header[k] = s
+ }
+ return r2
+}
+
+func ProjID(ctx context.Context) string {
+ return cc(ctx).ProjectID
+}
+
+func HTTPClient(ctx context.Context) *http.Client {
+ return cc(ctx).HTTPClient
+}
+
+// cc returns the internal *cloudContext (cc) state for a context.Context.
+// It panics if the context was not created with cloud.NewContext or
+// cloud.WithContext.
+func cc(ctx context.Context) *cloudContext {
+ if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok {
+ return c
+ }
+ panic("invalid context.Context type; it should be created with cloud.NewContext")
+}
diff --git a/vendor/src/google.golang.org/cloud/internal/opts/option.go b/vendor/src/google.golang.org/cloud/internal/opts/option.go
new file mode 100644
index 0000000000..c5ccf4f56d
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/internal/opts/option.go
@@ -0,0 +1,24 @@
+// Package opts holds the DialOpt struct, configured by
+// cloud.ClientOption values to set up transports for the cloud packages.
+//
+// This is a separate package to prevent import cycles between the core
+// cloud packages.
+package opts
+
+import (
+ "net/http"
+
+ "golang.org/x/oauth2"
+ "google.golang.org/grpc"
+)
+
+type DialOpt struct {
+ Endpoint string
+ Scopes []string
+ UserAgent string
+
+ TokenSource oauth2.TokenSource
+
+ HTTPClient *http.Client
+ GRPCClient *grpc.ClientConn
+}
diff --git a/vendor/src/google.golang.org/cloud/internal/transport/cancelreq.go b/vendor/src/google.golang.org/cloud/internal/transport/cancelreq.go
new file mode 100644
index 0000000000..ddae71ccef
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/internal/transport/cancelreq.go
@@ -0,0 +1,29 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.5
+
+package transport
+
+import "net/http"
+
+// makeReqCancel returns a closure that cancels the given http.Request
+// when called.
+func makeReqCancel(req *http.Request) func(http.RoundTripper) {
+ c := make(chan struct{})
+ req.Cancel = c
+ return func(http.RoundTripper) {
+ close(c)
+ }
+}
diff --git a/vendor/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go b/vendor/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go
new file mode 100644
index 0000000000..c11a4ddebc
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go
@@ -0,0 +1,31 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.5
+
+package transport
+
+import "net/http"
+
+// makeReqCancel returns a closure that cancels the given http.Request
+// when called.
+func makeReqCancel(req *http.Request) func(http.RoundTripper) {
+ // Go 1.4 and prior do not have a reliable way of cancelling a request.
+ // Transport.CancelRequest will only work if the request is already in-flight.
+ return func(r http.RoundTripper) {
+ if t, ok := r.(*http.Transport); ok {
+ t.CancelRequest(req)
+ }
+ }
+}
diff --git a/vendor/src/google.golang.org/cloud/internal/transport/dial.go b/vendor/src/google.golang.org/cloud/internal/transport/dial.go
new file mode 100644
index 0000000000..29624410d3
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/internal/transport/dial.go
@@ -0,0 +1,134 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ "google.golang.org/cloud"
+ "google.golang.org/cloud/internal/opts"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/oauth"
+)
+
+// ErrHTTP is returned on a non-200 HTTP response.
+type ErrHTTP struct {
+ StatusCode int
+ Body []byte
+ err error
+}
+
+func (e *ErrHTTP) Error() string {
+ if e.err == nil {
+ return fmt.Sprintf("error during call, http status code: %v %s", e.StatusCode, e.Body)
+ }
+ return e.err.Error()
+}
+
+// NewHTTPClient returns an HTTP client for communicating with a Google cloud
+// service, configured with the given ClientOptions. It also returns the endpoint
+// for the service as specified in the options.
+func NewHTTPClient(ctx context.Context, opt ...cloud.ClientOption) (*http.Client, string, error) {
+ var o opts.DialOpt
+ for _, opt := range opt {
+ opt.Resolve(&o)
+ }
+ if o.GRPCClient != nil {
+ return nil, "", errors.New("unsupported GRPC base transport specified")
+ }
+ // TODO(djd): Wrap all http.Clients with appropriate internal version to add
+ // UserAgent header and prepend correct endpoint.
+ if o.HTTPClient != nil {
+ return o.HTTPClient, o.Endpoint, nil
+ }
+ if o.TokenSource == nil {
+ var err error
+ o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
+ if err != nil {
+ return nil, "", fmt.Errorf("google.DefaultTokenSource: %v", err)
+ }
+ }
+ return oauth2.NewClient(ctx, o.TokenSource), o.Endpoint, nil
+}
+
+// NewProtoClient returns a ProtoClient for communicating with a Google cloud service,
+// configured with the given ClientOptions.
+func NewProtoClient(ctx context.Context, opt ...cloud.ClientOption) (*ProtoClient, error) {
+ var o opts.DialOpt
+ for _, opt := range opt {
+ opt.Resolve(&o)
+ }
+ if o.GRPCClient != nil {
+ return nil, errors.New("unsupported GRPC base transport specified")
+ }
+ var client *http.Client
+ switch {
+ case o.HTTPClient != nil:
+ if o.TokenSource != nil {
+ return nil, errors.New("at most one of WithTokenSource or WithBaseHTTP may be provided")
+ }
+ client = o.HTTPClient
+ case o.TokenSource != nil:
+ client = oauth2.NewClient(ctx, o.TokenSource)
+ default:
+ var err error
+ client, err = google.DefaultClient(ctx, o.Scopes...)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &ProtoClient{
+ client: client,
+ endpoint: o.Endpoint,
+ userAgent: o.UserAgent,
+ }, nil
+}
+
+// DialGRPC returns a GRPC connection for communicating with a Google cloud
+// service, configured with the given ClientOptions.
+func DialGRPC(ctx context.Context, opt ...cloud.ClientOption) (*grpc.ClientConn, error) {
+ var o opts.DialOpt
+ for _, opt := range opt {
+ opt.Resolve(&o)
+ }
+ if o.HTTPClient != nil {
+ return nil, errors.New("unsupported HTTP base transport specified")
+ }
+ if o.GRPCClient != nil {
+ return o.GRPCClient, nil
+ }
+ if o.TokenSource == nil {
+ var err error
+ o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
+ if err != nil {
+ return nil, fmt.Errorf("google.DefaultTokenSource: %v", err)
+ }
+ }
+ grpcOpts := []grpc.DialOption{
+ grpc.WithPerRPCCredentials(oauth.TokenSource{o.TokenSource}),
+ grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
+ }
+ if o.UserAgent != "" {
+ grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
+ }
+ return grpc.Dial(o.Endpoint, grpcOpts...)
+}
diff --git a/vendor/src/google.golang.org/cloud/internal/transport/proto.go b/vendor/src/google.golang.org/cloud/internal/transport/proto.go
new file mode 100644
index 0000000000..05b11cde1e
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/internal/transport/proto.go
@@ -0,0 +1,80 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+)
+
+type ProtoClient struct {
+ client *http.Client
+ endpoint string
+ userAgent string
+}
+
+func (c *ProtoClient) Call(ctx context.Context, method string, req, resp proto.Message) error {
+ payload, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ httpReq, err := http.NewRequest("POST", c.endpoint+method, bytes.NewReader(payload))
+ if err != nil {
+ return err
+ }
+ httpReq.Header.Set("Content-Type", "application/x-protobuf")
+ if ua := c.userAgent; ua != "" {
+ httpReq.Header.Set("User-Agent", ua)
+ }
+
+ errc := make(chan error, 1)
+ cancel := makeReqCancel(httpReq)
+
+ go func() {
+ r, err := c.client.Do(httpReq)
+ if err != nil {
+ errc <- err
+ return
+ }
+ defer r.Body.Close()
+
+ body, err := ioutil.ReadAll(r.Body)
+ if r.StatusCode != http.StatusOK {
+ err = &ErrHTTP{
+ StatusCode: r.StatusCode,
+ Body: body,
+ err: err,
+ }
+ }
+ if err != nil {
+ errc <- err
+ return
+ }
+ errc <- proto.Unmarshal(body, resp)
+ }()
+
+ select {
+ case <-ctx.Done():
+ cancel(c.client.Transport) // Cancel the HTTP request.
+ return ctx.Err()
+ case err := <-errc:
+ return err
+ }
+}
diff --git a/vendor/src/google.golang.org/cloud/key.json.enc b/vendor/src/google.golang.org/cloud/key.json.enc
new file mode 100644
index 0000000000..2f673a84b1
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/key.json.enc
Binary files differ
diff --git a/vendor/src/google.golang.org/cloud/logging/logging.go b/vendor/src/google.golang.org/cloud/logging/logging.go
new file mode 100644
index 0000000000..bd33e26ecf
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/logging/logging.go
@@ -0,0 +1,468 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package logging contains a Google Cloud Logging client.
+//
+// This package is experimental and subject to API changes.
+package logging // import "google.golang.org/cloud/logging"
+
+import (
+ "errors"
+ "io"
+ "log"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ api "google.golang.org/api/logging/v1beta3"
+ "google.golang.org/cloud"
+ "google.golang.org/cloud/internal/transport"
+)
+
+// Scope is the OAuth2 scope necessary to use Google Cloud Logging.
+const Scope = api.LoggingWriteScope
+
+// Level is the log level.
+type Level int
+
+const (
+ // Default means no assigned severity level.
+ Default Level = iota
+ Debug
+ Info
+ Warning
+ Error
+ Critical
+ Alert
+ Emergency
+ nLevel
+)
+
+var levelName = [nLevel]string{
+ Default: "",
+ Debug: "DEBUG",
+ Info: "INFO",
+ Warning: "WARNING",
+ Error: "ERROR",
+ Critical: "CRITICAL",
+ Alert: "ALERT",
+ Emergency: "EMERGENCY",
+}
+
+func (v Level) String() string {
+ return levelName[v]
+}
+
+// Client is a Google Cloud Logging client.
+// It must be constructed via NewClient.
+type Client struct {
+ svc *api.Service
+ logs *api.ProjectsLogsEntriesService
+ projID string
+ logName string
+ writer [nLevel]io.Writer
+ logger [nLevel]*log.Logger
+
+ mu sync.Mutex
+ queued []*api.LogEntry
+ curFlush *flushCall // currently in-flight flush
+ flushTimer *time.Timer // nil before first use
+ timerActive bool // whether flushTimer is armed
+ inFlight int // number of log entries sent to API service but not yet ACKed
+
+ // For testing:
+ timeNow func() time.Time // optional
+
+ // ServiceName may be "appengine.googleapis.com",
+ // "compute.googleapis.com" or "custom.googleapis.com".
+ //
+ // The default is "custom.googleapis.com".
+ //
+ // The service name is only used by the API server to
+ // determine which of the labels are used to index the logs.
+ ServiceName string
+
+ // CommonLabels are metadata labels that apply to all log
+ // entries in this request, so that you don't have to repeat
+ // them in each log entry's metadata.labels field. If any of
+ // the log entries contains a (key, value) with the same key
+ // that is in CommonLabels, then the entry's (key, value)
+ // overrides the one in CommonLabels.
+ CommonLabels map[string]string
+
+ // BufferLimit is the maximum number of items to keep in memory
+ // before flushing. Zero means automatic. A value of 1 means to
+ // flush after each log entry.
+ // The default is currently 10,000.
+ BufferLimit int
+
+ // FlushAfter optionally specifies a threshold count at which buffered
+ // log entries are flushed, even if the BufferInterval has not yet
+ // been reached.
+ // The default is currently 10.
+ FlushAfter int
+
+ // BufferInterval is the maximum amount of time that an item
+ // should remain buffered in memory before being flushed to
+ // the logging service.
+ // The default is currently 1 second.
+ BufferInterval time.Duration
+
+ // Overflow is a function which runs when the Log function
+ // overflows its configured buffer limit. If nil, the log
+ // entry is dropped. The return value from Overflow is
+ // returned by Log.
+ Overflow func(*Client, Entry) error
+}
+
+func (c *Client) flushAfter() int {
+ if v := c.FlushAfter; v > 0 {
+ return v
+ }
+ return 10
+}
+
+func (c *Client) bufferInterval() time.Duration {
+ if v := c.BufferInterval; v > 0 {
+ return v
+ }
+ return time.Second
+}
+
+func (c *Client) bufferLimit() int {
+ if v := c.BufferLimit; v > 0 {
+ return v
+ }
+ return 10000
+}
+
+func (c *Client) serviceName() string {
+ if v := c.ServiceName; v != "" {
+ return v
+ }
+ return "custom.googleapis.com"
+}
+
+func (c *Client) now() time.Time {
+ if now := c.timeNow; now != nil {
+ return now()
+ }
+ return time.Now()
+}
+
+// Writer returns an io.Writer for the provided log level.
+//
+// Each Write call on the returned Writer generates a log entry.
+//
+// This Writer accessor does not allocate, so callers do not need to
+// cache the result.
+func (c *Client) Writer(v Level) io.Writer { return c.writer[v] }
+
+// Logger returns a *log.Logger for the provided log level.
+//
+// A Logger for each Level is pre-allocated by NewClient with an empty
+// prefix and no flags. This Logger accessor does not allocate.
+// Callers wishing to use alternate flags (such as log.Lshortfile) may
+// mutate the returned Logger with SetFlags. Such mutations affect all
+// callers in the program.
+func (c *Client) Logger(v Level) *log.Logger { return c.logger[v] }
+
+type levelWriter struct {
+ level Level
+ c *Client
+}
+
+func (w levelWriter) Write(p []byte) (n int, err error) {
+ return len(p), w.c.Log(Entry{
+ Level: w.level,
+ Payload: string(p),
+ })
+}
+
+// Entry is a log entry.
+type Entry struct {
+ // Time is the time of the entry. If the zero value, the current time is used.
+ Time time.Time
+
+	// Level is the log entry's severity level.
+ // The zero value means no assigned severity level.
+ Level Level
+
+ // Payload must be either a string, []byte, or something that
+ // marshals via the encoding/json package to a JSON object
+ // (and not any other type of JSON value).
+ Payload interface{}
+
+ // Labels optionally specifies key/value labels for the log entry.
+ // Depending on the Client's ServiceName, these are indexed differently
+ // by the Cloud Logging Service.
+ // See https://cloud.google.com/logging/docs/logs_index
+ // The Client.Log method takes ownership of this map.
+ Labels map[string]string
+
+ // TODO: de-duping id
+}
+
+func (c *Client) apiEntry(e Entry) (*api.LogEntry, error) {
+ t := e.Time
+ if t.IsZero() {
+ t = c.now()
+ }
+
+ ent := &api.LogEntry{
+ Metadata: &api.LogEntryMetadata{
+ Timestamp: t.UTC().Format(time.RFC3339Nano),
+ ServiceName: c.serviceName(),
+ Severity: e.Level.String(),
+ Labels: e.Labels,
+ },
+ }
+ switch p := e.Payload.(type) {
+ case string:
+ ent.TextPayload = p
+ case []byte:
+ ent.TextPayload = string(p)
+ default:
+ ent.StructPayload = api.LogEntryStructPayload(p)
+ }
+ return ent, nil
+}
+
+// LogSync logs e synchronously without any buffering.
+// This is mostly intended for debugging or critical errors.
+func (c *Client) LogSync(e Entry) error {
+ ent, err := c.apiEntry(e)
+ if err != nil {
+ return err
+ }
+ _, err = c.logs.Write(c.projID, c.logName, &api.WriteLogEntriesRequest{
+ CommonLabels: c.CommonLabels,
+ Entries: []*api.LogEntry{ent},
+ }).Do()
+ return err
+}
+
+var ErrOverflow = errors.New("logging: log entry overflowed buffer limits")
+
+// Log queues an entry to be sent to the logging service, subject to the
+// Client's parameters. By default, the log will be flushed within
+// one second.
+// Log only returns an error if the entry is invalid or the queue is at
+// capacity. If the queue is at capacity and the entry can't be added,
+// Log returns either ErrOverflow when c.Overflow is nil, or the
+// value returned by c.Overflow.
+func (c *Client) Log(e Entry) error {
+ ent, err := c.apiEntry(e)
+ if err != nil {
+ return err
+ }
+
+ c.mu.Lock()
+ buffered := len(c.queued) + c.inFlight
+
+ if buffered >= c.bufferLimit() {
+ c.mu.Unlock()
+ if fn := c.Overflow; fn != nil {
+ return fn(c, e)
+ }
+ return ErrOverflow
+ }
+ defer c.mu.Unlock()
+
+ c.queued = append(c.queued, ent)
+ if len(c.queued) >= c.flushAfter() {
+ c.scheduleFlushLocked(0)
+ return nil
+ }
+ c.scheduleFlushLocked(c.bufferInterval())
+ return nil
+}
+
+// c.mu must be held.
+//
+// d will be one of two values: either c.BufferInterval (or its
+// default value) or 0.
+func (c *Client) scheduleFlushLocked(d time.Duration) {
+ if c.inFlight > 0 {
+ // For now to keep things simple, only allow one HTTP
+ // request in flight at a time.
+ return
+ }
+ switch {
+ case c.flushTimer == nil:
+ // First flush.
+ c.timerActive = true
+ c.flushTimer = time.AfterFunc(d, c.timeoutFlush)
+ case c.timerActive && d == 0:
+ // Make it happen sooner. For example, this is the
+ // case of transitioning from a 1 second flush after
+ // the 1st item to an immediate flush after the 10th
+ // item.
+ c.flushTimer.Reset(0)
+ case !c.timerActive:
+ c.timerActive = true
+ c.flushTimer.Reset(d)
+ default:
+ // else timer was already active, also at d > 0,
+ // so we don't touch it and let it fire as previously
+ // scheduled.
+ }
+}
+
+// timeoutFlush runs in its own goroutine (from time.AfterFunc) and
+// flushes c.queued.
+func (c *Client) timeoutFlush() {
+ c.mu.Lock()
+ c.timerActive = false
+ c.mu.Unlock()
+ if err := c.Flush(); err != nil {
+ // schedule another try
+ // TODO: smarter back-off?
+ c.mu.Lock()
+ c.scheduleFlushLocked(5 * time.Second)
+ c.mu.Unlock()
+ }
+}
+
+// Ping reports whether the client's connection to Google Cloud
+// Logging and the authentication configuration are valid.
+func (c *Client) Ping() error {
+ _, err := c.logs.Write(c.projID, c.logName, &api.WriteLogEntriesRequest{
+ Entries: []*api.LogEntry{},
+ }).Do()
+ return err
+}
+
+// Flush flushes any buffered log entries.
+func (c *Client) Flush() error {
+ var numFlush int
+ c.mu.Lock()
+ for {
+ // We're already flushing (or we just started flushing
+ // ourselves), so wait for it to finish.
+ if f := c.curFlush; f != nil {
+ wasEmpty := len(c.queued) == 0
+ c.mu.Unlock()
+ <-f.donec // wait for it
+ numFlush++
+ // Terminate whenever there's an error, we've
+ // already flushed twice (one that was already
+ // in-flight when flush was called, and then
+ // one we instigated), or the queue was empty
+			// when we released the lock (meaning this
+ // in-flight flush removes everything present
+ // when Flush was called, and we don't need to
+ // kick off a new flush for things arriving
+ // afterward)
+ if f.err != nil || numFlush == 2 || wasEmpty {
+ return f.err
+ }
+ // Otherwise, re-obtain the lock and loop,
+ // starting over with seeing if a flush is in
+ // progress, which might've been started by a
+			// different goroutine before acquiring this
+ // lock again.
+ c.mu.Lock()
+ continue
+ }
+
+ // Terminal case:
+ if len(c.queued) == 0 {
+ c.mu.Unlock()
+ return nil
+ }
+
+ c.startFlushLocked()
+ }
+}
+
+// requires c.mu be held.
+func (c *Client) startFlushLocked() {
+ if c.curFlush != nil {
+ panic("internal error: flush already in flight")
+ }
+ if len(c.queued) == 0 {
+ panic("internal error: no items queued")
+ }
+ logEntries := c.queued
+ c.inFlight = len(logEntries)
+ c.queued = nil
+
+ flush := &flushCall{
+ donec: make(chan struct{}),
+ }
+ c.curFlush = flush
+ go func() {
+ defer close(flush.donec)
+ _, err := c.logs.Write(c.projID, c.logName, &api.WriteLogEntriesRequest{
+ CommonLabels: c.CommonLabels,
+ Entries: logEntries,
+ }).Do()
+ flush.err = err
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.inFlight = 0
+ c.curFlush = nil
+ if err != nil {
+ c.queued = append(c.queued, logEntries...)
+ } else if len(c.queued) > 0 {
+ c.scheduleFlushLocked(c.bufferInterval())
+ }
+ }()
+}
+
+const prodAddr = "https://logging.googleapis.com/"
+
+const userAgent = "gcloud-golang-logging/20150922"
+
+// NewClient returns a new log client, logging to the named log in the
+// provided project.
+//
+// The exported fields on the returned client may be modified before
+// the client is used for logging. Once log entries are in flight,
+// the fields must not be modified.
+func NewClient(ctx context.Context, projectID, logName string, opts ...cloud.ClientOption) (*Client, error) {
+ httpClient, endpoint, err := transport.NewHTTPClient(ctx, append([]cloud.ClientOption{
+ cloud.WithEndpoint(prodAddr),
+ cloud.WithScopes(api.CloudPlatformScope),
+ cloud.WithUserAgent(userAgent),
+ }, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ svc, err := api.New(httpClient)
+ if err != nil {
+ return nil, err
+ }
+ svc.BasePath = endpoint
+ c := &Client{
+ svc: svc,
+ logs: api.NewProjectsLogsEntriesService(svc),
+ logName: logName,
+ projID: projectID,
+ }
+ for i := range c.writer {
+ level := Level(i)
+ c.writer[level] = levelWriter{level, c}
+ c.logger[level] = log.New(c.writer[level], "", 0)
+ }
+ return c, nil
+}
+
+// flushCall is an in-flight or completed flush.
+type flushCall struct {
+ donec chan struct{} // closed when response is in
+	err   error         // error is valid after donec is closed
+}
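+
+// exampleUsage is a hedged, illustrative sketch of using this client;
+// the project ID and log name are placeholders, and credentials are
+// assumed to be supplied via cloud.ClientOption values or the
+// environment. It is not part of the package's public API.
+func exampleUsage(ctx context.Context) error {
+	c, err := NewClient(ctx, "your-project-id", "my-log")
+	if err != nil {
+		return err
+	}
+	// Entries are buffered and flushed in the background, subject to
+	// FlushAfter, BufferInterval, and BufferLimit.
+	c.Logger(Info).Println("hello from the logging client")
+	// Flush any remaining buffered entries before exiting.
+	return c.Flush()
+}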
diff --git a/vendor/src/google.golang.org/cloud/option.go b/vendor/src/google.golang.org/cloud/option.go
new file mode 100644
index 0000000000..d8614eb206
--- /dev/null
+++ b/vendor/src/google.golang.org/cloud/option.go
@@ -0,0 +1,102 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cloud
+
+import (
+ "net/http"
+
+ "golang.org/x/oauth2"
+ "google.golang.org/cloud/internal/opts"
+ "google.golang.org/grpc"
+)
+
+// ClientOption is used when constructing clients for each cloud service.
+type ClientOption interface {
+ // Resolve configures the given DialOpts for this option.
+ Resolve(*opts.DialOpt)
+}
+
+// WithTokenSource returns a ClientOption that specifies an OAuth2 token
+// source to be used as the basis for authentication.
+func WithTokenSource(s oauth2.TokenSource) ClientOption {
+ return withTokenSource{s}
+}
+
+type withTokenSource struct{ ts oauth2.TokenSource }
+
+func (w withTokenSource) Resolve(o *opts.DialOpt) {
+ o.TokenSource = w.ts
+}
+
+// WithEndpoint returns a ClientOption that overrides the default endpoint
+// to be used for a service.
+func WithEndpoint(url string) ClientOption {
+ return withEndpoint(url)
+}
+
+type withEndpoint string
+
+func (w withEndpoint) Resolve(o *opts.DialOpt) {
+ o.Endpoint = string(w)
+}
+
+// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
+// to be used for a service.
+func WithScopes(scope ...string) ClientOption {
+ return withScopes(scope)
+}
+
+type withScopes []string
+
+func (w withScopes) Resolve(o *opts.DialOpt) {
+ s := make([]string, len(w))
+ copy(s, w)
+ o.Scopes = s
+}
+
+// WithUserAgent returns a ClientOption that sets the User-Agent.
+func WithUserAgent(ua string) ClientOption {
+ return withUA(ua)
+}
+
+type withUA string
+
+func (w withUA) Resolve(o *opts.DialOpt) { o.UserAgent = string(w) }
+
+// WithBaseHTTP returns a ClientOption that specifies the HTTP client to
+// use as the basis of communications. This option may only be used with
+// services that support HTTP as their communication transport.
+func WithBaseHTTP(client *http.Client) ClientOption {
+ return withBaseHTTP{client}
+}
+
+type withBaseHTTP struct{ client *http.Client }
+
+func (w withBaseHTTP) Resolve(o *opts.DialOpt) {
+ o.HTTPClient = w.client
+}
+
+// WithBaseGRPC returns a ClientOption that specifies the GRPC client
+// connection to use as the basis of communications. This option may only be
+// used with services that support gRPC as their communication transport.
+func WithBaseGRPC(client *grpc.ClientConn) ClientOption {
+ return withBaseGRPC{client}
+}
+
+type withBaseGRPC struct{ client *grpc.ClientConn }
+
+func (w withBaseGRPC) Resolve(o *opts.DialOpt) {
+ o.GRPCClient = w.client
+}
diff --git a/vendor/src/google.golang.org/grpc/.travis.yml b/vendor/src/google.golang.org/grpc/.travis.yml
new file mode 100644
index 0000000000..3f83776ec5
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+before_install:
+ - go get github.com/axw/gocov/gocov
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+
+install:
+ - mkdir -p "$GOPATH/src/google.golang.org"
+ - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc"
+
+script:
+ - make test testrace
+ - make coverage
diff --git a/vendor/src/google.golang.org/grpc/CONTRIBUTING.md b/vendor/src/google.golang.org/grpc/CONTRIBUTING.md
new file mode 100644
index 0000000000..407d384a7c
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+# How to contribute
+
+We definitely welcome patches and contributions to grpc! Here are some
+guidelines and information about how to do so.
+
+## Getting started
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+### Filing Issues
+When filing an issue, make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+### Contributing code
+Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.
diff --git a/vendor/src/google.golang.org/grpc/Makefile b/vendor/src/google.golang.org/grpc/Makefile
new file mode 100644
index 0000000000..5bc38be209
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/Makefile
@@ -0,0 +1,50 @@
+.PHONY: \
+ all \
+ deps \
+ updatedeps \
+ testdeps \
+ updatetestdeps \
+ build \
+ proto \
+ test \
+ testrace \
+ clean \
+
+all: test testrace
+
+deps:
+ go get -d -v google.golang.org/grpc/...
+
+updatedeps:
+ go get -d -v -u -f google.golang.org/grpc/...
+
+testdeps:
+ go get -d -v -t google.golang.org/grpc/...
+
+updatetestdeps:
+ go get -d -v -t -u -f google.golang.org/grpc/...
+
+build: deps
+ go build google.golang.org/grpc/...
+
+proto:
+ @ if ! which protoc > /dev/null; then \
+ echo "error: protoc not installed" >&2; \
+ exit 1; \
+ fi
+ go get -v github.com/golang/protobuf/protoc-gen-go
+ for file in $$(git ls-files '*.proto'); do \
+ protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \
+ done
+
+test: testdeps
+ go test -v -cpu 1,4 google.golang.org/grpc/...
+
+testrace: testdeps
+ go test -v -race -cpu 1,4 google.golang.org/grpc/...
+
+clean:
+ go clean google.golang.org/grpc/...
+
+coverage: testdeps
+ goveralls -v google.golang.org/grpc/...
diff --git a/vendor/src/google.golang.org/grpc/PATENTS b/vendor/src/google.golang.org/grpc/PATENTS
new file mode 100644
index 0000000000..619f9dbfe6
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the GRPC project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of GRPC, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of GRPC. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of GRPC or any code incorporated within this
+implementation of GRPC constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of GRPC
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/src/google.golang.org/grpc/README.md b/vendor/src/google.golang.org/grpc/README.md
new file mode 100644
index 0000000000..37b05f0953
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/README.md
@@ -0,0 +1,32 @@
+# gRPC-Go
+
+[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
+
+The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.
+
+Installation
+------------
+
+To install this package, you need to install Go 1.4 or above and set up your Go workspace on your computer. The simplest way to install the library is to run:
+
+```
+$ go get google.golang.org/grpc
+```
+
+Prerequisites
+-------------
+
+This requires Go 1.4 or above.
+
+Constraints
+-----------
+The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need to discuss them with the gRPC-Go authors and consultants first.
+
+Documentation
+-------------
+See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
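+
+A minimal dial-and-call sketch (`pb.NewGreeterClient` is a placeholder for your
+own generated stub, not part of this package):
+
+```
+conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
+if err != nil {
+        log.Fatalf("dial: %v", err)
+}
+defer conn.Close()
+client := pb.NewGreeterClient(conn)
+```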
+
+Status
+------
+Beta release
+
diff --git a/vendor/src/google.golang.org/grpc/call.go b/vendor/src/google.golang.org/grpc/call.go
new file mode 100644
index 0000000000..9d815af39d
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/call.go
@@ -0,0 +1,192 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "io"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/transport"
+)
+
+// recvResponse receives and parses an RPC response.
+// On error, it returns the error and indicates whether the call should be retried.
+//
+// TODO(zhaoq): Check whether the received message sequence is valid.
+func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
+ // Try to acquire header metadata from the server if there is any.
+ var err error
+ c.headerMD, err = stream.Header()
+ if err != nil {
+ return err
+ }
+ p := &parser{s: stream}
+ for {
+ if err = recv(p, codec, reply); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+ }
+ c.trailerMD = stream.Trailer()
+ return nil
+}
+
+// sendRequest writes out various information of an RPC such as Context and Message.
+func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
+ stream, err := t.NewStream(ctx, callHdr)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ if _, ok := err.(transport.ConnectionError); !ok {
+ t.CloseStream(stream, err)
+ }
+ }
+ }()
+ // TODO(zhaoq): Support compression.
+ outBuf, err := encode(codec, args, compressionNone)
+ if err != nil {
+ return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
+ }
+ err = t.Write(stream, outBuf, opts)
+ if err != nil {
+ return nil, err
+ }
+ // Sent successfully.
+ return stream, nil
+}
+
+// callInfo contains all related configuration and information about an RPC.
+type callInfo struct {
+ failFast bool
+ headerMD metadata.MD
+ trailerMD metadata.MD
+ traceInfo traceInfo // in trace.go
+}
+
+// Invoke is called by the generated code. It sends the RPC request on the
+// wire and returns after the response is received.
+func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
+ var c callInfo
+ for _, o := range opts {
+ if err := o.before(&c); err != nil {
+ return toRPCErr(err)
+ }
+ }
+ defer func() {
+ for _, o := range opts {
+ o.after(&c)
+ }
+ }()
+ if EnableTracing {
+ c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
+ defer c.traceInfo.tr.Finish()
+ c.traceInfo.firstLine.client = true
+ if deadline, ok := ctx.Deadline(); ok {
+ c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
+ }
+ c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
+ // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
+ defer func() {
+ if err != nil {
+ c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ c.traceInfo.tr.SetError()
+ }
+ }()
+ }
+ topts := &transport.Options{
+ Last: true,
+ Delay: false,
+ }
+ var (
+ lastErr error // record the error that happened
+ )
+ for {
+ var (
+ err error
+ t transport.ClientTransport
+ stream *transport.Stream
+ )
+ // TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs.
+ if lastErr != nil && c.failFast {
+ return toRPCErr(lastErr)
+ }
+ callHdr := &transport.CallHdr{
+ Host: cc.authority,
+ Method: method,
+ }
+ t, err = cc.dopts.picker.Pick(ctx)
+ if err != nil {
+ if lastErr != nil {
+ // This was a retry; return the error from the last attempt.
+ return toRPCErr(lastErr)
+ }
+ return toRPCErr(err)
+ }
+ if c.traceInfo.tr != nil {
+ c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
+ }
+ stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts)
+ if err != nil {
+ if _, ok := err.(transport.ConnectionError); ok {
+ lastErr = err
+ continue
+ }
+ if lastErr != nil {
+ return toRPCErr(lastErr)
+ }
+ return toRPCErr(err)
+ }
+ // Receive the response
+ lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply)
+ if _, ok := lastErr.(transport.ConnectionError); ok {
+ continue
+ }
+ if c.traceInfo.tr != nil {
+ c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
+ }
+ t.CloseStream(stream, lastErr)
+ if lastErr != nil {
+ return toRPCErr(lastErr)
+ }
+ return Errorf(stream.StatusCode(), stream.StatusDesc())
+ }
+}
diff --git a/vendor/src/google.golang.org/grpc/clientconn.go b/vendor/src/google.golang.org/grpc/clientconn.go
new file mode 100644
index 0000000000..4729bbd6ab
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/clientconn.go
@@ -0,0 +1,525 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/transport"
+)
+
+var (
+ // ErrUnspecTarget indicates that the target address is unspecified.
+ ErrUnspecTarget = errors.New("grpc: target is unspecified")
+ // ErrNoTransportSecurity indicates that there is no transport security
+ // being set for ClientConn. Users should either set one or explicitly
+ // call the WithInsecure DialOption to disable security.
+ ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
+ // ErrCredentialsMisuse indicates that users want to transmit security information
+ // (e.g., oauth2 token) which requires secure connection on an insecure
+ // connection.
+ ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)")
+ // ErrClientConnClosing indicates that the operation is illegal because
+ // the session is closing.
+ ErrClientConnClosing = errors.New("grpc: the client connection is closing")
+ // ErrClientConnTimeout indicates that the connection could not be
+ // established or re-established within the specified timeout.
+ ErrClientConnTimeout = errors.New("grpc: timed out trying to connect")
+ // minimum time to give a connection to complete
+ minConnectTimeout = 20 * time.Second
+)
+
+// dialOptions configure a Dial call. dialOptions are set by the DialOption
+// values passed to Dial.
+type dialOptions struct {
+ codec Codec
+ picker Picker
+ block bool
+ insecure bool
+ copts transport.ConnectOptions
+}
+
+// DialOption configures how we set up the connection.
+type DialOption func(*dialOptions)
+
+// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
+func WithCodec(c Codec) DialOption {
+ return func(o *dialOptions) {
+ o.codec = c
+ }
+}
+
+// WithBlock returns a DialOption which makes the caller of Dial block until the
+// underlying connection is up. Without this, Dial returns immediately and
+// connecting to the server happens in the background.
+func WithBlock() DialOption {
+ return func(o *dialOptions) {
+ o.block = true
+ }
+}
+
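+// WithInsecure returns a DialOption which disables transport security for the
+// connection. Note that unless WithInsecure is set, transport security is
+// required (see ErrNoTransportSecurity).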
+func WithInsecure() DialOption {
+ return func(o *dialOptions) {
+ o.insecure = true
+ }
+}
+
+// WithTransportCredentials returns a DialOption which configures
+// connection-level security credentials (e.g., TLS/SSL).
+func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption {
+ return func(o *dialOptions) {
+ o.copts.AuthOptions = append(o.copts.AuthOptions, creds)
+ }
+}
+
+// WithPerRPCCredentials returns a DialOption which sets
+// credentials which will place auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.Credentials) DialOption {
+ return func(o *dialOptions) {
+ o.copts.AuthOptions = append(o.copts.AuthOptions, creds)
+ }
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a client connection.
+func WithTimeout(d time.Duration) DialOption {
+ return func(o *dialOptions) {
+ o.copts.Timeout = d
+ }
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
+func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption {
+ return func(o *dialOptions) {
+ o.copts.Dialer = f
+ }
+}
+
+// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
+func WithUserAgent(s string) DialOption {
+ return func(o *dialOptions) {
+ o.copts.UserAgent = s
+ }
+}
+
+// Dial creates a client connection to the given target.
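+//
+// For example (illustrative address and options):
+//
+//   cc, err := Dial("localhost:50051", WithInsecure(), WithTimeout(5*time.Second))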
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+ cc := &ClientConn{
+ target: target,
+ }
+ for _, opt := range opts {
+ opt(&cc.dopts)
+ }
+ if cc.dopts.codec == nil {
+ // Set the default codec.
+ cc.dopts.codec = protoCodec{}
+ }
+ if cc.dopts.picker == nil {
+ cc.dopts.picker = &unicastPicker{}
+ }
+ if err := cc.dopts.picker.Init(cc); err != nil {
+ return nil, err
+ }
+ colonPos := strings.LastIndex(target, ":")
+ if colonPos == -1 {
+ colonPos = len(target)
+ }
+ cc.authority = target[:colonPos]
+ return cc, nil
+}
+
+// ConnectivityState indicates the state of a client connection.
+type ConnectivityState int
+
+const (
+ // Idle indicates the ClientConn is idle.
+ Idle ConnectivityState = iota
+ // Connecting indicates the ClientConn is connecting.
+ Connecting
+ // Ready indicates the ClientConn is ready for work.
+ Ready
+ // TransientFailure indicates the ClientConn has seen a failure but expects to recover.
+ TransientFailure
+ // Shutdown indicates the ClientConn has started shutting down.
+ Shutdown
+)
+
+func (s ConnectivityState) String() string {
+ switch s {
+ case Idle:
+ return "IDLE"
+ case Connecting:
+ return "CONNECTING"
+ case Ready:
+ return "READY"
+ case TransientFailure:
+ return "TRANSIENT_FAILURE"
+ case Shutdown:
+ return "SHUTDOWN"
+ default:
+ panic(fmt.Sprintf("unknown connectivity state: %d", s))
+ }
+}
+
+// ClientConn represents a client connection to an RPC service.
+type ClientConn struct {
+ target string
+ authority string
+ dopts dialOptions
+}
+
+// State returns the connectivity state of cc.
+// This is an EXPERIMENTAL API.
+func (cc *ClientConn) State() ConnectivityState {
+ return cc.dopts.picker.State()
+}
+
+// WaitForStateChange blocks until the state changes to something other than the sourceState
+// or timeout fires on cc. It returns false if timeout fires, and true otherwise.
+// This is an EXPERIMENTAL API.
+func (cc *ClientConn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool {
+ return cc.dopts.picker.WaitForStateChange(timeout, sourceState)
+}
+
+// Close starts to tear down the ClientConn.
+func (cc *ClientConn) Close() error {
+ return cc.dopts.picker.Close()
+}
+
+// Conn is a client connection to a single destination.
+type Conn struct {
+ target string
+ dopts dialOptions
+ shutdownChan chan struct{}
+ events trace.EventLog
+
+ mu sync.Mutex
+ state ConnectivityState
+ stateCV *sync.Cond
+ // ready is closed and set to nil when a new transport is up or has
+ // failed due to timeout.
+ ready chan struct{}
+ transport transport.ClientTransport
+}
+
+// NewConn creates a Conn.
+func NewConn(cc *ClientConn) (*Conn, error) {
+ if cc.target == "" {
+ return nil, ErrUnspecTarget
+ }
+ c := &Conn{
+ target: cc.target,
+ dopts: cc.dopts,
+ shutdownChan: make(chan struct{}),
+ }
+ if EnableTracing {
+ c.events = trace.NewEventLog("grpc.ClientConn", c.target)
+ }
+ if !c.dopts.insecure {
+ var ok bool
+ for _, cd := range c.dopts.copts.AuthOptions {
+ if _, isTA := cd.(credentials.TransportAuthenticator); isTA {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return nil, ErrNoTransportSecurity
+ }
+ } else {
+ for _, cd := range c.dopts.copts.AuthOptions {
+ if cd.RequireTransportSecurity() {
+ return nil, ErrCredentialsMisuse
+ }
+ }
+ }
+ c.stateCV = sync.NewCond(&c.mu)
+ if c.dopts.block {
+ if err := c.resetTransport(false); err != nil {
+ c.Close()
+ return nil, err
+ }
+ // Start to monitor the error status of transport.
+ go c.transportMonitor()
+ } else {
+ // Start a goroutine connecting to the server asynchronously.
+ go func() {
+ if err := c.resetTransport(false); err != nil {
+ grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err)
+ c.Close()
+ return
+ }
+ c.transportMonitor()
+ }()
+ }
+ return c, nil
+}
+
+// printf records an event in cc's event log, unless cc has been closed.
+// REQUIRES cc.mu is held.
+func (cc *Conn) printf(format string, a ...interface{}) {
+ if cc.events != nil {
+ cc.events.Printf(format, a...)
+ }
+}
+
+// errorf records an error in cc's event log, unless cc has been closed.
+// REQUIRES cc.mu is held.
+func (cc *Conn) errorf(format string, a ...interface{}) {
+ if cc.events != nil {
+ cc.events.Errorf(format, a...)
+ }
+}
+
+// State returns the connectivity state of the Conn
+func (cc *Conn) State() ConnectivityState {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.state
+}
+
+// WaitForStateChange blocks until the state changes to something other than the sourceState
+// or timeout fires. It returns false if timeout fires and true otherwise.
+// TODO(zhaoq): Rewrite for complex Picker.
+func (cc *Conn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool {
+ start := time.Now()
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if sourceState != cc.state {
+ return true
+ }
+ expired := timeout <= time.Since(start)
+ if expired {
+ return false
+ }
+ done := make(chan struct{})
+ go func() {
+ select {
+ case <-time.After(timeout - time.Since(start)):
+ cc.mu.Lock()
+ expired = true
+ cc.stateCV.Broadcast()
+ cc.mu.Unlock()
+ case <-done:
+ }
+ }()
+ defer close(done)
+ for sourceState == cc.state {
+ cc.stateCV.Wait()
+ if expired {
+ return false
+ }
+ }
+ return true
+}
+
+func (cc *Conn) resetTransport(closeTransport bool) error {
+ var retries int
+ start := time.Now()
+ for {
+ cc.mu.Lock()
+ cc.printf("connecting")
+ if cc.state == Shutdown {
+ cc.mu.Unlock()
+ return ErrClientConnClosing
+ }
+ cc.state = Connecting
+ cc.stateCV.Broadcast()
+ cc.mu.Unlock()
+ if closeTransport {
+ cc.transport.Close()
+ }
+ // Adjust timeout for the current try.
+ copts := cc.dopts.copts
+ if copts.Timeout < 0 {
+ cc.Close()
+ return ErrClientConnTimeout
+ }
+ if copts.Timeout > 0 {
+ copts.Timeout -= time.Since(start)
+ if copts.Timeout <= 0 {
+ cc.Close()
+ return ErrClientConnTimeout
+ }
+ }
+ sleepTime := backoff(retries)
+ timeout := sleepTime
+ if timeout < minConnectTimeout {
+ timeout = minConnectTimeout
+ }
+ if copts.Timeout == 0 || copts.Timeout > timeout {
+ copts.Timeout = timeout
+ }
+ connectTime := time.Now()
+ newTransport, err := transport.NewClientTransport(cc.target, &copts)
+ if err != nil {
+ cc.mu.Lock()
+ cc.errorf("transient failure: %v", err)
+ cc.state = TransientFailure
+ cc.stateCV.Broadcast()
+ if cc.ready != nil {
+ close(cc.ready)
+ cc.ready = nil
+ }
+ cc.mu.Unlock()
+ sleepTime -= time.Since(connectTime)
+ if sleepTime < 0 {
+ sleepTime = 0
+ }
+ // Fail early before falling into sleep.
+ if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) {
+ cc.mu.Lock()
+ cc.errorf("connection timeout")
+ cc.mu.Unlock()
+ cc.Close()
+ return ErrClientConnTimeout
+ }
+ closeTransport = false
+ time.Sleep(sleepTime)
+ retries++
+ grpclog.Printf("grpc: ClientConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target)
+ continue
+ }
+ cc.mu.Lock()
+ cc.printf("ready")
+ if cc.state == Shutdown {
+ // cc.Close() has been invoked.
+ cc.mu.Unlock()
+ newTransport.Close()
+ return ErrClientConnClosing
+ }
+ cc.state = Ready
+ cc.stateCV.Broadcast()
+ cc.transport = newTransport
+ if cc.ready != nil {
+ close(cc.ready)
+ cc.ready = nil
+ }
+ cc.mu.Unlock()
+ return nil
+ }
+}
+
+// transportMonitor runs in a goroutine to track errors on the transport and
+// recreates the transport if an error happens. It returns when the Conn is
+// closing.
+func (cc *Conn) transportMonitor() {
+ for {
+ select {
+ // shutdownChan is needed to detect the teardown when
+ // the ClientConn is idle (i.e., no RPC in flight).
+ case <-cc.shutdownChan:
+ return
+ case <-cc.transport.Error():
+ cc.mu.Lock()
+ cc.state = TransientFailure
+ cc.stateCV.Broadcast()
+ cc.mu.Unlock()
+ if err := cc.resetTransport(true); err != nil {
+ // The ClientConn is closing.
+ cc.mu.Lock()
+ cc.printf("transport exiting: %v", err)
+ cc.mu.Unlock()
+ grpclog.Printf("grpc: ClientConn.transportMonitor exits due to: %v", err)
+ return
+ }
+ continue
+ }
+ }
+}
+
+// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed.
+func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
+ for {
+ cc.mu.Lock()
+ switch {
+ case cc.state == Shutdown:
+ cc.mu.Unlock()
+ return nil, ErrClientConnClosing
+ case cc.state == Ready:
+ cc.mu.Unlock()
+ return cc.transport, nil
+ default:
+ ready := cc.ready
+ if ready == nil {
+ ready = make(chan struct{})
+ cc.ready = ready
+ }
+ cc.mu.Unlock()
+ select {
+ case <-ctx.Done():
+ return nil, transport.ContextErr(ctx.Err())
+ // Wait until the new transport is ready or failed.
+ case <-ready:
+ }
+ }
+ }
+}
+
+// Close starts to tear down the Conn. Returns ErrClientConnClosing if
+// it has been closed (mostly due to dial time-out).
+// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
+// some edge cases (e.g., the caller opens and closes many ClientConn's in a
+// tight loop).
+func (cc *Conn) Close() error {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if cc.state == Shutdown {
+ return ErrClientConnClosing
+ }
+ cc.state = Shutdown
+ cc.stateCV.Broadcast()
+ if cc.events != nil {
+ cc.events.Finish()
+ cc.events = nil
+ }
+ if cc.ready != nil {
+ close(cc.ready)
+ cc.ready = nil
+ }
+ if cc.transport != nil {
+ cc.transport.Close()
+ }
+ if cc.shutdownChan != nil {
+ close(cc.shutdownChan)
+ }
+ return nil
+}
diff --git a/vendor/src/google.golang.org/grpc/codegen.sh b/vendor/src/google.golang.org/grpc/codegen.sh
new file mode 100755
index 0000000000..b009488842
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/codegen.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# This script serves as an example to demonstrate how to generate the gRPC-Go
+# interface and the related messages from a .proto file.
+#
+# It assumes the installation of i) the Google protocol buffer compiler at
+# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
+# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
+# not installed them, please do so first.
+#
+# We recommend running this script at $GOPATH/src.
+#
+# If this is not what you need, feel free to make your own scripts. Again, this
+# script is for demonstration purposes.
+#
+proto=$1
+protoc --go_out=plugins=grpc:. $proto
diff --git a/vendor/src/google.golang.org/grpc/codes/code_string.go b/vendor/src/google.golang.org/grpc/codes/code_string.go
new file mode 100644
index 0000000000..e6762d0845
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/codes/code_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=Code; DO NOT EDIT
+
+package codes
+
+import "fmt"
+
+const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
+
+var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
+
+func (i Code) String() string {
+ if i+1 >= Code(len(_Code_index)) {
+ return fmt.Sprintf("Code(%d)", i)
+ }
+ return _Code_name[_Code_index[i]:_Code_index[i+1]]
+}
diff --git a/vendor/src/google.golang.org/grpc/codes/codes.go b/vendor/src/google.golang.org/grpc/codes/codes.go
new file mode 100644
index 0000000000..e14b464acf
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/codes/codes.go
@@ -0,0 +1,159 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package codes defines the canonical error codes used by gRPC. It is
+// consistent across various languages.
+package codes // import "google.golang.org/grpc/codes"
+
+// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
+type Code uint32
+
+//go:generate stringer -type=Code
+
+const (
+ // OK is returned on success.
+ OK Code = 0
+
+ // Canceled indicates the operation was cancelled (typically by the caller).
+ Canceled Code = 1
+
+ // Unknown error. An example of where this error may be returned is
+ // if a Status value received from another address space belongs to
+ // an error-space that is not known in this address space. Also
+ // errors raised by APIs that do not return enough error information
+ // may be converted to this error.
+ Unknown Code = 2
+
+ // InvalidArgument indicates client specified an invalid argument.
+ // Note that this differs from FailedPrecondition. It indicates arguments
+ // that are problematic regardless of the state of the system
+ // (e.g., a malformed file name).
+ InvalidArgument Code = 3
+
+ // DeadlineExceeded means operation expired before completion.
+ // For operations that change the state of the system, this error may be
+ // returned even if the operation has completed successfully. For
+ // example, a successful response from a server could have been delayed
+ // long enough for the deadline to expire.
+ DeadlineExceeded Code = 4
+
+ // NotFound means some requested entity (e.g., file or directory) was
+ // not found.
+ NotFound Code = 5
+
+ // AlreadyExists means an attempt to create an entity failed because one
+ // already exists.
+ AlreadyExists Code = 6
+
+ // PermissionDenied indicates the caller does not have permission to
+ // execute the specified operation. It must not be used for rejections
+ // caused by exhausting some resource (use ResourceExhausted
+ // instead for those errors). It must not be
+ // used if the caller cannot be identified (use Unauthenticated
+ // instead for those errors).
+ PermissionDenied Code = 7
+
+ // Unauthenticated indicates the request does not have valid
+ // authentication credentials for the operation.
+ Unauthenticated Code = 16
+
+ // ResourceExhausted indicates some resource has been exhausted, perhaps
+ // a per-user quota, or perhaps the entire file system is out of space.
+ ResourceExhausted Code = 8
+
+ // FailedPrecondition indicates operation was rejected because the
+ // system is not in a state required for the operation's execution.
+ // For example, directory to be deleted may be non-empty, an rmdir
+ // operation is applied to a non-directory, etc.
+ //
+ // A litmus test that may help a service implementor in deciding
+ // between FailedPrecondition, Aborted, and Unavailable:
+ // (a) Use Unavailable if the client can retry just the failing call.
+ // (b) Use Aborted if the client should retry at a higher-level
+ // (e.g., restarting a read-modify-write sequence).
+ // (c) Use FailedPrecondition if the client should not retry until
+ // the system state has been explicitly fixed. E.g., if an "rmdir"
+ // fails because the directory is non-empty, FailedPrecondition
+ // should be returned since the client should not retry unless
+ // they have first fixed up the directory by deleting files from it.
+ // (d) Use FailedPrecondition if the client performs conditional
+ // REST Get/Update/Delete on a resource and the resource on the
+ // server does not match the condition. E.g., conflicting
+ // read-modify-write on the same resource.
+ FailedPrecondition Code = 9
+
+ // Aborted indicates the operation was aborted, typically due to a
+ // concurrency issue like sequencer check failures, transaction aborts,
+ // etc.
+ //
+ // See litmus test above for deciding between FailedPrecondition,
+ // Aborted, and Unavailable.
+ Aborted Code = 10
+
+ // OutOfRange means operation was attempted past the valid range.
+ // E.g., seeking or reading past end of file.
+ //
+ // Unlike InvalidArgument, this error indicates a problem that may
+ // be fixed if the system state changes. For example, a 32-bit file
+ // system will generate InvalidArgument if asked to read at an
+ // offset that is not in the range [0,2^32-1], but it will generate
+ // OutOfRange if asked to read from an offset past the current
+ // file size.
+ //
+ // There is a fair bit of overlap between FailedPrecondition and
+ // OutOfRange. We recommend using OutOfRange (the more specific
+ // error) when it applies so that callers who are iterating through
+ // a space can easily look for an OutOfRange error to detect when
+ // they are done.
+ OutOfRange Code = 11
+
+ // Unimplemented indicates operation is not implemented or not
+ // supported/enabled in this service.
+ Unimplemented Code = 12
+
+ // Internal errors. This means some invariants expected by the
+ // underlying system have been broken. If you see one of these errors,
+ // something is very broken.
+ Internal Code = 13
+
+ // Unavailable indicates the service is currently unavailable.
+ // This is most likely a transient condition and may be corrected
+ // by retrying with a backoff.
+ //
+ // See litmus test above for deciding between FailedPrecondition,
+ // Aborted, and Unavailable.
+ Unavailable Code = 14
+
+ // DataLoss indicates unrecoverable data loss or corruption.
+ DataLoss Code = 15
+)
diff --git a/vendor/src/google.golang.org/grpc/credentials/credentials.go b/vendor/src/google.golang.org/grpc/credentials/credentials.go
new file mode 100644
index 0000000000..cde38dc4e4
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/credentials/credentials.go
@@ -0,0 +1,239 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package credentials implements various credentials supported by the gRPC library,
+// which encapsulate all the state needed by a client to authenticate with a
+// server and make various assertions, e.g., about the client's identity, role,
+// or whether it is authorized to make a particular call.
+package credentials // import "google.golang.org/grpc/credentials"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+var (
+ // alpnProtoStr are the specified application level protocols for gRPC.
+ alpnProtoStr = []string{"h2"}
+)
+
+// Credentials defines the common interface all supported credentials must
+// implement.
+type Credentials interface {
+ // GetRequestMetadata gets the current request metadata, refreshing
+ // tokens if required. This should be called by the transport layer on
+ // each request, and the data should be populated in headers or other
+ // context. uri is the URI of the entry point for the request. When
+ // supported by the underlying implementation, ctx can be used for
+ // timeout and cancellation.
+ // TODO(zhaoq): Define the set of the qualified keys instead of leaving
+ // it as an arbitrary string.
+ GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
+ // RequireTransportSecurity indicates whether the credentials require
+ // transport security.
+ RequireTransportSecurity() bool
+}
+
+// ProtocolInfo provides information regarding the gRPC wire protocol version,
+// security protocol, security protocol version in use, etc.
+type ProtocolInfo struct {
+ // ProtocolVersion is the gRPC wire protocol version.
+ ProtocolVersion string
+ // SecurityProtocol is the security protocol in use.
+ SecurityProtocol string
+ // SecurityVersion is the security protocol version.
+ SecurityVersion string
+}
+
+// AuthInfo defines the common interface for the auth information the users are interested in.
+type AuthInfo interface {
+ AuthType() string
+}
+
+type authInfoKey struct{}
+
+// NewContext creates a new context with authInfo attached.
+func NewContext(ctx context.Context, authInfo AuthInfo) context.Context {
+ return context.WithValue(ctx, authInfoKey{}, authInfo)
+}
+
+// FromContext returns the authInfo in ctx if it exists.
+func FromContext(ctx context.Context) (authInfo AuthInfo, ok bool) {
+ authInfo, ok = ctx.Value(authInfoKey{}).(AuthInfo)
+ return
+}
+
+// TransportAuthenticator defines the common interface for all the live gRPC wire
+// protocols and supported transport security protocols (e.g., TLS, SSL).
+type TransportAuthenticator interface {
+ // ClientHandshake does the authentication handshake specified by the corresponding
+ // authentication protocol on rawConn for clients. It returns the authenticated
+ // connection and the corresponding auth information about the connection.
+ ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error)
+ // ServerHandshake does the authentication handshake for servers. It returns
+ // the authenticated connection and the corresponding auth information about
+ // the connection.
+ ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
+ // Info provides the ProtocolInfo of this TransportAuthenticator.
+ Info() ProtocolInfo
+ Credentials
+}
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+ State tls.ConnectionState
+}
+
+func (t TLSInfo) AuthType() string {
+ return "tls"
+}
+
+// tlsCreds is the credentials required for authenticating a connection using TLS.
+type tlsCreds struct {
+ // TLS configuration
+ config tls.Config
+}
+
+func (c tlsCreds) Info() ProtocolInfo {
+ return ProtocolInfo{
+ SecurityProtocol: "tls",
+ SecurityVersion: "1.2",
+ }
+}
+
+// GetRequestMetadata returns nil, nil since TLS credentials do not have
+// metadata.
+func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ return nil, nil
+}
+
+func (c *tlsCreds) RequireTransportSecurity() bool {
+ return true
+}
+
+type timeoutError struct{}
+
+func (timeoutError) Error() string { return "credentials: Dial timed out" }
+func (timeoutError) Timeout() bool { return true }
+func (timeoutError) Temporary() bool { return true }
+
+func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) {
+ // borrow some code from tls.DialWithDialer
+ var errChannel chan error
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- timeoutError{}
+ })
+ }
+ if c.config.ServerName == "" {
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ c.config.ServerName = addr[:colonPos]
+ }
+ conn := tls.Client(rawConn, &c.config)
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+ err = <-errChannel
+ }
+ if err != nil {
+ rawConn.Close()
+ return nil, nil, err
+ }
+ // TODO(zhaoq): Omit the auth info for client now. It is more for
+ // information than anything else.
+ return conn, nil, nil
+}
+
+func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
+ conn := tls.Server(rawConn, &c.config)
+ if err := conn.Handshake(); err != nil {
+ rawConn.Close()
+ return nil, nil, err
+ }
+ return conn, TLSInfo{conn.ConnectionState()}, nil
+}
+
+// NewTLS uses c to construct a TransportAuthenticator based on TLS.
+func NewTLS(c *tls.Config) TransportAuthenticator {
+ tc := &tlsCreds{*c}
+ tc.config.NextProtos = alpnProtoStr
+ return tc
+}
+
+// NewClientTLSFromCert constructs TLS credentials from the input certificate for a client.
+func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator {
+ return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp})
+}
+
+// NewClientTLSFromFile constructs TLS credentials from the input certificate file for a client.
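+//
+// Usage sketch (illustrative file and server names): the result is typically
+// passed to grpc.WithTransportCredentials when dialing:
+//
+//   creds, err := NewClientTLSFromFile("ca.pem", "server.example.com")
+//   cc, err := grpc.Dial("server.example.com:443", grpc.WithTransportCredentials(creds))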
+func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) {
+ b, err := ioutil.ReadFile(certFile)
+ if err != nil {
+ return nil, err
+ }
+ cp := x509.NewCertPool()
+ if !cp.AppendCertsFromPEM(b) {
+ return nil, fmt.Errorf("credentials: failed to append certificates")
+ }
+ return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil
+}
+
+// NewServerTLSFromCert constructs TLS credentials from the input certificate for a server.
+func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator {
+ return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+}
+
+// NewServerTLSFromFile constructs TLS credentials from the input certificate
+// file and key file for a server.
+func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) {
+ cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return nil, err
+ }
+ return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
+}
diff --git a/vendor/src/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/src/google.golang.org/grpc/credentials/oauth/oauth.go
new file mode 100644
index 0000000000..04943fdf03
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/credentials/oauth/oauth.go
@@ -0,0 +1,177 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package oauth implements gRPC credentials using OAuth.
+package oauth
+
+import (
+ "fmt"
+ "io/ioutil"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/grpc/credentials"
+)
+
+// TokenSource supplies credentials from an oauth2.TokenSource.
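+//
+// A per-RPC usage sketch (myOAuth2Source and creds are placeholders):
+//
+//   ts := TokenSource{TokenSource: myOAuth2Source}
+//   cc, err := grpc.Dial(addr,
+//           grpc.WithPerRPCCredentials(ts),
+//           grpc.WithTransportCredentials(creds))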
+type TokenSource struct {
+ oauth2.TokenSource
+}
+
+// GetRequestMetadata gets the request metadata as a map from a TokenSource.
+func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ token, err := ts.Token()
+ if err != nil {
+ return nil, err
+ }
+ return map[string]string{
+ "authorization": token.TokenType + " " + token.AccessToken,
+ }, nil
+}
+
+func (ts TokenSource) RequireTransportSecurity() bool {
+ return true
+}
+
+type jwtAccess struct {
+ jsonKey []byte
+}
+
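+// NewJWTAccessFromFile creates credentials from the given service account
+// key file, signing a JWT access token locally for each request URI.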
+func NewJWTAccessFromFile(keyFile string) (credentials.Credentials, error) {
+ jsonKey, err := ioutil.ReadFile(keyFile)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
+ }
+ return NewJWTAccessFromKey(jsonKey)
+}
+
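+// NewJWTAccessFromKey creates credentials from the given service account
+// JSON key, signing a JWT access token locally for each request URI.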
+func NewJWTAccessFromKey(jsonKey []byte) (credentials.Credentials, error) {
+ return jwtAccess{jsonKey}, nil
+}
+
+func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0])
+ if err != nil {
+ return nil, err
+ }
+ token, err := ts.Token()
+ if err != nil {
+ return nil, err
+ }
+ return map[string]string{
+ "authorization": token.TokenType + " " + token.AccessToken,
+ }, nil
+}
+
+func (j jwtAccess) RequireTransportSecurity() bool {
+ return true
+}
+
+// oauthAccess supplies credentials from a given token.
+type oauthAccess struct {
+ token oauth2.Token
+}
+
+// NewOauthAccess constructs the credentials using a given token.
+func NewOauthAccess(token *oauth2.Token) credentials.Credentials {
+ return oauthAccess{token: *token}
+}
+
+func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ return map[string]string{
+ "authorization": oa.token.TokenType + " " + oa.token.AccessToken,
+ }, nil
+}
+
+func (oa oauthAccess) RequireTransportSecurity() bool {
+ return true
+}
+
+// NewComputeEngine constructs the credentials that fetches access tokens from
+// Google Compute Engine (GCE)'s metadata server. It is only valid to use this
+// if your program is running on a GCE instance.
+// TODO(dsymonds): Deprecate and remove this.
+func NewComputeEngine() credentials.Credentials {
+ return TokenSource{google.ComputeTokenSource("")}
+}
+
+// serviceAccount represents credentials via JWT signing key.
+type serviceAccount struct {
+ config *jwt.Config
+}
+
+func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ token, err := s.config.TokenSource(ctx).Token()
+ if err != nil {
+ return nil, err
+ }
+ return map[string]string{
+ "authorization": token.TokenType + " " + token.AccessToken,
+ }, nil
+}
+
+func (s serviceAccount) RequireTransportSecurity() bool {
+ return true
+}
+
+// NewServiceAccountFromKey constructs the credentials using the JSON key slice
+// from a Google Developers service account.
+func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.Credentials, error) {
+ config, err := google.JWTConfigFromJSON(jsonKey, scope...)
+ if err != nil {
+ return nil, err
+ }
+ return serviceAccount{config: config}, nil
+}
+
+// NewServiceAccountFromFile constructs the credentials using the JSON key file
+// of a Google Developers service account.
+func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.Credentials, error) {
+ jsonKey, err := ioutil.ReadFile(keyFile)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
+ }
+ return NewServiceAccountFromKey(jsonKey, scope...)
+}
+
+// NewApplicationDefault returns "Application Default Credentials". For more
+// detail, see https://developers.google.com/accounts/docs/application-default-credentials.
+func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.Credentials, error) {
+ t, err := google.DefaultTokenSource(ctx, scope...)
+ if err != nil {
+ return nil, err
+ }
+ return TokenSource{t}, nil
+}
diff --git a/vendor/src/google.golang.org/grpc/doc.go b/vendor/src/google.golang.org/grpc/doc.go
new file mode 100644
index 0000000000..c63847745d
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/doc.go
@@ -0,0 +1,6 @@
+/*
+Package grpc implements an RPC system called gRPC.
+
+See https://github.com/grpc/grpc for more information about gRPC.
+*/
+package grpc // import "google.golang.org/grpc"
diff --git a/vendor/src/google.golang.org/grpc/grpclog/logger.go b/vendor/src/google.golang.org/grpc/grpclog/logger.go
new file mode 100644
index 0000000000..ec089f70f8
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/grpclog/logger.go
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+Package grpclog defines logging for grpc.
+*/
+package grpclog // import "google.golang.org/grpc/grpclog"
+
+import (
+ "log"
+ "os"
+)
+
+// Use golang's standard logger by default.
+var logger Logger = log.New(os.Stderr, "", log.LstdFlags)
+
+// Logger mimics golang's standard Logger as an interface.
+type Logger interface {
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Fatalln(args ...interface{})
+ Print(args ...interface{})
+ Printf(format string, args ...interface{})
+ Println(args ...interface{})
+}
+
+// SetLogger sets the logger that is used in grpc.
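+// For example, to discard all grpc log output (a sketch; requires io/ioutil):
+//
+//   SetLogger(log.New(ioutil.Discard, "", 0))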
+func SetLogger(l Logger) {
+ logger = l
+}
+
+// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code.
+func Fatal(args ...interface{}) {
+ logger.Fatal(args...)
+}
+
+// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code.
+func Fatalf(format string, args ...interface{}) {
+ logger.Fatalf(format, args...)
+}
+
+// Fatalln is equivalent to Println() followed by a call to os.Exit() with a non-zero exit code.
+func Fatalln(args ...interface{}) {
+ logger.Fatalln(args...)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+func Print(args ...interface{}) {
+ logger.Print(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+func Printf(format string, args ...interface{}) {
+ logger.Printf(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+func Println(args ...interface{}) {
+ logger.Println(args...)
+}
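
Because *log.Logger already provides every method in the Logger interface above, redirecting gRPC's log output needs only a SetLogger call; a small sketch (prefix and destination are illustrative):

package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// *log.Logger satisfies grpclog.Logger, so the standard library logger
	// can be swapped in directly.
	grpclog.SetLogger(log.New(os.Stdout, "[grpc] ", log.LstdFlags|log.Lshortfile))
	grpclog.Println("custom logger installed")
}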
diff --git a/vendor/src/google.golang.org/grpc/metadata/metadata.go b/vendor/src/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 0000000000..adebc38f8e
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,146 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strings"
+
+ "golang.org/x/net/context"
+)
+
+const (
+ binHdrSuffix = "-bin"
+)
+
+// grpc-http2 requires ASCII header keys and values (more detail can be found in
+// "Requests" subsection in go/grpc-http2).
+func isASCII(s string) bool {
+ for _, c := range s {
+ if c > 127 {
+ return false
+ }
+ }
+ return true
+}
+
+// encodeKeyValue encodes a key and value so they are qualified for transmission via gRPC.
+// Transmitting binary headers directly would violate the HTTP/2 spec.
+// TODO(zhaoq): Maybe check if k is ASCII also.
+func encodeKeyValue(k, v string) (string, string) {
+ if isASCII(v) {
+ return k, v
+ }
+ key := strings.ToLower(k + binHdrSuffix)
+ val := base64.StdEncoding.EncodeToString([]byte(v))
+ return key, string(val)
+}
+
+// DecodeKeyValue returns the original key and value corresponding to the
+// encoded data in k, v.
+func DecodeKeyValue(k, v string) (string, string, error) {
+ if !strings.HasSuffix(k, binHdrSuffix) {
+ return k, v, nil
+ }
+ key := k[:len(k)-len(binHdrSuffix)]
+ val, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return "", "", err
+ }
+ return key, string(val), nil
+}
+
+// MD is a mapping from metadata keys to values. Users should use the following
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates an MD from the given key-value map.
+func New(m map[string]string) MD {
+ md := MD{}
+ for k, v := range m {
+ key, val := encodeKeyValue(k, v)
+ md[key] = append(md[key], val)
+ }
+ return md
+}
+
+// Pairs returns an MD formed from a mapping of key, value ... pairs.
+// Pairs panics if len(kv) is odd.
+func Pairs(kv ...string) MD {
+ if len(kv)%2 == 1 {
+ panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
+ }
+ md := MD{}
+ var k string
+ for i, s := range kv {
+ if i%2 == 0 {
+ k = s
+ continue
+ }
+ key, val := encodeKeyValue(k, s)
+ md[key] = append(md[key], val)
+ }
+ return md
+}
+
+// Len returns the number of items in md.
+func (md MD) Len() int {
+ return len(md)
+}
+
+// Copy returns a copy of md.
+func (md MD) Copy() MD {
+ out := MD{}
+ for k, v := range md {
+ for _, i := range v {
+ out[k] = append(out[k], i)
+ }
+ }
+ return out
+}
+
+type mdKey struct{}
+
+// NewContext creates a new context with md attached.
+func NewContext(ctx context.Context, md MD) context.Context {
+ return context.WithValue(ctx, mdKey{}, md)
+}
+
+// FromContext returns the MD in ctx if it exists.
+func FromContext(ctx context.Context) (md MD, ok bool) {
+ md, ok = ctx.Value(mdKey{}).(MD)
+ return
+}
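
A short sketch of the round trip these helpers provide: Pairs builds an MD (silently moving any non-ASCII value under a base64-encoded "-bin" key), NewContext attaches it to a context, and FromContext retrieves it. The keys and values are illustrative:

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs(
		"token", "abc123", // ASCII, stored as-is
		"trace", string([]byte{0xde, 0xad}), // non-ASCII, stored as trace-bin:[3q0=]
	)
	ctx := metadata.NewContext(context.Background(), md)
	if got, ok := metadata.FromContext(ctx); ok {
		fmt.Println(got.Len(), got) // 2 entries
	}
}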
diff --git a/vendor/src/google.golang.org/grpc/picker.go b/vendor/src/google.golang.org/grpc/picker.go
new file mode 100644
index 0000000000..bc48573a41
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/picker.go
@@ -0,0 +1,93 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/transport"
+)
+
+// Picker picks a Conn for RPC requests.
+// This interface is EXPERIMENTAL; please do not implement your own Picker for now.
+type Picker interface {
+ // Init does initial processing for the Picker, e.g., initiate some connections.
+ Init(cc *ClientConn) error
+ // Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC
+ // or some error happens.
+ Pick(ctx context.Context) (transport.ClientTransport, error)
+ // State returns the connectivity state of the underlying connections.
+ State() ConnectivityState
+ // WaitForStateChange blocks until the state changes to something other than
+ // the sourceState or timeout fires on cc. It returns false if timeout fires,
+ // and true otherwise.
+ WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool
+ // Close closes all the Conn's owned by this Picker.
+ Close() error
+}
+
+// unicastPicker is the default Picker which is used when there is no custom Picker
+// specified by users. It always picks the same Conn.
+type unicastPicker struct {
+ conn *Conn
+}
+
+func (p *unicastPicker) Init(cc *ClientConn) error {
+ c, err := NewConn(cc)
+ if err != nil {
+ return err
+ }
+ p.conn = c
+ return nil
+}
+
+func (p *unicastPicker) Pick(ctx context.Context) (transport.ClientTransport, error) {
+ return p.conn.Wait(ctx)
+}
+
+func (p *unicastPicker) State() ConnectivityState {
+ return p.conn.State()
+}
+
+func (p *unicastPicker) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool {
+ return p.conn.WaitForStateChange(timeout, sourceState)
+}
+
+func (p *unicastPicker) Close() error {
+ if p.conn != nil {
+ return p.conn.Close()
+ }
+ return nil
+}
diff --git a/vendor/src/google.golang.org/grpc/rpc_util.go b/vendor/src/google.golang.org/grpc/rpc_util.go
new file mode 100644
index 0000000000..46a6801b07
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/rpc_util.go
@@ -0,0 +1,337 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+ "math/rand"
+ "os"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/transport"
+)
+
+// Codec defines the interface gRPC uses to encode and decode messages.
+type Codec interface {
+ // Marshal returns the wire format of v.
+ Marshal(v interface{}) ([]byte, error)
+ // Unmarshal parses the wire format into v.
+ Unmarshal(data []byte, v interface{}) error
+ // String returns the name of the Codec implementation. The returned
+ // string will be used as part of content type in transmission.
+ String() string
+}
+
+// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
+type protoCodec struct{}
+
+func (protoCodec) Marshal(v interface{}) ([]byte, error) {
+ return proto.Marshal(v.(proto.Message))
+}
+
+func (protoCodec) Unmarshal(data []byte, v interface{}) error {
+ return proto.Unmarshal(data, v.(proto.Message))
+}
+
+func (protoCodec) String() string {
+ return "proto"
+}
+
+// CallOption configures a Call before it starts or extracts information from
+// a Call after it completes.
+type CallOption interface {
+ // before is called before the call is sent to any server. If before
+ // returns a non-nil error, the RPC fails with that error.
+ before(*callInfo) error
+
+ // after is called after the call has completed. after cannot return an
+ // error, so any failures should be reported via output parameters.
+ after(*callInfo)
+}
+
+type beforeCall func(c *callInfo) error
+
+func (o beforeCall) before(c *callInfo) error { return o(c) }
+func (o beforeCall) after(c *callInfo) {}
+
+type afterCall func(c *callInfo)
+
+func (o afterCall) before(c *callInfo) error { return nil }
+func (o afterCall) after(c *callInfo) { o(c) }
+
+// Header returns a CallOption that retrieves the header metadata
+// for a unary RPC.
+func Header(md *metadata.MD) CallOption {
+ return afterCall(func(c *callInfo) {
+ *md = c.headerMD
+ })
+}
+
+// Trailer returns a CallOption that retrieves the trailer metadata
+// for a unary RPC.
+func Trailer(md *metadata.MD) CallOption {
+ return afterCall(func(c *callInfo) {
+ *md = c.trailerMD
+ })
+}
+
+// The format of the payload: compressed or not?
+type payloadFormat uint8
+
+const (
+ compressionNone payloadFormat = iota // no compression
+ compressionFlate
+ // More formats
+)
+
+// parser reads complete gRPC messages from the underlying reader.
+type parser struct {
+ s io.Reader
+}
+
+// recvMsg reads a complete gRPC message from the stream. It blocks until the
+// message has been received in full. It returns the message and its payload
+// format; io.EOF is returned with nil msg and 0 pf if the entire stream is
+// done. Any other non-nil error indicates that something went wrong on reading.
+func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) {
+ // The header of a gRPC message. Find more detail
+ // at http://www.grpc.io/docs/guides/wire.html.
+ var buf [5]byte
+
+ if _, err := io.ReadFull(p.s, buf[:]); err != nil {
+ return 0, nil, err
+ }
+
+ pf = payloadFormat(buf[0])
+ length := binary.BigEndian.Uint32(buf[1:])
+
+ if length == 0 {
+ return pf, nil, nil
+ }
+ msg = make([]byte, int(length))
+ if _, err := io.ReadFull(p.s, msg); err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return 0, nil, err
+ }
+ return pf, msg, nil
+}
+
+// encode serializes msg and prepends the message header. If msg is nil, it
+// generates a message header with zero message length.
+func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) {
+ var b []byte
+ var length uint
+ if msg != nil {
+ var err error
+ // TODO(zhaoq): optimize to reduce memory alloc and copying.
+ b, err = c.Marshal(msg)
+ if err != nil {
+ return nil, err
+ }
+ length = uint(len(b))
+ }
+ if length > math.MaxUint32 {
+ return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length)
+ }
+
+ const (
+ payloadLen = 1
+ sizeLen = 4
+ )
+
+ var buf = make([]byte, payloadLen+sizeLen+len(b))
+
+ // Write payload format
+ buf[0] = byte(pf)
+ // Write length of b into buf
+ binary.BigEndian.PutUint32(buf[1:], uint32(length))
+ // Copy encoded msg to buf
+ copy(buf[5:], b)
+
+ return buf, nil
+}
+
+func recv(p *parser, c Codec, m interface{}) error {
+ pf, d, err := p.recvMsg()
+ if err != nil {
+ return err
+ }
+ switch pf {
+ case compressionNone:
+ if err := c.Unmarshal(d, m); err != nil {
+ if rErr, ok := err.(rpcError); ok {
+ return rErr
+ }
+ return Errorf(codes.Internal, "grpc: %v", err)
+ }
+ default:
+ return Errorf(codes.Internal, "gprc: compression is not supported yet.")
+ }
+ return nil
+}
+
+// rpcError defines the status from an RPC.
+type rpcError struct {
+ code codes.Code
+ desc string
+}
+
+func (e rpcError) Error() string {
+ return fmt.Sprintf("rpc error: code = %d desc = %q", e.code, e.desc)
+}
+
+// Code returns the error code for err if it was produced by the rpc system.
+// Otherwise, it returns codes.Unknown.
+func Code(err error) codes.Code {
+ if err == nil {
+ return codes.OK
+ }
+ if e, ok := err.(rpcError); ok {
+ return e.code
+ }
+ return codes.Unknown
+}
+
+// ErrorDesc returns the error description of err if it was produced by the rpc system.
+// Otherwise, it returns err.Error() or empty string when err is nil.
+func ErrorDesc(err error) string {
+ if err == nil {
+ return ""
+ }
+ if e, ok := err.(rpcError); ok {
+ return e.desc
+ }
+ return err.Error()
+}
+
+// Errorf returns an error containing an error code and a description;
+// Errorf returns nil if c is OK.
+func Errorf(c codes.Code, format string, a ...interface{}) error {
+ if c == codes.OK {
+ return nil
+ }
+ return rpcError{
+ code: c,
+ desc: fmt.Sprintf(format, a...),
+ }
+}
+
+// toRPCErr converts an error into an rpcError.
+func toRPCErr(err error) error {
+ switch e := err.(type) {
+ case rpcError:
+ return err
+ case transport.StreamError:
+ return rpcError{
+ code: e.Code,
+ desc: e.Desc,
+ }
+ case transport.ConnectionError:
+ return rpcError{
+ code: codes.Internal,
+ desc: e.Desc,
+ }
+ }
+ return Errorf(codes.Unknown, "%v", err)
+}
+
+// convertCode converts a standard Go error into its canonical code. Note that
+// this is only used to translate errors returned by server applications.
+func convertCode(err error) codes.Code {
+ switch err {
+ case nil:
+ return codes.OK
+ case io.EOF:
+ return codes.OutOfRange
+ case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
+ return codes.FailedPrecondition
+ case os.ErrInvalid:
+ return codes.InvalidArgument
+ case context.Canceled:
+ return codes.Canceled
+ case context.DeadlineExceeded:
+ return codes.DeadlineExceeded
+ }
+ switch {
+ case os.IsExist(err):
+ return codes.AlreadyExists
+ case os.IsNotExist(err):
+ return codes.NotFound
+ case os.IsPermission(err):
+ return codes.PermissionDenied
+ }
+ return codes.Unknown
+}
+
+const (
+ // how long to wait after the first failure before retrying
+ baseDelay = 1.0 * time.Second
+ // upper bound of backoff delay
+ maxDelay = 120 * time.Second
+ // backoff increases by this factor on each retry
+ backoffFactor = 1.6
+ // backoff is randomized downwards by this factor
+ backoffJitter = 0.2
+)
+
+func backoff(retries int) (t time.Duration) {
+ if retries == 0 {
+ return baseDelay
+ }
+ backoff, max := float64(baseDelay), float64(maxDelay)
+ for backoff < max && retries > 0 {
+ backoff *= backoffFactor
+ retries--
+ }
+ if backoff > max {
+ backoff = max
+ }
+ // Randomize backoff delays so that if a cluster of requests start at
+ // the same time, they won't operate in lockstep.
+ backoff *= 1 + backoffJitter*(rand.Float64()*2-1)
+ if backoff < 0 {
+ return 0
+ }
+ return time.Duration(backoff)
+}
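
The rpcError helpers above are how status information crosses the package boundary; a brief sketch of producing and inspecting such an error (the function and message are illustrative):

package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func lookup(id string) error {
	// Errorf packs a code and description into an rpcError, and returns
	// nil when the code is codes.OK.
	return grpc.Errorf(codes.NotFound, "no entry for %q", id)
}

func main() {
	err := lookup("abc")
	// Code recovers the numeric code (codes.Unknown for foreign errors);
	// ErrorDesc recovers the description.
	fmt.Println(grpc.Code(err), grpc.ErrorDesc(err))
}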
diff --git a/vendor/src/google.golang.org/grpc/server.go b/vendor/src/google.golang.org/grpc/server.go
new file mode 100644
index 0000000000..487a75c5ee
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/server.go
@@ -0,0 +1,542 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/transport"
+)
+
+type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error)
+
+// MethodDesc represents an RPC service's method specification.
+type MethodDesc struct {
+ MethodName string
+ Handler methodHandler
+}
+
+// ServiceDesc represents an RPC service's specification.
+type ServiceDesc struct {
+ ServiceName string
+ // The pointer to the service interface. Used to check whether the user
+ // provided implementation satisfies the interface requirements.
+ HandlerType interface{}
+ Methods []MethodDesc
+ Streams []StreamDesc
+}
+
+// service consists of the information of the server serving this service and
+// the methods in this service.
+type service struct {
+ server interface{} // the server for service methods
+ md map[string]*MethodDesc
+ sd map[string]*StreamDesc
+}
+
+// Server is a gRPC server to serve RPC requests.
+type Server struct {
+ opts options
+ mu sync.Mutex
+ lis map[net.Listener]bool
+ conns map[transport.ServerTransport]bool
+ m map[string]*service // service name -> service info
+ events trace.EventLog
+}
+
+type options struct {
+ creds credentials.Credentials
+ codec Codec
+ maxConcurrentStreams uint32
+}
+
+// A ServerOption sets options.
+type ServerOption func(*options)
+
+// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
+func CustomCodec(codec Codec) ServerOption {
+ return func(o *options) {
+ o.codec = codec
+ }
+}
+
+// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
+// of concurrent streams to each ServerTransport.
+func MaxConcurrentStreams(n uint32) ServerOption {
+ return func(o *options) {
+ o.maxConcurrentStreams = n
+ }
+}
+
+// Creds returns a ServerOption that sets credentials for server connections.
+func Creds(c credentials.Credentials) ServerOption {
+ return func(o *options) {
+ o.creds = c
+ }
+}
+
+// NewServer creates a gRPC server which has no service registered and has not
+// started to accept requests yet.
+func NewServer(opt ...ServerOption) *Server {
+ var opts options
+ for _, o := range opt {
+ o(&opts)
+ }
+ if opts.codec == nil {
+ // Set the default codec.
+ opts.codec = protoCodec{}
+ }
+ s := &Server{
+ lis: make(map[net.Listener]bool),
+ opts: opts,
+ conns: make(map[transport.ServerTransport]bool),
+ m: make(map[string]*service),
+ }
+ if EnableTracing {
+ _, file, line, _ := runtime.Caller(1)
+ s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
+ }
+ return s
+}
+
+// printf records an event in s's event log, unless s has been stopped.
+// REQUIRES s.mu is held.
+func (s *Server) printf(format string, a ...interface{}) {
+ if s.events != nil {
+ s.events.Printf(format, a...)
+ }
+}
+
+// errorf records an error in s's event log, unless s has been stopped.
+// REQUIRES s.mu is held.
+func (s *Server) errorf(format string, a ...interface{}) {
+ if s.events != nil {
+ s.events.Errorf(format, a...)
+ }
+}
+
+// RegisterService registers a service and its implementation with the gRPC
+// server. Called from the IDL generated code. This must be called before
+// invoking Serve.
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
+ ht := reflect.TypeOf(sd.HandlerType).Elem()
+ st := reflect.TypeOf(ss)
+ if !st.Implements(ht) {
+ grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+ }
+ s.register(sd, ss)
+}
+
+func (s *Server) register(sd *ServiceDesc, ss interface{}) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.printf("RegisterService(%q)", sd.ServiceName)
+ if _, ok := s.m[sd.ServiceName]; ok {
+ grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+ }
+ srv := &service{
+ server: ss,
+ md: make(map[string]*MethodDesc),
+ sd: make(map[string]*StreamDesc),
+ }
+ for i := range sd.Methods {
+ d := &sd.Methods[i]
+ srv.md[d.MethodName] = d
+ }
+ for i := range sd.Streams {
+ d := &sd.Streams[i]
+ srv.sd[d.StreamName] = d
+ }
+ s.m[sd.ServiceName] = srv
+}
+
+var (
+ // ErrServerStopped indicates that the operation is now illegal because of
+ // the server being stopped.
+ ErrServerStopped = errors.New("grpc: the server has been stopped")
+)
+
+// Serve accepts incoming connections on the listener lis, creating a new
+// ServerTransport and service goroutine for each. The service goroutines
+// read gRPC requests and then call the registered handlers to reply to them.
+// Serve returns when lis.Accept fails.
+func (s *Server) Serve(lis net.Listener) error {
+ s.mu.Lock()
+ s.printf("serving")
+ if s.lis == nil {
+ s.mu.Unlock()
+ return ErrServerStopped
+ }
+ s.lis[lis] = true
+ s.mu.Unlock()
+ defer func() {
+ lis.Close()
+ s.mu.Lock()
+ delete(s.lis, lis)
+ s.mu.Unlock()
+ }()
+ for {
+ c, err := lis.Accept()
+ if err != nil {
+ s.mu.Lock()
+ s.printf("done serving; Accept = %v", err)
+ s.mu.Unlock()
+ return err
+ }
+ var authInfo credentials.AuthInfo
+ if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok {
+ var conn net.Conn
+ conn, authInfo, err = creds.ServerHandshake(c)
+ if err != nil {
+ s.mu.Lock()
+ s.errorf("ServerHandshake(%q) failed: %v", c.RemoteAddr(), err)
+ s.mu.Unlock()
+ grpclog.Println("grpc: Server.Serve failed to complete security handshake.")
+ continue
+ }
+ c = conn
+ }
+ s.mu.Lock()
+ if s.conns == nil {
+ s.mu.Unlock()
+ c.Close()
+ return nil
+ }
+ st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
+ if err != nil {
+ s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
+ s.mu.Unlock()
+ c.Close()
+ grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
+ continue
+ }
+ s.conns[st] = true
+ s.mu.Unlock()
+
+ go func() {
+ var wg sync.WaitGroup
+ st.HandleStreams(func(stream *transport.Stream) {
+ var trInfo *traceInfo
+ if EnableTracing {
+ trInfo = &traceInfo{
+ tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()),
+ }
+ trInfo.firstLine.client = false
+ trInfo.firstLine.remoteAddr = st.RemoteAddr()
+ stream.TraceContext(trInfo.tr)
+ if dl, ok := stream.Context().Deadline(); ok {
+ trInfo.firstLine.deadline = dl.Sub(time.Now())
+ }
+ }
+ wg.Add(1)
+ go func() {
+ s.handleStream(st, stream, trInfo)
+ wg.Done()
+ }()
+ })
+ wg.Wait()
+ s.mu.Lock()
+ delete(s.conns, st)
+ s.mu.Unlock()
+ }()
+ }
+}
+
+func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, pf payloadFormat, opts *transport.Options) error {
+ p, err := encode(s.opts.codec, msg, pf)
+ if err != nil {
+ // This typically indicates a fatal issue (e.g., memory
+ // corruption or hardware faults) that the application program
+ // cannot handle.
+ //
+ // TODO(zhaoq): There exist other options also such as only closing the
+ // faulty stream locally and remotely (Other streams can keep going). Find
+ // the optimal option.
+ grpclog.Fatalf("grpc: Server failed to encode response %v", err)
+ }
+ return t.Write(stream, p, opts)
+}
+
+func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
+ if trInfo != nil {
+ defer trInfo.tr.Finish()
+ trInfo.firstLine.client = false
+ trInfo.tr.LazyLog(&trInfo.firstLine, false)
+ defer func() {
+ if err != nil && err != io.EOF {
+ trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ trInfo.tr.SetError()
+ }
+ }()
+ }
+ p := &parser{s: stream}
+ for {
+ pf, req, err := p.recvMsg()
+ if err == io.EOF {
+ // The entire stream is done (for unary RPC only).
+ return err
+ }
+ if err != nil {
+ switch err := err.(type) {
+ case transport.ConnectionError:
+ // Nothing to do here.
+ case transport.StreamError:
+ if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
+ grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
+ }
+ default:
+ panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err))
+ }
+ return err
+ }
+ switch pf {
+ case compressionNone:
+ statusCode := codes.OK
+ statusDesc := ""
+ df := func(v interface{}) error {
+ if err := s.opts.codec.Unmarshal(req, v); err != nil {
+ return err
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
+ }
+ return nil
+ }
+ reply, appErr := md.Handler(srv.server, stream.Context(), df)
+ if appErr != nil {
+ if err, ok := appErr.(rpcError); ok {
+ statusCode = err.code
+ statusDesc = err.desc
+ } else {
+ statusCode = convertCode(appErr)
+ statusDesc = appErr.Error()
+ }
+ if trInfo != nil && statusCode != codes.OK {
+ trInfo.tr.LazyLog(stringer(statusDesc), true)
+ trInfo.tr.SetError()
+ }
+
+ if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
+ grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
+ return err
+ }
+ return nil
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(stringer("OK"), false)
+ }
+ opts := &transport.Options{
+ Last: true,
+ Delay: false,
+ }
+ if err := s.sendResponse(t, stream, reply, compressionNone, opts); err != nil {
+ switch err := err.(type) {
+ case transport.ConnectionError:
+ // Nothing to do here.
+ case transport.StreamError:
+ statusCode = err.Code
+ statusDesc = err.Desc
+ default:
+ statusCode = codes.Unknown
+ statusDesc = err.Error()
+ }
+ return err
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
+ }
+ return t.WriteStatus(stream, statusCode, statusDesc)
+ default:
+ panic(fmt.Sprintf("payload format to be supported: %d", pf))
+ }
+ }
+}
+
+func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
+ ss := &serverStream{
+ t: t,
+ s: stream,
+ p: &parser{s: stream},
+ codec: s.opts.codec,
+ trInfo: trInfo,
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&trInfo.firstLine, false)
+ defer func() {
+ ss.mu.Lock()
+ if err != nil && err != io.EOF {
+ trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ trInfo.tr.SetError()
+ }
+ trInfo.tr.Finish()
+ trInfo.tr = nil
+ ss.mu.Unlock()
+ }()
+ }
+ if appErr := sd.Handler(srv.server, ss); appErr != nil {
+ if err, ok := appErr.(rpcError); ok {
+ ss.statusCode = err.code
+ ss.statusDesc = err.desc
+ } else {
+ ss.statusCode = convertCode(appErr)
+ ss.statusDesc = appErr.Error()
+ }
+ }
+ if trInfo != nil {
+ ss.mu.Lock()
+ if ss.statusCode != codes.OK {
+ trInfo.tr.LazyLog(stringer(ss.statusDesc), true)
+ trInfo.tr.SetError()
+ } else {
+ trInfo.tr.LazyLog(stringer("OK"), false)
+ }
+ ss.mu.Unlock()
+ }
+ return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
+
+}
+
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+ sm := stream.Method()
+ if sm != "" && sm[0] == '/' {
+ sm = sm[1:]
+ }
+ pos := strings.LastIndex(sm, "/")
+ if pos == -1 {
+ if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil {
+ grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+ }
+ return
+ }
+ service := sm[:pos]
+ method := sm[pos+1:]
+ srv, ok := s.m[service]
+ if !ok {
+ if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil {
+ grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+ }
+ return
+ }
+ // Unary RPC or Streaming RPC?
+ if md, ok := srv.md[method]; ok {
+ s.processUnaryRPC(t, stream, srv, md, trInfo)
+ return
+ }
+ if sd, ok := srv.sd[method]; ok {
+ s.processStreamingRPC(t, stream, srv, sd, trInfo)
+ return
+ }
+ if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil {
+ grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+ }
+}
+
+// Stop stops the gRPC server. Once Stop returns, the server stops accepting
+// connection requests and closes all established connections.
+func (s *Server) Stop() {
+ s.mu.Lock()
+ listeners := s.lis
+ s.lis = nil
+ cs := s.conns
+ s.conns = nil
+ s.mu.Unlock()
+ for lis := range listeners {
+ lis.Close()
+ }
+ for c := range cs {
+ c.Close()
+ }
+ s.mu.Lock()
+ if s.events != nil {
+ s.events.Finish()
+ s.events = nil
+ }
+ s.mu.Unlock()
+}
+
+// TestingCloseConns closes all existing transports but keeps s.lis accepting new
+// connections. It is currently intended for testing only.
+func (s *Server) TestingCloseConns() {
+ s.mu.Lock()
+ for c := range s.conns {
+ c.Close()
+ }
+ s.conns = make(map[transport.ServerTransport]bool)
+ s.mu.Unlock()
+}
+
+// SendHeader sends header metadata. It may be called at most once from a unary
+// RPC handler. The ctx is the RPC handler's Context or one derived from it.
+func SendHeader(ctx context.Context, md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ stream, ok := transport.StreamFromContext(ctx)
+ if !ok {
+ return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx)
+ }
+ t := stream.ServerTransport()
+ if t == nil {
+ grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream)
+ }
+ return t.WriteHeader(stream, md)
+}
+
+// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
+// It may be called at most once from a unary RPC handler. The ctx is the RPC
+// handler's Context or one derived from it.
+func SetTrailer(ctx context.Context, md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ stream, ok := transport.StreamFromContext(ctx)
+ if !ok {
+ return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx)
+ }
+ return stream.SetTrailer(md)
+}
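
Putting the server pieces together: NewServer with options, service registration (normally done through IDL-generated Register helpers that call RegisterService with a generated ServiceDesc), then Serve on a listener. The address is illustrative, and the commented-out registration is a hypothetical stand-in for generated code:

package main

import (
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:50051")
	if err != nil {
		grpclog.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer(grpc.MaxConcurrentStreams(128))
	// pb.RegisterEchoServer(s, &echoServer{}) // hypothetical generated helper
	if err := s.Serve(lis); err != nil {
		grpclog.Fatalf("serve: %v", err)
	}
}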
diff --git a/vendor/src/google.golang.org/grpc/stream.go b/vendor/src/google.golang.org/grpc/stream.go
new file mode 100644
index 0000000000..2370dd0e9d
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/stream.go
@@ -0,0 +1,368 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "errors"
+ "io"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/transport"
+)
+
+type streamHandler func(srv interface{}, stream ServerStream) error
+
+// StreamDesc represents a streaming RPC service's method specification.
+type StreamDesc struct {
+ StreamName string
+ Handler streamHandler
+
+ // At least one of these is true.
+ ServerStreams bool
+ ClientStreams bool
+}
+
+// Stream defines the common interface a client or server stream has to satisfy.
+type Stream interface {
+ // Context returns the context for this stream.
+ Context() context.Context
+ // SendMsg blocks until it sends m, the stream is done, or the stream
+ // breaks.
+ // On error, it aborts the stream and returns an RPC status on client
+ // side. On server side, it simply returns the error to the caller.
+ // SendMsg is called by generated code.
+ SendMsg(m interface{}) error
+ // RecvMsg blocks until it receives a message or the stream is
+ // done. On client side, it returns io.EOF when the stream is done. On
+ // any other error, it aborts the stream and returns an RPC status. On
+ // server side, it simply returns the error to the caller.
+ RecvMsg(m interface{}) error
+}
+
+// ClientStream defines the interface a client stream has to satisfy.
+type ClientStream interface {
+ // Header returns the header metadata received from the server if there
+ // is any. It blocks if the metadata is not ready to read.
+ Header() (metadata.MD, error)
+ // Trailer returns the trailer metadata from the server. It must be called
+ // after stream.Recv() returns non-nil error (including io.EOF) for
+ // bi-directional streaming and server streaming or stream.CloseAndRecv()
+ // returns for client streaming in order to receive trailer metadata if
+ // present. Otherwise, it could return an empty MD even though the trailer
+ // is present.
+ Trailer() metadata.MD
+ // CloseSend closes the send direction of the stream. It closes the stream
+ // when a non-nil error is encountered.
+ CloseSend() error
+ Stream
+}
+
+// NewClientStream creates a new Stream for the client side. This is called
+// by generated code.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+ var (
+ t transport.ClientTransport
+ err error
+ )
+ t, err = cc.dopts.picker.Pick(ctx)
+ if err != nil {
+ return nil, toRPCErr(err)
+ }
+ // TODO(zhaoq): CallOption is omitted. Add support when it is needed.
+ callHdr := &transport.CallHdr{
+ Host: cc.authority,
+ Method: method,
+ }
+ cs := &clientStream{
+ desc: desc,
+ codec: cc.dopts.codec,
+ tracing: EnableTracing,
+ }
+ if cs.tracing {
+ cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
+ cs.trInfo.firstLine.client = true
+ if deadline, ok := ctx.Deadline(); ok {
+ cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
+ }
+ cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
+ ctx = trace.NewContext(ctx, cs.trInfo.tr)
+ }
+ s, err := t.NewStream(ctx, callHdr)
+ if err != nil {
+ return nil, toRPCErr(err)
+ }
+ cs.t = t
+ cs.s = s
+ cs.p = &parser{s: s}
+ // Listen on ctx.Done() to detect cancellation when there are no pending
+ // I/O operations on this stream.
+ go func() {
+ <-s.Context().Done()
+ cs.closeTransportStream(transport.ContextErr(s.Context().Err()))
+ }()
+ return cs, nil
+}
+
+// clientStream implements a client side Stream.
+type clientStream struct {
+ t transport.ClientTransport
+ s *transport.Stream
+ p *parser
+ desc *StreamDesc
+ codec Codec
+
+ tracing bool // set to EnableTracing when the clientStream is created.
+
+ mu sync.Mutex
+ closed bool
+ // trInfo.tr is set when the clientStream is created (if EnableTracing is true),
+ // and is set to nil when the clientStream's finish method is called.
+ trInfo traceInfo
+}
+
+func (cs *clientStream) Context() context.Context {
+ return cs.s.Context()
+}
+
+func (cs *clientStream) Header() (metadata.MD, error) {
+ m, err := cs.s.Header()
+ if err != nil {
+ if _, ok := err.(transport.ConnectionError); !ok {
+ cs.closeTransportStream(err)
+ }
+ }
+ return m, err
+}
+
+func (cs *clientStream) Trailer() metadata.MD {
+ return cs.s.Trailer()
+}
+
+func (cs *clientStream) SendMsg(m interface{}) (err error) {
+ if cs.tracing {
+ cs.mu.Lock()
+ if cs.trInfo.tr != nil {
+ cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+ }
+ cs.mu.Unlock()
+ }
+ defer func() {
+ if err == nil || err == io.EOF {
+ return
+ }
+ if _, ok := err.(transport.ConnectionError); !ok {
+ cs.closeTransportStream(err)
+ }
+ err = toRPCErr(err)
+ }()
+ out, err := encode(cs.codec, m, compressionNone)
+ if err != nil {
+ return transport.StreamErrorf(codes.Internal, "grpc: %v", err)
+ }
+ return cs.t.Write(cs.s, out, &transport.Options{Last: false})
+}
+
+func (cs *clientStream) RecvMsg(m interface{}) (err error) {
+ err = recv(cs.p, cs.codec, m)
+ defer func() {
+ // err != nil indicates the termination of the stream.
+ if err != nil {
+ cs.finish(err)
+ }
+ }()
+ if err == nil {
+ if cs.tracing {
+ cs.mu.Lock()
+ if cs.trInfo.tr != nil {
+ cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+ }
+ cs.mu.Unlock()
+ }
+ if !cs.desc.ClientStreams || cs.desc.ServerStreams {
+ return
+ }
+ // Special handling for client streaming rpc.
+ err = recv(cs.p, cs.codec, m)
+ cs.closeTransportStream(err)
+ if err == nil {
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ }
+ if err == io.EOF {
+ if cs.s.StatusCode() == codes.OK {
+ return nil
+ }
+ return Errorf(cs.s.StatusCode(), cs.s.StatusDesc())
+ }
+ return toRPCErr(err)
+ }
+ if _, ok := err.(transport.ConnectionError); !ok {
+ cs.closeTransportStream(err)
+ }
+ if err == io.EOF {
+ if cs.s.StatusCode() == codes.OK {
+ // Returns io.EOF to indicate the end of the stream.
+ return
+ }
+ return Errorf(cs.s.StatusCode(), cs.s.StatusDesc())
+ }
+ return toRPCErr(err)
+}
+
+func (cs *clientStream) CloseSend() (err error) {
+ err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
+ if err == nil || err == io.EOF {
+ return
+ }
+ if _, ok := err.(transport.ConnectionError); !ok {
+ cs.closeTransportStream(err)
+ }
+ err = toRPCErr(err)
+ return
+}
+
+func (cs *clientStream) closeTransportStream(err error) {
+ cs.mu.Lock()
+ if cs.closed {
+ cs.mu.Unlock()
+ return
+ }
+ cs.closed = true
+ cs.mu.Unlock()
+ cs.t.CloseStream(cs.s, err)
+}
+
+func (cs *clientStream) finish(err error) {
+ if !cs.tracing {
+ return
+ }
+ cs.mu.Lock()
+ defer cs.mu.Unlock()
+ if cs.trInfo.tr != nil {
+ if err == nil || err == io.EOF {
+ cs.trInfo.tr.LazyPrintf("RPC: [OK]")
+ } else {
+ cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+ cs.trInfo.tr.SetError()
+ }
+ cs.trInfo.tr.Finish()
+ cs.trInfo.tr = nil
+ }
+}
+
+// ServerStream defines the interface a server stream has to satisfy.
+type ServerStream interface {
+ // SendHeader sends the header metadata. It fails if called
+ // multiple times or if called after SendMsg.
+ SendHeader(metadata.MD) error
+ // SetTrailer sets the trailer metadata which will be sent with the
+ // RPC status.
+ SetTrailer(metadata.MD)
+ Stream
+}
+
+// serverStream implements a server side Stream.
+type serverStream struct {
+ t transport.ServerTransport
+ s *transport.Stream
+ p *parser
+ codec Codec
+ statusCode codes.Code
+ statusDesc string
+ trInfo *traceInfo
+
+ mu sync.Mutex // protects trInfo.tr after the service handler runs.
+}
+
+func (ss *serverStream) Context() context.Context {
+ return ss.s.Context()
+}
+
+func (ss *serverStream) SendHeader(md metadata.MD) error {
+ return ss.t.WriteHeader(ss.s, md)
+}
+
+func (ss *serverStream) SetTrailer(md metadata.MD) {
+ if md.Len() == 0 {
+ return
+ }
+ ss.s.SetTrailer(md)
+}
+
+func (ss *serverStream) SendMsg(m interface{}) (err error) {
+ defer func() {
+ if ss.trInfo != nil {
+ ss.mu.Lock()
+ if ss.trInfo.tr != nil {
+ if err == nil {
+ ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+ } else {
+ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ ss.trInfo.tr.SetError()
+ }
+ }
+ ss.mu.Unlock()
+ }
+ }()
+ out, err := encode(ss.codec, m, compressionNone)
+ if err != nil {
+ err = transport.StreamErrorf(codes.Internal, "grpc: %v", err)
+ return err
+ }
+ return ss.t.Write(ss.s, out, &transport.Options{Last: false})
+}
+
+func (ss *serverStream) RecvMsg(m interface{}) (err error) {
+ defer func() {
+ if ss.trInfo != nil {
+ ss.mu.Lock()
+ if ss.trInfo.tr != nil {
+ if err == nil {
+ ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+ } else if err != io.EOF {
+ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ ss.trInfo.tr.SetError()
+ }
+ }
+ ss.mu.Unlock()
+ }
+ }()
+ return recv(ss.p, ss.codec, m)
+}
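
The streamHandler signature above is the shape generated streaming code funnels into; a hedged sketch of a bidirectional echo handler written directly against ServerStream (the pb import and its Msg type are hypothetical placeholders for generated protobuf code):

package echo

import (
	"io"

	"google.golang.org/grpc"

	pb "example.com/echo/proto" // hypothetical generated package
)

// echoHandler matches the streamHandler type: it receives messages until
// the client half-closes the stream, echoing each message back.
func echoHandler(srv interface{}, stream grpc.ServerStream) error {
	for {
		var msg pb.Msg // hypothetical generated message type
		if err := stream.RecvMsg(&msg); err != nil {
			if err == io.EOF {
				return nil // client finished sending
			}
			return err
		}
		if err := stream.SendMsg(&msg); err != nil {
			return err
		}
	}
}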
diff --git a/vendor/src/google.golang.org/grpc/trace.go b/vendor/src/google.golang.org/grpc/trace.go
new file mode 100644
index 0000000000..cde04fbfc9
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/trace.go
@@ -0,0 +1,120 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "strings"
+ "time"
+
+ "golang.org/x/net/trace"
+)
+
+// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
+// This should only be set before any RPCs are sent or received by this program.
+var EnableTracing = true
+
+// methodFamily returns the trace family for the given method.
+// It turns "/pkg.Service/GetFoo" into "pkg.Service".
+func methodFamily(m string) string {
+ m = strings.TrimPrefix(m, "/") // remove leading slash
+ if i := strings.Index(m, "/"); i >= 0 {
+ m = m[:i] // remove everything from second slash
+ }
+ if i := strings.LastIndex(m, "."); i >= 0 {
+ m = m[i+1:] // cut down to last dotted component
+ }
+ return m
+}
+
+// traceInfo contains tracing information for an RPC.
+type traceInfo struct {
+ tr trace.Trace
+ firstLine firstLine
+}
+
+// firstLine is the first line of an RPC trace.
+type firstLine struct {
+ client bool // whether this is a client (outgoing) RPC
+ remoteAddr net.Addr
+ deadline time.Duration // may be zero
+}
+
+func (f *firstLine) String() string {
+ var line bytes.Buffer
+ io.WriteString(&line, "RPC: ")
+ if f.client {
+ io.WriteString(&line, "to")
+ } else {
+ io.WriteString(&line, "from")
+ }
+ fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
+ if f.deadline != 0 {
+ fmt.Fprint(&line, f.deadline)
+ } else {
+ io.WriteString(&line, "none")
+ }
+ return line.String()
+}
+
+// payload represents an RPC request or response payload.
+type payload struct {
+ sent bool // whether this is an outgoing payload
+ msg interface{} // e.g. a proto.Message
+ // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
+}
+
+func (p payload) String() string {
+ if p.sent {
+ return fmt.Sprintf("sent: %v", p.msg)
+ } else {
+ return fmt.Sprintf("recv: %v", p.msg)
+ }
+}
+
+type fmtStringer struct {
+ format string
+ a []interface{}
+}
+
+func (f *fmtStringer) String() string {
+ return fmt.Sprintf(f.format, f.a...)
+}
+
+type stringer string
+
+func (s stringer) String() string { return string(s) }
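
EnableTracing hooks into golang.org/x/net/trace, whose package init registers /debug/requests and /debug/events handlers on http.DefaultServeMux, so exposing an HTTP listener is enough to browse per-RPC traces. A sketch (the address is illustrative):

package main

import (
	"net/http"

	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
)

func main() {
	grpc.EnableTracing = true // the default; must be set before any RPCs
	// Importing grpc pulls in x/net/trace, whose handlers serve
	// http://localhost:8080/debug/requests once this listener is up.
	grpclog.Fatal(http.ListenAndServe("localhost:8080", nil))
}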
diff --git a/vendor/src/google.golang.org/grpc/transport/control.go b/vendor/src/google.golang.org/grpc/transport/control.go
new file mode 100644
index 0000000000..6b5201e7a6
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/transport/control.go
@@ -0,0 +1,259 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+ "fmt"
+ "sync"
+
+ "golang.org/x/net/http2"
+)
+
+const (
+ // The default value of flow control window size in HTTP2 spec.
+ defaultWindowSize = 65535
+ // The initial window size for flow control.
+ initialWindowSize = defaultWindowSize // for an RPC
+ initialConnWindowSize = defaultWindowSize * 16 // for a connection
+)
+
+// The following defines various control items which could flow through
+// the control buffer of transport. They represent different aspects of
+// control tasks, e.g., flow control, settings, streaming resetting, etc.
+type windowUpdate struct {
+ streamID uint32
+ increment uint32
+}
+
+func (windowUpdate) isItem() bool {
+ return true
+}
+
+type settings struct {
+ ack bool
+ ss []http2.Setting
+}
+
+func (settings) isItem() bool {
+ return true
+}
+
+type resetStream struct {
+ streamID uint32
+ code http2.ErrCode
+}
+
+func (resetStream) isItem() bool {
+ return true
+}
+
+type flushIO struct {
+}
+
+func (flushIO) isItem() bool {
+ return true
+}
+
+type ping struct {
+ ack bool
+}
+
+func (ping) isItem() bool {
+ return true
+}
+
+// quotaPool is a pool which accumulates the quota and sends it to acquire()
+// when it is available.
+type quotaPool struct {
+ c chan int
+
+ mu sync.Mutex
+ quota int
+}
+
+// newQuotaPool creates a quotaPool which has quota q available to consume.
+func newQuotaPool(q int) *quotaPool {
+ qb := &quotaPool{
+ c: make(chan int, 1),
+ }
+ if q > 0 {
+ qb.c <- q
+ } else {
+ qb.quota = q
+ }
+ return qb
+}
+
+// add adds n to the available quota and tries to send it on acquire.
+func (qb *quotaPool) add(n int) {
+ qb.mu.Lock()
+ defer qb.mu.Unlock()
+ qb.quota += n
+ if qb.quota <= 0 {
+ return
+ }
+ select {
+ case qb.c <- qb.quota:
+ qb.quota = 0
+ default:
+ }
+}
+
+// cancel cancels the pending quota sent on acquire, if any.
+func (qb *quotaPool) cancel() {
+ qb.mu.Lock()
+ defer qb.mu.Unlock()
+ select {
+ case n := <-qb.c:
+ qb.quota += n
+ default:
+ }
+}
+
+// reset cancels the pending quota sent on acquire, increments it by v, and
+// sends it back on acquire.
+func (qb *quotaPool) reset(v int) {
+ qb.mu.Lock()
+ defer qb.mu.Unlock()
+ select {
+ case n := <-qb.c:
+ qb.quota += n
+ default:
+ }
+ qb.quota += v
+ if qb.quota <= 0 {
+ return
+ }
+ select {
+ case qb.c <- qb.quota:
+ qb.quota = 0
+ default:
+ }
+}
+
+// acquire returns the channel on which available quota amounts are sent.
+func (qb *quotaPool) acquire() <-chan int {
+ return qb.c
+}
+
+// inFlow deals with inbound flow control.
+type inFlow struct {
+ // The inbound flow control limit for pending data.
+ limit uint32
+ // conn points to the shared connection-level inFlow that is shared
+ // by all streams on that conn. It is nil for the inFlow on the conn
+ // directly.
+ conn *inFlow
+
+ mu sync.Mutex
+ // pendingData is the overall data that has been received but not yet
+ // consumed by the application.
+ pendingData uint32
+ // pendingUpdate is the amount of data the application has consumed but
+ // grpc has not yet sent a window update for. Used to reduce window update
+ // frequency.
+ pendingUpdate uint32
+}
+
+// onData is invoked when a data frame is received. It increments not only its
+// own pendingData but also that of the associated connection-level flow.
+func (f *inFlow) onData(n uint32) error {
+ if n == 0 {
+ return nil
+ }
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ if f.pendingData+f.pendingUpdate+n > f.limit {
+ return fmt.Errorf("recieved %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit)
+ }
+ if f.conn != nil {
+ if err := f.conn.onData(n); err != nil {
+ return ConnectionErrorf("%v", err)
+ }
+ }
+ f.pendingData += n
+ return nil
+}
+
+// connOnRead updates the connection level states when the application consumes data.
+func (f *inFlow) connOnRead(n uint32) uint32 {
+ if n == 0 || f.conn != nil {
+ return 0
+ }
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ f.pendingData -= n
+ f.pendingUpdate += n
+ if f.pendingUpdate >= f.limit/4 {
+ ret := f.pendingUpdate
+ f.pendingUpdate = 0
+ return ret
+ }
+ return 0
+}
+
+// onRead is invoked when the application reads the data. It returns the window updates
+// for both stream and connection level.
+func (f *inFlow) onRead(n uint32) (swu, cwu uint32) {
+ if n == 0 {
+ return
+ }
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ if f.pendingData == 0 {
+ // pendingData has been adjusted by restoreConn.
+ return
+ }
+ f.pendingData -= n
+ f.pendingUpdate += n
+ if f.pendingUpdate >= f.limit/4 {
+ swu = f.pendingUpdate
+ f.pendingUpdate = 0
+ }
+ cwu = f.conn.connOnRead(n)
+ return
+}
+
+// restoreConn is invoked when a stream is terminated. It removes its stake in
+// the connection-level flow and resets its own state.
+func (f *inFlow) restoreConn() uint32 {
+ if f.conn == nil {
+ return 0
+ }
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ n := f.pendingData
+ f.pendingData = 0
+ f.pendingUpdate = 0
+ return f.conn.connOnRead(n)
+}
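
quotaPool is unexported, but the consumption pattern it supports is worth spelling out: a writer receives whatever quota has been banked on acquire() and hands any surplus back with add(). A hedged sketch of a hypothetical consumer inside this package (the names are illustrative, and a real writer would also select on shutdown channels):

// takeQuota blocks until some send quota is available, takes at most
// want bytes of it, and returns the surplus to the pool.
func takeQuota(pool *quotaPool, want int) int {
	q := <-pool.acquire()
	if q > want {
		pool.add(q - want) // give the extra back to other writers
		q = want
	}
	return q
}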
diff --git a/vendor/src/google.golang.org/grpc/transport/http2_client.go b/vendor/src/google.golang.org/grpc/transport/http2_client.go
new file mode 100644
index 0000000000..b13fb78c4c
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/transport/http2_client.go
@@ -0,0 +1,860 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "math"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+)
+
+// http2Client implements the ClientTransport interface with HTTP2.
+type http2Client struct {
+ target string // server name/addr
+ userAgent string
+ conn net.Conn // underlying communication channel
+ authInfo credentials.AuthInfo // auth info about the connection
+ nextID uint32 // the next stream ID to be used
+
+ // writableChan synchronizes write access to the transport.
+ // A writer acquires the write lock by sending a value on writableChan
+ // and releases it by receiving from writableChan.
+ writableChan chan int
+ // shutdownChan is closed when Close is called.
+ // Blocking operations should select on shutdownChan to avoid
+ // blocking forever after Close.
+ // TODO(zhaoq): Maybe have a channel context?
+ shutdownChan chan struct{}
+ // errorChan is closed to notify the I/O error to the caller.
+ errorChan chan struct{}
+
+ framer *framer
+ hBuf *bytes.Buffer // the buffer for HPACK encoding
+ hEnc *hpack.Encoder // HPACK encoder
+
+ // controlBuf delivers all the control related tasks (e.g., window
+ // updates, reset streams, and various settings) to the controller.
+ controlBuf *recvBuffer
+ fc *inFlow
+ // sendQuotaPool provides flow control for outbound messages.
+ sendQuotaPool *quotaPool
+ // streamsQuota limits the max number of concurrent streams.
+ streamsQuota *quotaPool
+
+ // The scheme used: https if TLS is on, http otherwise.
+ scheme string
+
+ authCreds []credentials.Credentials
+
+ mu sync.Mutex // guard the following variables
+ state transportState // the state of underlying connection
+ activeStreams map[uint32]*Stream
+ // The max number of concurrent streams
+ maxStreams int
+ // the per-stream outbound flow control window size set by the peer.
+ streamSendQuota uint32
+}
+
+// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// and starts to receive messages on it. A non-nil error is returned if
+// construction fails.
+func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err error) {
+ if opts.Dialer == nil {
+ // Set the default Dialer.
+ opts.Dialer = func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("tcp", addr, timeout)
+ }
+ }
+ scheme := "http"
+ startT := time.Now()
+ timeout := opts.Timeout
+ conn, connErr := opts.Dialer(addr, timeout)
+ if connErr != nil {
+ return nil, ConnectionErrorf("transport: %v", connErr)
+ }
+ var authInfo credentials.AuthInfo
+ for _, c := range opts.AuthOptions {
+ if ccreds, ok := c.(credentials.TransportAuthenticator); ok {
+ scheme = "https"
+ // TODO(zhaoq): Now the first TransportAuthenticator is used if there are
+ // multiple ones provided. Revisit this if it is not appropriate. Probably
+ // place the ClientTransport construction into a separate function to make
+ // things clear.
+ if timeout > 0 {
+ timeout -= time.Since(startT)
+ }
+ conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout)
+ break
+ }
+ }
+ if connErr != nil {
+ return nil, ConnectionErrorf("transport: %v", connErr)
+ }
+ defer func() {
+ if err != nil {
+ conn.Close()
+ }
+ }()
+ // Send connection preface to server.
+ n, err := conn.Write(clientPreface)
+ if err != nil {
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ if n != len(clientPreface) {
+ return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+ }
+ framer := newFramer(conn)
+ if initialWindowSize != defaultWindowSize {
+ err = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)})
+ } else {
+ err = framer.writeSettings(true)
+ }
+ if err != nil {
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ // Adjust the connection flow control window if needed.
+ if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
+ if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ }
+ ua := primaryUA
+ if opts.UserAgent != "" {
+ ua = opts.UserAgent + " " + ua
+ }
+ var buf bytes.Buffer
+ t := &http2Client{
+ target: addr,
+ userAgent: ua,
+ conn: conn,
+ authInfo: authInfo,
+ // Client-initiated stream IDs are odd, starting from 1.
+ nextID: 1,
+ writableChan: make(chan int, 1),
+ shutdownChan: make(chan struct{}),
+ errorChan: make(chan struct{}),
+ framer: framer,
+ hBuf: &buf,
+ hEnc: hpack.NewEncoder(&buf),
+ controlBuf: newRecvBuffer(),
+ fc: &inFlow{limit: initialConnWindowSize},
+ sendQuotaPool: newQuotaPool(defaultWindowSize),
+ scheme: scheme,
+ state: reachable,
+ activeStreams: make(map[uint32]*Stream),
+ authCreds: opts.AuthOptions,
+ maxStreams: math.MaxInt32,
+ streamSendQuota: defaultWindowSize,
+ }
+ go t.controller()
+ t.writableChan <- 0
+ // Start the reader goroutine for incoming message. The threading model
+ // on receiving is that each transport has a dedicated goroutine which
+ // reads HTTP2 frame from network. Then it dispatches the frame to the
+ // corresponding stream entity.
+ go t.reader()
+ return t, nil
+}
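
A hedged usage sketch with an illustrative address (grpc.Dial is the usual entry point; newHTTP2Client is package-private):

    ct, err := newHTTP2Client("localhost:50051", &ConnectOptions{Timeout: 5 * time.Second})
    if err != nil {
        grpclog.Printf("transport: dial failed: %v", err)
        return
    }
    defer ct.Close()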
+
+func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
+ fc := &inFlow{
+ limit: initialWindowSize,
+ conn: t.fc,
+ }
+ // TODO(zhaoq): Handle uint32 overflow of Stream.id.
+ s := &Stream{
+ id: t.nextID,
+ method: callHdr.Method,
+ buf: newRecvBuffer(),
+ fc: fc,
+ sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
+ headerChan: make(chan struct{}),
+ }
+ t.nextID += 2
+ s.windowHandler = func(n int) {
+ t.updateWindow(s, uint32(n))
+ }
+ // Make a stream be able to cancel the pending operations by itself.
+ s.ctx, s.cancel = context.WithCancel(ctx)
+ s.dec = &recvBufferReader{
+ ctx: s.ctx,
+ recv: s.buf,
+ }
+ return s
+}
+
+// NewStream creates a stream and registers it with the transport as an
+// active stream.
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
+ // Record the timeout value on the context.
+ var timeout time.Duration
+ if dl, ok := ctx.Deadline(); ok {
+ timeout = dl.Sub(time.Now())
+ if timeout <= 0 {
+ return nil, ContextErr(context.DeadlineExceeded)
+ }
+ }
+ // Attach Auth info if there is any.
+ if t.authInfo != nil {
+ ctx = credentials.NewContext(ctx, t.authInfo)
+ }
+ authData := make(map[string]string)
+ for _, c := range t.authCreds {
+ // Construct URI required to get auth request metadata.
+ var port string
+ if pos := strings.LastIndex(t.target, ":"); pos != -1 {
+ // Omit port if it is the default one.
+ if t.target[pos+1:] != "443" {
+ port = ":" + t.target[pos+1:]
+ }
+ }
+ pos := strings.LastIndex(callHdr.Method, "/")
+ if pos == -1 {
+ return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method)
+ }
+ audience := "https://" + callHdr.Host + port + callHdr.Method[:pos]
+ data, err := c.GetRequestMetadata(ctx, audience)
+ if err != nil {
+ return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err)
+ }
+ for k, v := range data {
+ authData[k] = v
+ }
+ }
+ t.mu.Lock()
+ if t.state != reachable {
+ t.mu.Unlock()
+ return nil, ErrConnClosing
+ }
+ checkStreamsQuota := t.streamsQuota != nil
+ t.mu.Unlock()
+ if checkStreamsQuota {
+ sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire())
+ if err != nil {
+ return nil, err
+ }
+ // Return the excess quota back to the pool.
+ if sq > 1 {
+ t.streamsQuota.add(sq - 1)
+ }
+ }
+ if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil {
+ // t.streamsQuota will be updated when t.CloseStream is invoked.
+ return nil, err
+ }
+ t.mu.Lock()
+ if t.state != reachable {
+ t.mu.Unlock()
+ return nil, ErrConnClosing
+ }
+ s := t.newStream(ctx, callHdr)
+ t.activeStreams[s.id] = s
+
+ // This stream is not counted when applySettings(...) initializes
+ // t.streamsQuota. Reset t.streamsQuota to the right value.
+ var reset bool
+ if !checkStreamsQuota && t.streamsQuota != nil {
+ reset = true
+ }
+ t.mu.Unlock()
+ if reset {
+ t.streamsQuota.reset(-1)
+ }
+
+ // HPACK encodes various headers. Note that once WriteField(...) is
+ // called, the corresponding headers/continuation frame has to be sent
+ // because hpack.Encoder is stateful.
+ t.hBuf.Reset()
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme})
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
+
+ if timeout > 0 {
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)})
+ }
+ for k, v := range authData {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
+ }
+ var (
+ hasMD bool
+ endHeaders bool
+ )
+ if md, ok := metadata.FromContext(ctx); ok {
+ hasMD = true
+ for k, v := range md {
+ for _, entry := range v {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ }
+ }
+ }
+ first := true
+ // Sends the headers in a single batch even when they span multiple frames.
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ if first {
+ // Sends a HeadersFrame to server to start a new stream.
+ p := http2.HeadersFrameParam{
+ StreamID: s.id,
+ BlockFragment: t.hBuf.Next(size),
+ EndStream: false,
+ EndHeaders: endHeaders,
+ }
+ // Do a force flush for the buffered frames iff it is the last headers frame
+ // and there is header metadata to be sent. Otherwise, there is no flushing
+ // until the corresponding data frame is written.
+ err = t.framer.writeHeaders(hasMD && endHeaders, p)
+ first = false
+ } else {
+ // Sends Continuation frames for the leftover headers.
+ err = t.framer.writeContinuation(hasMD && endHeaders, s.id, endHeaders, t.hBuf.Next(size))
+ }
+ if err != nil {
+ t.notifyError(err)
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ }
+ t.writableChan <- 0
+ return s, nil
+}
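
Continuing the sketch above, opening a stream with a 5s deadline, which NewStream encodes into the grpc-timeout header (the method name is hypothetical):

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    s, err := ct.NewStream(ctx, &CallHdr{
        Host:   "localhost:50051",
        Method: "/helloworld.Greeter/SayHello", // must contain a '/' separator
    })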
+
+// CloseStream clears the footprint of a stream when the stream is not needed any more.
+// This must not be executed in the reader's goroutine.
+func (t *http2Client) CloseStream(s *Stream, err error) {
+ var updateStreams bool
+ t.mu.Lock()
+ if t.streamsQuota != nil {
+ updateStreams = true
+ }
+ delete(t.activeStreams, s.id)
+ t.mu.Unlock()
+ if updateStreams {
+ t.streamsQuota.add(1)
+ }
+ // In case stream sending and receiving are invoked in separate
+ // goroutines (e.g., bi-directional streaming), the caller needs
+ // to call cancel on the stream to interrupt the blocking on
+ // other goroutines.
+ s.cancel()
+ s.mu.Lock()
+ if q := s.fc.restoreConn(); q > 0 {
+ t.controlBuf.put(&windowUpdate{0, q})
+ }
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ s.state = streamDone
+ s.mu.Unlock()
+ if _, ok := err.(StreamError); ok {
+ t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel})
+ }
+}
+
+// Close kicks off the shutdown process of the transport. This should be called
+// only once on a transport. Once it is called, the transport should not be
+// accessed any more.
+func (t *http2Client) Close() (err error) {
+ t.mu.Lock()
+ if t.state == closing {
+ t.mu.Unlock()
+ return errors.New("transport: Close() was already called")
+ }
+ t.state = closing
+ t.mu.Unlock()
+ close(t.shutdownChan)
+ err = t.conn.Close()
+ t.mu.Lock()
+ streams := t.activeStreams
+ t.activeStreams = nil
+ t.mu.Unlock()
+ // Notify all active streams.
+ for _, s := range streams {
+ s.mu.Lock()
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ s.mu.Unlock()
+ s.write(recvMsg{err: ErrConnClosing})
+ }
+ return
+}
+
+// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
+// should proceed only if Write returns nil.
+// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later
+// if it improves the performance.
+func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
+ r := bytes.NewBuffer(data)
+ for {
+ var p []byte
+ if r.Len() > 0 {
+ size := http2MaxFrameLen
+ s.sendQuotaPool.add(0)
+ // Wait until the stream has some quota to send the data.
+ sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire())
+ if err != nil {
+ return err
+ }
+ t.sendQuotaPool.add(0)
+ // Wait until the transport has some quota to send the data.
+ tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire())
+ if err != nil {
+ if _, ok := err.(StreamError); ok {
+ t.sendQuotaPool.cancel()
+ }
+ return err
+ }
+ if sq < size {
+ size = sq
+ }
+ if tq < size {
+ size = tq
+ }
+ p = r.Next(size)
+ ps := len(p)
+ if ps < sq {
+ // Overbooked stream quota. Return it back.
+ s.sendQuotaPool.add(sq - ps)
+ }
+ if ps < tq {
+ // Overbooked transport quota. Return it back.
+ t.sendQuotaPool.add(tq - ps)
+ }
+ }
+ var (
+ endStream bool
+ forceFlush bool
+ )
+ if opts.Last && r.Len() == 0 {
+ endStream = true
+ }
+ // Indicate there is a writer who is about to write a data frame.
+ t.framer.adjustNumWriters(1)
+ // Got some quota. Try to acquire writing privilege on the transport.
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ if t.framer.adjustNumWriters(-1) == 0 {
+ // This writer is the last one in this batch and has the
+ // responsibility to flush the buffered frames. It queues
+ // a flush request to controlBuf instead of flushing directly
+ // in order to avoid the race with other writing or flushing.
+ t.controlBuf.put(&flushIO{})
+ }
+ return err
+ }
+ if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 {
+ // Do a force flush iff this is the last frame for the entire gRPC message
+ // and the caller is the only writer at this moment.
+ forceFlush = true
+ }
+ // If WriteData fails, all the pending streams will be handled
+ // by http2Client.Close(). No explicit CloseStream() needs to be
+ // invoked.
+ if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil {
+ t.notifyError(err)
+ return ConnectionErrorf("transport: %v", err)
+ }
+ if t.framer.adjustNumWriters(-1) == 0 {
+ t.framer.flushWrite()
+ }
+ t.writableChan <- 0
+ if r.Len() == 0 {
+ break
+ }
+ }
+ if !opts.Last {
+ return nil
+ }
+ s.mu.Lock()
+ if s.state != streamDone {
+ if s.state == streamReadDone {
+ s.state = streamDone
+ } else {
+ s.state = streamWriteDone
+ }
+ }
+ s.mu.Unlock()
+ return nil
+}
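
The frame-size clamp inside the loop above can be restated as a hypothetical helper (not part of this file): a data frame carries at most http2MaxFrameLen bytes, further bounded by the stream and transport quota grants.

    func frameSize(sq, tq int) int {
        size := http2MaxFrameLen
        if sq < size {
            size = sq
        }
        if tq < size {
            size = tq
        }
        return size // e.g. frameSize(4096, 70000) == 4096
    }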
+
+func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.activeStreams == nil {
+ // The transport is closing.
+ return nil, false
+ }
+ if s, ok := t.activeStreams[f.Header().StreamID]; ok {
+ return s, true
+ }
+ return nil, false
+}
+
+// updateWindow adjusts the inbound quota for the stream and the transport.
+// Window updates are delivered to the controller for sending when the
+// cumulative quota exceeds the corresponding threshold.
+func (t *http2Client) updateWindow(s *Stream, n uint32) {
+ swu, cwu := s.fc.onRead(n)
+ if swu > 0 {
+ t.controlBuf.put(&windowUpdate{s.id, swu})
+ }
+ if cwu > 0 {
+ t.controlBuf.put(&windowUpdate{0, cwu})
+ }
+}
+
+func (t *http2Client) handleData(f *http2.DataFrame) {
+ // Select the right stream to dispatch.
+ s, ok := t.getStream(f)
+ if !ok {
+ return
+ }
+ size := len(f.Data())
+ if size > 0 {
+ if err := s.fc.onData(uint32(size)); err != nil {
+ if _, ok := err.(ConnectionError); ok {
+ t.notifyError(err)
+ return
+ }
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ s.state = streamDone
+ s.statusCode = codes.Internal
+ s.statusDesc = err.Error()
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+ t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
+ return
+ }
+ // TODO(bradfitz, zhaoq): A copy is required here because there is no
+ // guarantee f.Data() is consumed before the arrival of next frame.
+ // Can this copy be eliminated?
+ data := make([]byte, size)
+ copy(data, f.Data())
+ s.write(recvMsg{data: data})
+ }
+ // If the server has closed the stream without sending trailers, record
+ // that the read direction is closed and set the status appropriately.
+ if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
+ s.mu.Lock()
+ if s.state == streamWriteDone {
+ s.state = streamDone
+ } else {
+ s.state = streamReadDone
+ }
+ s.statusCode = codes.Internal
+ s.statusDesc = "server closed the stream without sending trailers"
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+ }
+}
+
+func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
+ s, ok := t.getStream(f)
+ if !ok {
+ return
+ }
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ s.state = streamDone
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)]
+ if !ok {
+ grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode)
+ }
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+}
+
+func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
+ if f.IsAck() {
+ return
+ }
+ var ss []http2.Setting
+ f.ForeachSetting(func(s http2.Setting) error {
+ ss = append(ss, s)
+ return nil
+ })
+ // The settings will be applied once the ack is sent.
+ t.controlBuf.put(&settings{ack: true, ss: ss})
+}
+
+func (t *http2Client) handlePing(f *http2.PingFrame) {
+ t.controlBuf.put(&ping{true})
+}
+
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+ // TODO(zhaoq): GoAwayFrame handler to be implemented
+}
+
+func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ id := f.Header().StreamID
+ incr := f.Increment
+ if id == 0 {
+ t.sendQuotaPool.add(int(incr))
+ return
+ }
+ if s, ok := t.getStream(f); ok {
+ s.sendQuotaPool.add(int(incr))
+ }
+}
+
+// operateHeaders takes action on the decoded headers. It returns the current
+// stream if there are remaining headers on the wire (in the following
+// Continuation frame).
+func (t *http2Client) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool) (pendingStream *Stream) {
+ defer func() {
+ if pendingStream == nil {
+ hDec.state = decodeState{}
+ }
+ }()
+ endHeaders, err := hDec.decodeClientHTTP2Headers(frame)
+ if s == nil {
+ // s has been closed.
+ return nil
+ }
+ if err != nil {
+ s.write(recvMsg{err: err})
+ // Something went wrong. Stop reading even if headers remain.
+ return nil
+ }
+ if !endHeaders {
+ return s
+ }
+
+ s.mu.Lock()
+ if !s.headerDone {
+ if !endStream && len(hDec.state.mdata) > 0 {
+ s.header = hDec.state.mdata
+ }
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ if !endStream || s.state == streamDone {
+ s.mu.Unlock()
+ return nil
+ }
+
+ if len(hDec.state.mdata) > 0 {
+ s.trailer = hDec.state.mdata
+ }
+ s.state = streamDone
+ s.statusCode = hDec.state.statusCode
+ s.statusDesc = hDec.state.statusDesc
+ s.mu.Unlock()
+
+ s.write(recvMsg{err: io.EOF})
+ return nil
+}
+
+// reader runs as a separate goroutine in charge of reading data from the
+// network connection.
+//
+// TODO(zhaoq): currently one reader per transport. Investigate whether this is
+// optimal.
+// TODO(zhaoq): Check the validity of the incoming frame sequence.
+func (t *http2Client) reader() {
+ // Check the validity of server preface.
+ frame, err := t.framer.readFrame()
+ if err != nil {
+ t.notifyError(err)
+ return
+ }
+ sf, ok := frame.(*http2.SettingsFrame)
+ if !ok {
+ // err is nil here: the server's first frame was not a settings frame,
+ // so report the protocol violation explicitly.
+ t.notifyError(errors.New("transport: first frame received is not a settings frame"))
+ return
+ }
+ t.handleSettings(sf)
+
+ hDec := newHPACKDecoder()
+ var curStream *Stream
+ // loop to keep reading incoming messages on this transport.
+ for {
+ frame, err := t.framer.readFrame()
+ if err != nil {
+ t.notifyError(err)
+ return
+ }
+ switch frame := frame.(type) {
+ case *http2.HeadersFrame:
+ // operateHeaders has to be invoked regardless of the value of curStream
+ // because the HPACK decoder needs to be updated using the received
+ // headers.
+ curStream, _ = t.getStream(frame)
+ endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream)
+ curStream = t.operateHeaders(hDec, curStream, frame, endStream)
+ case *http2.ContinuationFrame:
+ curStream = t.operateHeaders(hDec, curStream, frame, false)
+ case *http2.DataFrame:
+ t.handleData(frame)
+ case *http2.RSTStreamFrame:
+ t.handleRSTStream(frame)
+ case *http2.SettingsFrame:
+ t.handleSettings(frame)
+ case *http2.PingFrame:
+ t.handlePing(frame)
+ case *http2.GoAwayFrame:
+ t.handleGoAway(frame)
+ case *http2.WindowUpdateFrame:
+ t.handleWindowUpdate(frame)
+ default:
+ grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ }
+ }
+}
+
+func (t *http2Client) applySettings(ss []http2.Setting) {
+ for _, s := range ss {
+ switch s.ID {
+ case http2.SettingMaxConcurrentStreams:
+ // TODO(zhaoq): This is a hack to avoid significant refactoring of the
+ // code to deal with the unrealistic int32 overflow. Probably will try
+ // to find a better way to handle this later.
+ if s.Val > math.MaxInt32 {
+ s.Val = math.MaxInt32
+ }
+ t.mu.Lock()
+ reset := t.streamsQuota != nil
+ if !reset {
+ t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams))
+ }
+ ms := t.maxStreams
+ t.maxStreams = int(s.Val)
+ t.mu.Unlock()
+ if reset {
+ t.streamsQuota.reset(int(s.Val) - ms)
+ }
+ case http2.SettingInitialWindowSize:
+ t.mu.Lock()
+ for _, stream := range t.activeStreams {
+ // Adjust the sending quota for each stream.
+ stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
+ }
+ t.streamSendQuota = s.Val
+ t.mu.Unlock()
+ }
+ }
+}
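
For instance, if the peer raises SETTINGS_INITIAL_WINDOW_SIZE from 65535 to 131070, each active stream's send pool is reset with a delta of +65535. A hypothetical helper restating the inline reset argument:

    func quotaDelta(newWindow, current uint32) int {
        return int(newWindow - current) // e.g. quotaDelta(131070, 65535) == 65535
    }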
+
+// controller runs in a separate goroutine and takes charge of sending
+// control frames (e.g., window updates, stream resets, and settings) to
+// the server.
+func (t *http2Client) controller() {
+ for {
+ select {
+ case i := <-t.controlBuf.get():
+ t.controlBuf.load()
+ select {
+ case <-t.writableChan:
+ switch i := i.(type) {
+ case *windowUpdate:
+ t.framer.writeWindowUpdate(true, i.streamID, i.increment)
+ case *settings:
+ if i.ack {
+ t.framer.writeSettingsAck(true)
+ t.applySettings(i.ss)
+ } else {
+ t.framer.writeSettings(true, i.ss...)
+ }
+ case *resetStream:
+ t.framer.writeRSTStream(true, i.streamID, i.code)
+ case *flushIO:
+ t.framer.flushWrite()
+ case *ping:
+ // TODO(zhaoq): Ack with all-0 data now. Will change to some
+ // meaningful content when this is actually in use.
+ t.framer.writePing(true, i.ack, [8]byte{})
+ default:
+ grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i)
+ }
+ t.writableChan <- 0
+ continue
+ case <-t.shutdownChan:
+ return
+ }
+ case <-t.shutdownChan:
+ return
+ }
+ }
+}
+
+func (t *http2Client) Error() <-chan struct{} {
+ return t.errorChan
+}
+
+func (t *http2Client) notifyError(err error) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ // make sure t.errorChan is closed only once.
+ if t.state == reachable {
+ t.state = unreachable
+ close(t.errorChan)
+ grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err)
+ }
+}
diff --git a/vendor/src/google.golang.org/grpc/transport/http2_server.go b/vendor/src/google.golang.org/grpc/transport/http2_server.go
new file mode 100644
index 0000000000..f3488f83dc
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/transport/http2_server.go
@@ -0,0 +1,695 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "math"
+ "net"
+ "strconv"
+ "sync"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+)
+
+// ErrIllegalHeaderWrite indicates that setting the header is illegal because
+// of the stream's state.
+var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
+
+// http2Server implements the ServerTransport interface with HTTP2.
+type http2Server struct {
+ conn net.Conn
+ maxStreamID uint32 // max stream ID ever seen
+ authInfo credentials.AuthInfo // auth info about the connection
+ // writableChan synchronizes write access to the transport.
+ // A writer acquires the write lock by sending a value on writableChan
+ // and releases it by receiving from writableChan.
+ writableChan chan int
+ // shutdownChan is closed when Close is called.
+ // Blocking operations should select on shutdownChan to avoid
+ // blocking forever after Close.
+ shutdownChan chan struct{}
+ framer *framer
+ hBuf *bytes.Buffer // the buffer for HPACK encoding
+ hEnc *hpack.Encoder // HPACK encoder
+
+ // The max number of concurrent streams.
+ maxStreams uint32
+ // controlBuf delivers all the control related tasks (e.g., window
+ // updates, reset streams, and various settings) to the controller.
+ controlBuf *recvBuffer
+ fc *inFlow
+ // sendQuotaPool provides flow control for outbound messages.
+ sendQuotaPool *quotaPool
+
+ mu sync.Mutex // guard the following
+ state transportState
+ activeStreams map[uint32]*Stream
+ // the per-stream outbound flow control window size set by the peer.
+ streamSendQuota uint32
+}
+
+// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
+// returned if something goes wrong.
+func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) {
+ framer := newFramer(conn)
+ // Send initial settings as connection preface to client.
+ var settings []http2.Setting
+ // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
+ // permitted in the HTTP2 spec.
+ if maxStreams == 0 {
+ maxStreams = math.MaxUint32
+ } else {
+ settings = append(settings, http2.Setting{http2.SettingMaxConcurrentStreams, maxStreams})
+ }
+ if initialWindowSize != defaultWindowSize {
+ settings = append(settings, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)})
+ }
+ if err := framer.writeSettings(true, settings...); err != nil {
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ // Adjust the connection flow control window if needed.
+ if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
+ if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ }
+ var buf bytes.Buffer
+ t := &http2Server{
+ conn: conn,
+ authInfo: authInfo,
+ framer: framer,
+ hBuf: &buf,
+ hEnc: hpack.NewEncoder(&buf),
+ maxStreams: maxStreams,
+ controlBuf: newRecvBuffer(),
+ fc: &inFlow{limit: initialConnWindowSize},
+ sendQuotaPool: newQuotaPool(defaultWindowSize),
+ state: reachable,
+ writableChan: make(chan int, 1),
+ shutdownChan: make(chan struct{}),
+ activeStreams: make(map[uint32]*Stream),
+ streamSendQuota: defaultWindowSize,
+ }
+ go t.controller()
+ t.writableChan <- 0
+ return t, nil
+}
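
A hedged sketch of serving gRPC streams on an accepted TCP connection (error handling elided; the handler body is a placeholder):

    lis, _ := net.Listen("tcp", ":50051")
    conn, _ := lis.Accept()
    st, err := newHTTP2Server(conn, 100, nil) // allow up to 100 concurrent streams
    if err == nil {
        go st.HandleStreams(func(s *Stream) {
            // dispatch s to the registered service method
        })
    }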
+
+// operateHeaders takes action on the decoded headers. It returns the current
+// stream if there are remaining headers on the wire (in the following
+// Continuation frame).
+func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream)) (pendingStream *Stream) {
+ defer func() {
+ if pendingStream == nil {
+ hDec.state = decodeState{}
+ }
+ }()
+ endHeaders, err := hDec.decodeServerHTTP2Headers(frame)
+ if s == nil {
+ // s has been closed.
+ return nil
+ }
+ if err != nil {
+ grpclog.Printf("transport: http2Server.operateHeader found %v", err)
+ if se, ok := err.(StreamError); ok {
+ t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
+ }
+ return nil
+ }
+ if endStream {
+ // s is just created by the caller. No lock needed.
+ s.state = streamReadDone
+ }
+ if !endHeaders {
+ return s
+ }
+ t.mu.Lock()
+ if t.state != reachable {
+ t.mu.Unlock()
+ return nil
+ }
+ if uint32(len(t.activeStreams)) >= t.maxStreams {
+ t.mu.Unlock()
+ t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
+ return nil
+ }
+ s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
+ t.activeStreams[s.id] = s
+ t.mu.Unlock()
+ s.windowHandler = func(n int) {
+ t.updateWindow(s, uint32(n))
+ }
+ if hDec.state.timeoutSet {
+ s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout)
+ } else {
+ s.ctx, s.cancel = context.WithCancel(context.TODO())
+ }
+ // Attach Auth info if there is any.
+ if t.authInfo != nil {
+ s.ctx = credentials.NewContext(s.ctx, t.authInfo)
+ }
+ // Cache the current stream in the context so that the server application
+ // can retrieve it. Required when the server wants to send some metadata
+ // back to the client (unary call only).
+ s.ctx = newContextWithStream(s.ctx, s)
+ // Attach the received metadata to the context.
+ if len(hDec.state.mdata) > 0 {
+ s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata)
+ }
+
+ s.dec = &recvBufferReader{
+ ctx: s.ctx,
+ recv: s.buf,
+ }
+ s.method = hDec.state.method
+ handle(s)
+ return nil
+}
+
+// HandleStreams receives incoming streams using the given handler. This is
+// typically run in a separate goroutine.
+func (t *http2Server) HandleStreams(handle func(*Stream)) {
+ // Check the validity of client preface.
+ preface := make([]byte, len(clientPreface))
+ if _, err := io.ReadFull(t.conn, preface); err != nil {
+ grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
+ t.Close()
+ return
+ }
+ if !bytes.Equal(preface, clientPreface) {
+ grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
+ t.Close()
+ return
+ }
+
+ frame, err := t.framer.readFrame()
+ if err != nil {
+ grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
+ t.Close()
+ return
+ }
+ sf, ok := frame.(*http2.SettingsFrame)
+ if !ok {
+ grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
+ t.Close()
+ return
+ }
+ t.handleSettings(sf)
+
+ hDec := newHPACKDecoder()
+ var curStream *Stream
+ for {
+ frame, err := t.framer.readFrame()
+ if err != nil {
+ t.Close()
+ return
+ }
+ switch frame := frame.(type) {
+ case *http2.HeadersFrame:
+ id := frame.Header().StreamID
+ if id%2 != 1 || id <= t.maxStreamID {
+ // illegal gRPC stream id.
+ grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id)
+ t.Close()
+ break
+ }
+ t.maxStreamID = id
+ buf := newRecvBuffer()
+ fc := &inFlow{
+ limit: initialWindowSize,
+ conn: t.fc,
+ }
+ curStream = &Stream{
+ id: frame.Header().StreamID,
+ st: t,
+ buf: buf,
+ fc: fc,
+ }
+ endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream)
+ curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle)
+ case *http2.ContinuationFrame:
+ curStream = t.operateHeaders(hDec, curStream, frame, false, handle)
+ case *http2.DataFrame:
+ t.handleData(frame)
+ case *http2.RSTStreamFrame:
+ t.handleRSTStream(frame)
+ case *http2.SettingsFrame:
+ t.handleSettings(frame)
+ case *http2.PingFrame:
+ t.handlePing(frame)
+ case *http2.WindowUpdateFrame:
+ t.handleWindowUpdate(frame)
+ case *http2.GoAwayFrame:
+ break
+ default:
+ grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+ }
+ }
+}
+
+func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.activeStreams == nil {
+ // The transport is closing.
+ return nil, false
+ }
+ s, ok := t.activeStreams[f.Header().StreamID]
+ if !ok {
+ // The stream is already done.
+ return nil, false
+ }
+ return s, true
+}
+
+// updateWindow adjusts the inbound quota for the stream and the transport.
+// Window updates are delivered to the controller for sending when the
+// cumulative quota exceeds the corresponding threshold.
+func (t *http2Server) updateWindow(s *Stream, n uint32) {
+ swu, cwu := s.fc.onRead(n)
+ if swu > 0 {
+ t.controlBuf.put(&windowUpdate{s.id, swu})
+ }
+ if cwu > 0 {
+ t.controlBuf.put(&windowUpdate{0, cwu})
+ }
+}
+
+func (t *http2Server) handleData(f *http2.DataFrame) {
+ // Select the right stream to dispatch.
+ s, ok := t.getStream(f)
+ if !ok {
+ return
+ }
+ size := len(f.Data())
+ if size > 0 {
+ if err := s.fc.onData(uint32(size)); err != nil {
+ if _, ok := err.(ConnectionError); ok {
+ grpclog.Printf("transport: http2Server %v", err)
+ t.Close()
+ return
+ }
+ t.closeStream(s)
+ t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
+ return
+ }
+ // TODO(bradfitz, zhaoq): A copy is required here because there is no
+ // guarantee f.Data() is consumed before the arrival of next frame.
+ // Can this copy be eliminated?
+ data := make([]byte, size)
+ copy(data, f.Data())
+ s.write(recvMsg{data: data})
+ }
+ if f.Header().Flags.Has(http2.FlagDataEndStream) {
+ // Received the end of stream from the client.
+ s.mu.Lock()
+ if s.state != streamDone {
+ if s.state == streamWriteDone {
+ s.state = streamDone
+ } else {
+ s.state = streamReadDone
+ }
+ }
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+ }
+}
+
+func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
+ s, ok := t.getStream(f)
+ if !ok {
+ return
+ }
+ t.closeStream(s)
+}
+
+func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
+ if f.IsAck() {
+ return
+ }
+ var ss []http2.Setting
+ f.ForeachSetting(func(s http2.Setting) error {
+ ss = append(ss, s)
+ return nil
+ })
+ // The settings will be applied once the ack is sent.
+ t.controlBuf.put(&settings{ack: true, ss: ss})
+}
+
+func (t *http2Server) handlePing(f *http2.PingFrame) {
+ t.controlBuf.put(&ping{true})
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ id := f.Header().StreamID
+ incr := f.Increment
+ if id == 0 {
+ t.sendQuotaPool.add(int(incr))
+ return
+ }
+ if s, ok := t.getStream(f); ok {
+ s.sendQuotaPool.add(int(incr))
+ }
+}
+
+func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error {
+ first := true
+ endHeaders := false
+ var err error
+ // Sends the headers in a single batch.
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ if first {
+ p := http2.HeadersFrameParam{
+ StreamID: s.id,
+ BlockFragment: b.Next(size),
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ }
+ err = t.framer.writeHeaders(endHeaders, p)
+ first = false
+ } else {
+ err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))
+ }
+ if err != nil {
+ t.Close()
+ return ConnectionErrorf("transport: %v", err)
+ }
+ }
+ return nil
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+ s.mu.Lock()
+ if s.headerOk || s.state == streamDone {
+ s.mu.Unlock()
+ return ErrIllegalHeaderWrite
+ }
+ s.headerOk = true
+ s.mu.Unlock()
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ return err
+ }
+ t.hBuf.Reset()
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ for k, v := range md {
+ for _, entry := range v {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ }
+ }
+ if err := t.writeHeaders(s, t.hBuf, false); err != nil {
+ return err
+ }
+ t.writableChan <- 0
+ return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
+ var headersSent bool
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return nil
+ }
+ if s.headerOk {
+ headersSent = true
+ }
+ s.mu.Unlock()
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ return err
+ }
+ t.hBuf.Reset()
+ if !headersSent {
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ }
+ t.hEnc.WriteField(
+ hpack.HeaderField{
+ Name: "grpc-status",
+ Value: strconv.Itoa(int(statusCode)),
+ })
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc})
+ // Attach the trailer metadata.
+ for k, v := range s.trailer {
+ for _, entry := range v {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ }
+ }
+ if err := t.writeHeaders(s, t.hBuf, true); err != nil {
+ t.Close()
+ return err
+ }
+ t.closeStream(s)
+ t.writableChan <- 0
+ return nil
+}
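
Continuing the server sketch, a handler would typically end an RPC like this (status values are illustrative):

    if err := st.WriteStatus(s, codes.NotFound, "no such method"); err != nil {
        grpclog.Printf("transport: failed to write status: %v", err)
    }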
+
+// Write converts the data into HTTP2 data frame(s) and sends it out. A
+// non-nil error is returned if it fails (e.g., framing error, transport
+// error).
+func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
+ // TODO(zhaoq): Support multi-writers for a single stream.
+ var writeHeaderFrame bool
+ s.mu.Lock()
+ if !s.headerOk {
+ writeHeaderFrame = true
+ s.headerOk = true
+ }
+ s.mu.Unlock()
+ if writeHeaderFrame {
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ return err
+ }
+ t.hBuf.Reset()
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ p := http2.HeadersFrameParam{
+ StreamID: s.id,
+ BlockFragment: t.hBuf.Bytes(),
+ EndHeaders: true,
+ }
+ if err := t.framer.writeHeaders(false, p); err != nil {
+ t.Close()
+ return ConnectionErrorf("transport: %v", err)
+ }
+ t.writableChan <- 0
+ }
+ r := bytes.NewBuffer(data)
+ for {
+ if r.Len() == 0 {
+ return nil
+ }
+ size := http2MaxFrameLen
+ s.sendQuotaPool.add(0)
+ // Wait until the stream has some quota to send the data.
+ sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire())
+ if err != nil {
+ return err
+ }
+ t.sendQuotaPool.add(0)
+ // Wait until the transport has some quota to send the data.
+ tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire())
+ if err != nil {
+ if _, ok := err.(StreamError); ok {
+ t.sendQuotaPool.cancel()
+ }
+ return err
+ }
+ if sq < size {
+ size = sq
+ }
+ if tq < size {
+ size = tq
+ }
+ p := r.Next(size)
+ ps := len(p)
+ if ps < sq {
+ // Overbooked stream quota. Return it back.
+ s.sendQuotaPool.add(sq - ps)
+ }
+ if ps < tq {
+ // Overbooked transport quota. Return it back.
+ t.sendQuotaPool.add(tq - ps)
+ }
+ t.framer.adjustNumWriters(1)
+ // Got some quota. Try to acquire writing privilege on the
+ // transport.
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ if t.framer.adjustNumWriters(-1) == 0 {
+ // This writer is the last one in this batch and has the
+ // responsibility to flush the buffered frames. It queues
+ // a flush request to controlBuf instead of flushing directly
+ // in order to avoid the race with other writing or flushing.
+ t.controlBuf.put(&flushIO{})
+ }
+ return err
+ }
+ var forceFlush bool
+ if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last {
+ forceFlush = true
+ }
+ if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil {
+ t.Close()
+ return ConnectionErrorf("transport: %v", err)
+ }
+ if t.framer.adjustNumWriters(-1) == 0 {
+ t.framer.flushWrite()
+ }
+ t.writableChan <- 0
+ }
+}
+
+func (t *http2Server) applySettings(ss []http2.Setting) {
+ for _, s := range ss {
+ if s.ID == http2.SettingInitialWindowSize {
+ t.mu.Lock()
+ for _, stream := range t.activeStreams {
+ stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
+ }
+ t.streamSendQuota = s.Val
+ // Unlock explicitly: a deferred unlock inside this loop would deadlock
+ // if ss carried the setting more than once.
+ t.mu.Unlock()
+ }
+ }
+}
+
+// controller runs in a separate goroutine and takes charge of sending
+// control frames (e.g., window updates, stream resets, and settings) to
+// the client.
+func (t *http2Server) controller() {
+ for {
+ select {
+ case i := <-t.controlBuf.get():
+ t.controlBuf.load()
+ select {
+ case <-t.writableChan:
+ switch i := i.(type) {
+ case *windowUpdate:
+ t.framer.writeWindowUpdate(true, i.streamID, i.increment)
+ case *settings:
+ if i.ack {
+ t.framer.writeSettingsAck(true)
+ t.applySettings(i.ss)
+ } else {
+ t.framer.writeSettings(true, i.ss...)
+ }
+ case *resetStream:
+ t.framer.writeRSTStream(true, i.streamID, i.code)
+ case *flushIO:
+ t.framer.flushWrite()
+ case *ping:
+ // TODO(zhaoq): Ack with all-0 data now. Will change to some
+ // meaningful content when this is actually in use.
+ t.framer.writePing(true, i.ack, [8]byte{})
+ default:
+ grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i)
+ }
+ t.writableChan <- 0
+ continue
+ case <-t.shutdownChan:
+ return
+ }
+ case <-t.shutdownChan:
+ return
+ }
+ }
+}
+
+// Close starts shutting down the http2Server transport.
+// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
+// could cause some resource issue. Revisit this later.
+func (t *http2Server) Close() (err error) {
+ t.mu.Lock()
+ if t.state == closing {
+ t.mu.Unlock()
+ return errors.New("transport: Close() was already called")
+ }
+ t.state = closing
+ streams := t.activeStreams
+ t.activeStreams = nil
+ t.mu.Unlock()
+ close(t.shutdownChan)
+ err = t.conn.Close()
+ // Notify all active streams.
+ for _, s := range streams {
+ s.write(recvMsg{err: ErrConnClosing})
+ }
+ return
+}
+
+// closeStream clears the footprint of a stream when the stream is not needed
+// any more.
+func (t *http2Server) closeStream(s *Stream) {
+ t.mu.Lock()
+ delete(t.activeStreams, s.id)
+ t.mu.Unlock()
+ if q := s.fc.restoreConn(); q > 0 {
+ t.controlBuf.put(&windowUpdate{0, q})
+ }
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ s.state = streamDone
+ s.mu.Unlock()
+ // In case stream sending and receiving are invoked in separate
+ // goroutines (e.g., bi-directional streaming), the caller needs
+ // to call cancel on the stream to interrupt the blocking on
+ // other goroutines.
+ s.cancel()
+}
+
+func (t *http2Server) RemoteAddr() net.Addr {
+ return t.conn.RemoteAddr()
+}
diff --git a/vendor/src/google.golang.org/grpc/transport/http_util.go b/vendor/src/google.golang.org/grpc/transport/http_util.go
new file mode 100644
index 0000000000..fec4e4755d
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/transport/http_util.go
@@ -0,0 +1,451 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+)
+
+const (
+ // The primary user agent
+ primaryUA = "grpc-go/0.11"
+ // http2MaxFrameLen specifies the max length of an HTTP2 frame.
+ http2MaxFrameLen = 16384 // 16KB frame
+ // http://http2.github.io/http2-spec/#SettingValues
+ http2InitHeaderTableSize = 4096
+ // http2IOBufSize specifies the buffer size for sending frames.
+ http2IOBufSize = 32 * 1024
+)
+
+var (
+ clientPreface = []byte(http2.ClientPreface)
+ http2RSTErrConvTab = map[http2.ErrCode]codes.Code{
+ http2.ErrCodeNo: codes.Internal,
+ http2.ErrCodeProtocol: codes.Internal,
+ http2.ErrCodeInternal: codes.Internal,
+ http2.ErrCodeFlowControl: codes.ResourceExhausted,
+ http2.ErrCodeSettingsTimeout: codes.Internal,
+ http2.ErrCodeFrameSize: codes.Internal,
+ http2.ErrCodeRefusedStream: codes.Unavailable,
+ http2.ErrCodeCancel: codes.Canceled,
+ http2.ErrCodeCompression: codes.Internal,
+ http2.ErrCodeConnect: codes.Internal,
+ http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
+ http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
+ }
+ statusCodeConvTab = map[codes.Code]http2.ErrCode{
+ codes.Internal: http2.ErrCodeInternal,
+ codes.Canceled: http2.ErrCodeCancel,
+ codes.Unavailable: http2.ErrCodeRefusedStream,
+ codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
+ codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
+ }
+)
+
+// decodeState records the state during HPACK decoding. It must be reset once
+// the decoding of an entire header block is finished.
+type decodeState struct {
+ // statusCode caches the stream status received from the trailer
+ // the server sent. Client side only.
+ statusCode codes.Code
+ statusDesc string
+ // Server side only fields.
+ timeoutSet bool
+ timeout time.Duration
+ method string
+ // key-value metadata map from the peer.
+ mdata map[string][]string
+}
+
+// An hpackDecoder decodes HTTP2 headers which may span multiple frames.
+type hpackDecoder struct {
+ h *hpack.Decoder
+ state decodeState
+ err error // the error encountered while decoding
+}
+
+// A headerFrame is either an http2.HeadersFrame or an http2.ContinuationFrame.
+type headerFrame interface {
+ Header() http2.FrameHeader
+ HeaderBlockFragment() []byte
+ HeadersEnded() bool
+}
+
+// isReservedHeader checks whether hdr belongs to the HTTP2 headers reserved
+// by the gRPC protocol. Any other headers are classified as user-specified
+// metadata.
+func isReservedHeader(hdr string) bool {
+ if hdr[0] == ':' {
+ return true
+ }
+ switch hdr {
+ case "content-type",
+ "grpc-message-type",
+ "grpc-encoding",
+ "grpc-message",
+ "grpc-status",
+ "grpc-timeout",
+ "te":
+ return true
+ default:
+ return false
+ }
+}
+
+func newHPACKDecoder() *hpackDecoder {
+ d := &hpackDecoder{}
+ d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) {
+ switch f.Name {
+ case "content-type":
+ if !strings.Contains(f.Value, "application/grpc") {
+ d.err = StreamErrorf(codes.FailedPrecondition, "transport: received unexpected content-type header")
+ return
+ }
+ case "grpc-status":
+ code, err := strconv.Atoi(f.Value)
+ if err != nil {
+ d.err = StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)
+ return
+ }
+ d.state.statusCode = codes.Code(code)
+ case "grpc-message":
+ d.state.statusDesc = f.Value
+ case "grpc-timeout":
+ d.state.timeoutSet = true
+ var err error
+ d.state.timeout, err = timeoutDecode(f.Value)
+ if err != nil {
+ d.err = StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)
+ return
+ }
+ case ":path":
+ d.state.method = f.Value
+ default:
+ if !isReservedHeader(f.Name) {
+ if f.Name == "user-agent" {
+ i := strings.LastIndex(f.Value, " ")
+ if i == -1 {
+ // There is no application user agent string being set.
+ return
+ }
+ // Extract the application user agent string.
+ f.Value = f.Value[:i]
+ }
+ if d.state.mdata == nil {
+ d.state.mdata = make(map[string][]string)
+ }
+ k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
+ if err != nil {
+ grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
+ return
+ }
+ d.state.mdata[k] = append(d.state.mdata[k], v)
+ }
+ }
+ })
+ return d
+}
+
+func (d *hpackDecoder) decodeClientHTTP2Headers(frame headerFrame) (endHeaders bool, err error) {
+ d.err = nil
+ _, err = d.h.Write(frame.HeaderBlockFragment())
+ if err != nil {
+ err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err)
+ }
+
+ if frame.HeadersEnded() {
+ if closeErr := d.h.Close(); closeErr != nil && err == nil {
+ err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr)
+ }
+ endHeaders = true
+ }
+
+ if err == nil && d.err != nil {
+ err = d.err
+ }
+ return
+}
+
+func (d *hpackDecoder) decodeServerHTTP2Headers(frame headerFrame) (endHeaders bool, err error) {
+ d.err = nil
+ _, err = d.h.Write(frame.HeaderBlockFragment())
+ if err != nil {
+ err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err)
+ }
+
+ if frame.HeadersEnded() {
+ if closeErr := d.h.Close(); closeErr != nil && err == nil {
+ err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr)
+ }
+ endHeaders = true
+ }
+
+ if err == nil && d.err != nil {
+ err = d.err
+ }
+ return
+}
+
+type timeoutUnit uint8
+
+const (
+ hour timeoutUnit = 'H'
+ minute timeoutUnit = 'M'
+ second timeoutUnit = 'S'
+ millisecond timeoutUnit = 'm'
+ microsecond timeoutUnit = 'u'
+ nanosecond timeoutUnit = 'n'
+)
+
+func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
+ switch u {
+ case hour:
+ return time.Hour, true
+ case minute:
+ return time.Minute, true
+ case second:
+ return time.Second, true
+ case millisecond:
+ return time.Millisecond, true
+ case microsecond:
+ return time.Microsecond, true
+ case nanosecond:
+ return time.Nanosecond, true
+ default:
+ }
+ return
+}
+
+const maxTimeoutValue int64 = 100000000 - 1
+
+// div does integer division and rounds up the result. Note that this is
+// equivalent to (d+r-1)/r but is less likely to overflow.
+func div(d, r time.Duration) int64 {
+ if m := d % r; m > 0 {
+ return int64(d/r + 1)
+ }
+ return int64(d / r)
+}
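
For example, the round-up behavior of div:

    fmt.Println(div(1001*time.Millisecond, time.Second)) // 2 (rounded up)
    fmt.Println(div(1000*time.Millisecond, time.Second)) // 1 (divides evenly)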
+
+// TODO(zhaoq): This encoding is simplistic and not bandwidth efficient. Improve it.
+func timeoutEncode(t time.Duration) string {
+ if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "n"
+ }
+ if d := div(t, time.Microsecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "u"
+ }
+ if d := div(t, time.Millisecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "m"
+ }
+ if d := div(t, time.Second); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "S"
+ }
+ if d := div(t, time.Minute); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "M"
+ }
+ // Note that maxTimeoutValue * time.Hour > MaxInt64.
+ return strconv.FormatInt(div(t, time.Hour), 10) + "H"
+}
+
+func timeoutDecode(s string) (time.Duration, error) {
+ size := len(s)
+ if size < 2 {
+ return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
+ }
+ unit := timeoutUnit(s[size-1])
+ d, ok := timeoutUnitToDuration(unit)
+ if !ok {
+ return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
+ }
+ t, err := strconv.ParseInt(s[:size-1], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return d * time.Duration(t), nil
+}
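
A round-trip sketch of the grpc-timeout wire format implemented above:

    v := timeoutEncode(250 * time.Millisecond) // "250000u": 250ms expressed in microseconds
    d, err := timeoutDecode(v)                 // d == 250*time.Millisecond, err == nil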
+
+type framer struct {
+ numWriters int32
+ reader io.Reader
+ writer *bufio.Writer
+ fr *http2.Framer
+}
+
+func newFramer(conn net.Conn) *framer {
+ f := &framer{
+ reader: conn,
+ writer: bufio.NewWriterSize(conn, http2IOBufSize),
+ }
+ f.fr = http2.NewFramer(f.writer, f.reader)
+ return f
+}
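
A sketch of the forceFlush convention shared by the writeXXX helpers below: writes stay in the bufio.Writer until a caller passes forceFlush=true or flushWrite is invoked (conn is an established net.Conn):

    fr := newFramer(conn)
    _ = fr.writeSettings(false)             // buffered only
    _ = fr.writeWindowUpdate(true, 0, 1024) // flushes both frames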
+
+func (f *framer) adjustNumWriters(i int32) int32 {
+ return atomic.AddInt32(&f.numWriters, i)
+}
+
+// The following writeXXX functions can only be called when the caller gets
+// unblocked from the writableChan channel (i.e., owns the privilege to write).
+
+func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+ if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error {
+ if err := f.fr.WriteData(streamID, endStream, data); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error {
+ if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error {
+ if err := f.fr.WriteHeaders(p); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error {
+ if err := f.fr.WritePing(ack, data); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error {
+ if err := f.fr.WritePriority(streamID, p); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error {
+ if err := f.fr.WritePushPromise(p); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error {
+ if err := f.fr.WriteRSTStream(streamID, code); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error {
+ if err := f.fr.WriteSettings(settings...); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeSettingsAck(forceFlush bool) error {
+ if err := f.fr.WriteSettingsAck(); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error {
+ if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) flushWrite() error {
+ return f.writer.Flush()
+}
+
+func (f *framer) readFrame() (http2.Frame, error) {
+ return f.fr.ReadFrame()
+}
diff --git a/vendor/src/google.golang.org/grpc/transport/transport.go b/vendor/src/google.golang.org/grpc/transport/transport.go
new file mode 100644
index 0000000000..e1e7f5761a
--- /dev/null
+++ b/vendor/src/google.golang.org/grpc/transport/transport.go
@@ -0,0 +1,465 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+Package transport defines and implements a message-oriented communication
+channel to complete various transactions (e.g., an RPC).
+*/
+package transport // import "google.golang.org/grpc/transport"
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/metadata"
+)
+
+// recvMsg represents a message received from the transport. All
+// transport-protocol-specific info has been removed.
+type recvMsg struct {
+ data []byte
+ // nil: received some data
+ // io.EOF: stream is completed. data is nil.
+ // other non-nil error: transport failure. data is nil.
+ err error
+}
+
+func (recvMsg) isItem() bool {
+ return true
+}
+
+// All items in and out of a recvBuffer should be of the same type.
+type item interface {
+ isItem() bool
+}
+
+// recvBuffer is an unbounded channel of item.
+type recvBuffer struct {
+ c chan item
+ mu sync.Mutex
+ backlog []item
+}
+
+func newRecvBuffer() *recvBuffer {
+ b := &recvBuffer{
+ c: make(chan item, 1),
+ }
+ return b
+}
+
+func (b *recvBuffer) put(r item) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.backlog = append(b.backlog, r)
+ select {
+ case b.c <- b.backlog[0]:
+ b.backlog = b.backlog[1:]
+ default:
+ }
+}
+
+func (b *recvBuffer) load() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if len(b.backlog) > 0 {
+ select {
+ case b.c <- b.backlog[0]:
+ b.backlog = b.backlog[1:]
+ default:
+ }
+ }
+}
+
+// get returns the channel that receives an item in the buffer.
+//
+// Upon receipt of an item, the caller should call load to send another
+// item onto the channel if there is any.
+func (b *recvBuffer) get() <-chan item {
+ return b.c
+}
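+
+// A sketch of the intended consumption loop (illustrative only): receive one
+// item, then call load so the next backlogged item is moved onto the channel:
+//
+//   for {
+//       i := <-b.get()
+//       b.load()
+//       // ... process i ...
+//   }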
+
+// recvBufferReader implements io.Reader interface to read the data from
+// recvBuffer.
+type recvBufferReader struct {
+ ctx context.Context
+ recv *recvBuffer
+ last *bytes.Reader // Stores the remaining data from the previous call.
+ err error
+}
+
+// Read reads up to len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there is no additional data
+// available in recv. If Read returns any non-nil error, it will continue to
+// return that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ defer func() { r.err = err }()
+ if r.last != nil && r.last.Len() > 0 {
+ // Read remaining data left in last call.
+ return r.last.Read(p)
+ }
+ select {
+ case <-r.ctx.Done():
+ return 0, ContextErr(r.ctx.Err())
+ case i := <-r.recv.get():
+ r.recv.load()
+ m := i.(*recvMsg)
+ if m.err != nil {
+ return 0, m.err
+ }
+ r.last = bytes.NewReader(m.data)
+ return r.last.Read(p)
+ }
+}
+
+type streamState uint8
+
+const (
+ streamActive streamState = iota
+ streamWriteDone // EndStream sent
+ streamReadDone // EndStream received
+ streamDone // streamWriteDone and streamReadDone, or an RSTStream frame sent or received.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+ id uint32
+ // nil for client side Stream.
+ st ServerTransport
+ // ctx is the associated context of the stream.
+ ctx context.Context
+ cancel context.CancelFunc
+ // method records the associated RPC method of the stream.
+ method string
+ buf *recvBuffer
+ dec io.Reader
+ fc *inFlow
+ recvQuota uint32
+ // The accumulated inbound quota pending for window update.
+ updateQuota uint32
+ // The handler to control the window update procedure for both this
+ // particular stream and the associated transport.
+ windowHandler func(int)
+
+ sendQuotaPool *quotaPool
+ // headerChan is closed to indicate the end of header metadata reception.
+ headerChan chan struct{}
+ // header caches the received header metadata.
+ header metadata.MD
+ // The key-value map of trailer metadata.
+ trailer metadata.MD
+
+ mu sync.RWMutex // guards the following fields
+ // headerOk becomes true when the first header is about to be sent.
+ headerOk bool
+ state streamState
+ // true iff headerChan is closed. Used to avoid closing headerChan
+ // multiple times.
+ headerDone bool
+ // the status received from the server.
+ statusCode codes.Code
+ statusDesc string
+}
+
+// Header acquires the key-value pairs of header metadata once they are
+// available. It blocks until i) the metadata is ready, ii) there is no
+// header metadata, or iii) the stream is cancelled/expired.
+func (s *Stream) Header() (metadata.MD, error) {
+ select {
+ case <-s.ctx.Done():
+ return nil, ContextErr(s.ctx.Err())
+ case <-s.headerChan:
+ return s.header.Copy(), nil
+ }
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is called
+// before the entire stream is done, it could return an empty MD. Client
+// side only.
+func (s *Stream) Trailer() metadata.MD {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.trailer.Copy()
+}
+
+// ServerTransport returns the underlying ServerTransport for the stream.
+// The client side stream always returns nil.
+func (s *Stream) ServerTransport() ServerTransport {
+ return s.st
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+ return s.ctx
+}
+
+// TraceContext recreates the context of s with a trace.Trace.
+func (s *Stream) TraceContext(tr trace.Trace) {
+ s.ctx = trace.NewContext(s.ctx, tr)
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+ return s.method
+}
+
+// StatusCode returns statusCode received from the server.
+func (s *Stream) StatusCode() codes.Code {
+ return s.statusCode
+}
+
+// StatusDesc returns statusDesc received from the server.
+func (s *Stream) StatusDesc() string {
+ return s.statusDesc
+}
+
+// ErrIllegalTrailerSet indicates that the trailer has already been set or it
+// is too late to do so.
+var ErrIllegalTrailerSet = errors.New("transport: trailer has been set")
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. It can be called at most once. Server side only.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.trailer != nil {
+ return ErrIllegalTrailerSet
+ }
+ s.trailer = md.Copy()
+ return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+ s.buf.put(&m)
+}
+
+// Read reads all the data available for this Stream from the transport and
+// passes it into the decoder, which converts it into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+func (s *Stream) Read(p []byte) (n int, err error) {
+ n, err = s.dec.Read(p)
+ if err != nil {
+ return
+ }
+ s.windowHandler(n)
+ return
+}
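+
+// An illustrative caller (a sketch; the 5-byte prefix is the standard gRPC
+// message framing, and encoding/binary is assumed to be imported):
+//
+//   var hdr [5]byte
+//   if _, err := io.ReadFull(s, hdr[:]); err != nil { /* handle err */ }
+//   msg := make([]byte, binary.BigEndian.Uint32(hdr[1:]))
+//   _, err := io.ReadFull(s, msg) // hdr[0] flags whether msg is compressed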
+
+type key int
+
+// The key to save transport.Stream in the context.
+const streamKey = key(0)
+
+// newContextWithStream creates a new context from ctx and attaches stream
+// to it.
+func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
+ return context.WithValue(ctx, streamKey, stream)
+}
+
+// StreamFromContext returns the stream saved in ctx.
+func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
+ s, ok = ctx.Value(streamKey).(*Stream)
+ return
+}
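+
+// An illustrative round trip (sketch): a server-side handler can recover the
+// stream that newContextWithStream attached:
+//
+//   ctx := newContextWithStream(context.Background(), stream)
+//   if s, ok := StreamFromContext(ctx); ok {
+//       _ = s.Method()
+//   }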
+
+// state of transport
+type transportState int
+
+const (
+ reachable transportState = iota
+ unreachable
+ closing
+)
+
+// NewServerTransport creates a ServerTransport from conn, or returns a
+// non-nil error if it fails.
+func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) {
+ return newHTTP2Server(conn, maxStreams, authInfo)
+}
+
+// ConnectOptions covers all relevant options for dialing a server.
+type ConnectOptions struct {
+ // UserAgent is the application user agent.
+ UserAgent string
+ // Dialer specifies how to dial a network address.
+ Dialer func(string, time.Duration) (net.Conn, error)
+ // AuthOptions stores the credentials required to set up a client connection and/or issue RPCs.
+ AuthOptions []credentials.Credentials
+ // Timeout specifies the timeout for dialing a client connection.
+ Timeout time.Duration
+}
+
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, error) {
+ return newHTTP2Client(target, opts)
+}
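+
+// An illustrative dial (a sketch; the target address and timeout values are
+// made up for the example):
+//
+//   ct, err := NewClientTransport("localhost:50051", &ConnectOptions{
+//       Timeout: 5 * time.Second,
+//   })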
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
+ // Last indicates whether this write is the last piece for this stream.
+ Last bool
+ // Delay is a hint to the transport implementation that the data could be
+ // buffered for a batching write. The transport implementation is free to
+ // ignore the hint.
+ Delay bool
+}
+
+// CallHdr carries the information of a particular RPC.
+type CallHdr struct {
+ Host string // peer host
+ Method string // the operation to perform on the specified host
+}
+
+// ClientTransport is the common interface for all gRPC client side transport
+// implementations.
+type ClientTransport interface {
+ // Close tears down this transport. Once it returns, the transport
+ // should not be accessed any more. The caller must make sure this
+ // is called only once.
+ Close() error
+
+ // Write sends the data for the given stream. A nil stream indicates
+ // the write is to be performed on the transport as a whole.
+ Write(s *Stream, data []byte, opts *Options) error
+
+ // NewStream creates a Stream for an RPC.
+ NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+ // CloseStream clears the footprint of a stream when the stream is
+ // not needed any more. The err indicates the error incurred when
+ // CloseStream is called. Must be called when a stream is finished
+ // unless the associated transport is closing.
+ CloseStream(stream *Stream, err error)
+
+ // Error returns a channel that is closed when some I/O error
+ // happens. Typically the caller should have a goroutine to monitor
+ // this in order to take action (e.g., close the current transport
+ // and create a new one) in the error case. It should not return nil
+ // once the transport is initiated.
+ Error() <-chan struct{}
+}
+
+// ServerTransport is the common interface for all gRPC server side transport
+// implementations.
+type ServerTransport interface {
+ // WriteStatus sends the status of a stream to the client.
+ WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
+ // Write sends the data for the given stream.
+ Write(s *Stream, data []byte, opts *Options) error
+ // WriteHeader sends the header metadata for the given stream.
+ WriteHeader(s *Stream, md metadata.MD) error
+ // HandleStreams receives incoming streams using the given handler.
+ HandleStreams(func(*Stream))
+ // Close tears down the transport. Once it is called, the transport
+ // should not be accessed any more. All the pending streams and their
+ // handlers will be terminated asynchronously.
+ Close() error
+ // RemoteAddr returns the remote network address.
+ RemoteAddr() net.Addr
+}
+
+// StreamErrorf creates a StreamError with the specified error code and description.
+func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
+ return StreamError{
+ Code: c,
+ Desc: fmt.Sprintf(format, a...),
+ }
+}
+
+// ConnectionErrorf creates a ConnectionError with the specified error description.
+func ConnectionErrorf(format string, a ...interface{}) ConnectionError {
+ return ConnectionError{
+ Desc: fmt.Sprintf(format, a...),
+ }
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+ Desc string
+}
+
+func (e ConnectionError) Error() string {
+ return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// ErrConnClosing indicates that the transport is closing.
+var ErrConnClosing = ConnectionError{Desc: "transport is closing"}
+
+// StreamError is an error that only affects one stream within a connection.
+type StreamError struct {
+ Code codes.Code
+ Desc string
+}
+
+func (e StreamError) Error() string {
+ return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc)
+}
+
+// ContextErr converts the error from context package into a StreamError.
+func ContextErr(err error) StreamError {
+ switch err {
+ case context.DeadlineExceeded:
+ return StreamErrorf(codes.DeadlineExceeded, "%v", err)
+ case context.Canceled:
+ return StreamErrorf(codes.Canceled, "%v", err)
+ }
+ panic(fmt.Sprintf("Unexpected error from context package: %v", err))
+}
+
+// wait blocks until it can receive from ctx.Done, closing, or proceed.
+// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
+// If it receives from closing, it returns 0, ErrConnClosing.
+// If it receives from proceed, it returns the received integer, nil.
+func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
+ select {
+ case <-ctx.Done():
+ return 0, ContextErr(ctx.Err())
+ case <-closing:
+ return 0, ErrConnClosing
+ case i := <-proceed:
+ return i, nil
+ }
+}
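+
+// An illustrative use (a sketch; shutdownChan and sendQuotaPool.acquire are
+// assumed from the HTTP/2 transport implementations in this package): block
+// for send quota while respecting cancellation and shutdown:
+//
+//   q, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire())
+//   if err != nil { /* stream cancelled or transport closing */ }
+//   // q units of send quota are now owned by the caller.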