author	Tibor Vass <tiborvass@users.noreply.github.com>	2016-06-14 00:55:55 -0700
committer	GitHub <noreply@github.com>	2016-06-14 00:55:55 -0700
commit	a1e319e847f41e648ebede7f9d79999d75bd14c8 (patch)
tree	1c69e84e1dbd018a3fdf7f74119550bdb383de74
parent	c69614deae7f20e0dbff8399ce3a8d11014b0f43 (diff)
parent	ea4fef2d875de39044ca7570c35365b75086e8a5 (diff)
Merge pull request #23361 from docker/swarm
Add dependency to docker/swarmkit
-rw-r--r--api/client/idresolver/idresolver.go70
-rw-r--r--api/client/info.go16
-rw-r--r--api/client/inspect.go31
-rw-r--r--api/client/network/list.go10
-rw-r--r--api/client/node/accept.go40
-rw-r--r--api/client/node/cmd.go49
-rw-r--r--api/client/node/demote.go40
-rw-r--r--api/client/node/inspect.go141
-rw-r--r--api/client/node/list.go119
-rw-r--r--api/client/node/opts.go50
-rw-r--r--api/client/node/promote.go40
-rw-r--r--api/client/node/remove.go36
-rw-r--r--api/client/node/tasks.go72
-rw-r--r--api/client/node/update.go100
-rw-r--r--api/client/service/cmd.go32
-rw-r--r--api/client/service/create.go47
-rw-r--r--api/client/service/inspect.go127
-rw-r--r--api/client/service/list.go97
-rw-r--r--api/client/service/opts.go462
-rw-r--r--api/client/service/remove.go47
-rw-r--r--api/client/service/scale.go86
-rw-r--r--api/client/service/tasks.go65
-rw-r--r--api/client/service/update.go244
-rw-r--r--api/client/swarm/cmd.go30
-rw-r--r--api/client/swarm/init.go61
-rw-r--r--api/client/swarm/inspect.go56
-rw-r--r--api/client/swarm/join.go65
-rw-r--r--api/client/swarm/leave.go44
-rw-r--r--api/client/swarm/opts.go120
-rw-r--r--api/client/swarm/update.go93
-rw-r--r--api/client/tag.go20
-rw-r--r--api/client/task/print.go79
-rw-r--r--api/client/utils.go25
-rw-r--r--api/server/httputils/errors.go4
-rw-r--r--api/server/router/network/backend.go3
-rw-r--r--api/server/router/network/filter.go98
-rw-r--r--api/server/router/network/network.go15
-rw-r--r--api/server/router/network/network_routes.go51
-rw-r--r--api/server/router/swarm/backend.go26
-rw-r--r--api/server/router/swarm/cluster.go44
-rw-r--r--api/server/router/swarm/cluster_routes.go229
-rw-r--r--api/server/router/system/system.go15
-rw-r--r--api/server/router/system/system_routes.go3
-rw-r--r--cli/cobraadaptor/adaptor.go6
-rw-r--r--cli/usage.go2
-rw-r--r--cmd/dockerd/daemon.go24
-rw-r--r--container/container.go24
-rw-r--r--container/state.go28
-rw-r--r--daemon/cluster/cluster.go1056
-rw-r--r--daemon/cluster/convert/container.go116
-rw-r--r--daemon/cluster/convert/network.go194
-rw-r--r--daemon/cluster/convert/node.go95
-rw-r--r--daemon/cluster/convert/service.go252
-rw-r--r--daemon/cluster/convert/swarm.go116
-rw-r--r--daemon/cluster/convert/task.go53
-rw-r--r--daemon/cluster/executor/backend.go35
-rw-r--r--daemon/cluster/executor/container/adapter.go229
-rw-r--r--daemon/cluster/executor/container/container.go415
-rw-r--r--daemon/cluster/executor/container/controller.go305
-rw-r--r--daemon/cluster/executor/container/errors.go12
-rw-r--r--daemon/cluster/executor/container/executor.go139
-rw-r--r--daemon/cluster/filters.go93
-rw-r--r--daemon/cluster/helpers.go108
-rw-r--r--daemon/cluster/provider/network.go36
-rw-r--r--daemon/container.go3
-rw-r--r--daemon/container_operations.go7
-rw-r--r--daemon/create.go17
-rw-r--r--daemon/daemon.go12
-rw-r--r--daemon/inspect.go6
-rw-r--r--daemon/inspect_windows.go2
-rw-r--r--daemon/list.go11
-rw-r--r--daemon/network.go152
-rw-r--r--daemon/network/settings.go2
-rw-r--r--daemon/wait.go17
-rw-r--r--docs/reference/api/docker_remote_api_v1.24.md1114
-rw-r--r--docs/reference/commandline/index.md20
-rw-r--r--docs/reference/commandline/info.md7
-rw-r--r--docs/reference/commandline/inspect.md18
-rw-r--r--docs/reference/commandline/node_accept.md28
-rw-r--r--docs/reference/commandline/node_demote.md28
-rw-r--r--docs/reference/commandline/node_inspect.md108
-rw-r--r--docs/reference/commandline/node_ls.md89
-rw-r--r--docs/reference/commandline/node_promote.md28
-rw-r--r--docs/reference/commandline/node_reject.md28
-rw-r--r--docs/reference/commandline/node_rm.md38
-rw-r--r--docs/reference/commandline/node_tasks.md94
-rw-r--r--docs/reference/commandline/node_update.md26
-rw-r--r--docs/reference/commandline/swarm_init.md69
-rw-r--r--docs/reference/commandline/swarm_join.md68
-rw-r--r--docs/reference/commandline/swarm_leave.md52
-rw-r--r--docs/reference/commandline/swarm_update.md37
-rw-r--r--docs/swarm/index.md79
-rw-r--r--docs/swarm/key-concepts.md85
-rw-r--r--docs/swarm/menu.md21
-rw-r--r--docs/swarm/swarm-tutorial/add-nodes.md64
-rw-r--r--docs/swarm/swarm-tutorial/create-swarm.md77
-rw-r--r--docs/swarm/swarm-tutorial/delete-service.md44
-rw-r--r--docs/swarm/swarm-tutorial/deploy-service.md50
-rw-r--r--docs/swarm/swarm-tutorial/drain-node.md129
-rw-r--r--docs/swarm/swarm-tutorial/index.md87
-rw-r--r--docs/swarm/swarm-tutorial/inspect-service.md124
-rw-r--r--docs/swarm/swarm-tutorial/menu.md21
-rw-r--r--docs/swarm/swarm-tutorial/rolling-update.md105
-rw-r--r--docs/swarm/swarm-tutorial/scale-service.md75
-rwxr-xr-xhack/vendor.sh31
-rw-r--r--integration-cli/check_test.go55
-rw-r--r--integration-cli/daemon.go47
-rw-r--r--integration-cli/daemon_swarm.go178
-rw-r--r--integration-cli/docker_api_attach_test.go4
-rw-r--r--integration-cli/docker_api_containers_test.go2
-rw-r--r--integration-cli/docker_api_swarm_test.go573
-rw-r--r--integration-cli/docker_api_test.go2
-rw-r--r--integration-cli/docker_cli_rename_test.go2
-rw-r--r--integration-cli/docker_utils.go65
-rw-r--r--opts/opts.go37
-rw-r--r--runconfig/hostconfig_unix.go2
-rw-r--r--vendor/src/bitbucket.org/ww/goautoneg/Makefile13
-rw-r--r--vendor/src/bitbucket.org/ww/goautoneg/README.txt67
-rw-r--r--vendor/src/bitbucket.org/ww/goautoneg/autoneg.go162
-rw-r--r--vendor/src/github.com/beorn7/perks/quantile/exampledata.txt2388
-rw-r--r--vendor/src/github.com/beorn7/perks/quantile/stream.go292
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/LICENSE24
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/auth/auth.go94
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/certdb/README.md58
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go40
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/config/config.go563
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go188
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/csr/csr.go414
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/errors/doc.go46
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/errors/error.go420
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/errors/http.go47
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go42
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go478
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/info/info.go15
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/initca/initca.go278
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/log/log.go174
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go13
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/signer/local/local.go447
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/signer/signer.go385
-rw-r--r--vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE13
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/crc/crc.go43
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go75
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go29
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go79
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go87
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go65
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go60
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go28
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go42
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go80
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go26
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go29
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/idutil/id.go78
-rw-r--r--vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go60
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/design.md57
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/doc.go293
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/log.go361
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/log_unstable.go139
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/logger.go126
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/node.go488
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/progress.go245
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/raft.go898
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go1768
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto86
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/rawnode.go228
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/status.go76
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/storage.go252
-rw-r--r--vendor/src/github.com/coreos/etcd/raft/util.go116
-rw-r--r--vendor/src/github.com/coreos/etcd/snap/db.go74
-rw-r--r--vendor/src/github.com/coreos/etcd/snap/message.go59
-rw-r--r--vendor/src/github.com/coreos/etcd/snap/metrics.go41
-rw-r--r--vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go332
-rw-r--r--vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto14
-rw-r--r--vendor/src/github.com/coreos/etcd/snap/snapshotter.go189
-rw-r--r--vendor/src/github.com/coreos/etcd/wal/decoder.go103
-rw-r--r--vendor/src/github.com/coreos/etcd/wal/doc.go68
-rw-r--r--vendor/src/github.com/coreos/etcd/wal/encoder.go89
-rw-r--r--vendor/src/github.com/coreos/etcd/wal/metrics.go38
-rw-r--r--vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go45
-rw-r--r--vendor/src/github.com/coreos/etcd/wal/repair.go106
-rw-r--r--vendor/src/github.com/coreos/etcd/wal/util.go93
-rw-r--r--vendor/src/github.com/coreos/etcd/wal/wal.go562
-rw-r--r--vendor/src/github.com/coreos/etcd/wal/walpb/record.go29
-rw-r--r--vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go495
-rw-r--r--vendor/src/github.com/coreos/etcd/wal/walpb/record.proto20
-rw-r--r--vendor/src/github.com/coreos/pkg/LICENSE202
-rw-r--r--vendor/src/github.com/coreos/pkg/capnslog/README.md39
-rw-r--r--vendor/src/github.com/coreos/pkg/capnslog/formatters.go106
-rw-r--r--vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go96
-rw-r--r--vendor/src/github.com/coreos/pkg/capnslog/init.go49
-rw-r--r--vendor/src/github.com/coreos/pkg/capnslog/init_windows.go25
-rw-r--r--vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go68
-rw-r--r--vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go39
-rw-r--r--vendor/src/github.com/coreos/pkg/capnslog/logmap.go240
-rw-r--r--vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go158
-rw-r--r--vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go65
-rw-r--r--vendor/src/github.com/docker/engine-api/client/client.go16
-rw-r--r--vendor/src/github.com/docker/engine-api/client/errors.go51
-rw-r--r--vendor/src/github.com/docker/engine-api/client/interface.go17
-rw-r--r--vendor/src/github.com/docker/engine-api/client/node_inspect.go25
-rw-r--r--vendor/src/github.com/docker/engine-api/client/node_list.go36
-rw-r--r--vendor/src/github.com/docker/engine-api/client/node_remove.go10
-rw-r--r--vendor/src/github.com/docker/engine-api/client/node_update.go18
-rw-r--r--vendor/src/github.com/docker/engine-api/client/request.go4
-rw-r--r--vendor/src/github.com/docker/engine-api/client/service_create.go22
-rw-r--r--vendor/src/github.com/docker/engine-api/client/service_inspect.go25
-rw-r--r--vendor/src/github.com/docker/engine-api/client/service_list.go35
-rw-r--r--vendor/src/github.com/docker/engine-api/client/service_remove.go10
-rw-r--r--vendor/src/github.com/docker/engine-api/client/service_update.go18
-rw-r--r--vendor/src/github.com/docker/engine-api/client/swarm_init.go21
-rw-r--r--vendor/src/github.com/docker/engine-api/client/swarm_inspect.go21
-rw-r--r--vendor/src/github.com/docker/engine-api/client/swarm_join.go13
-rw-r--r--vendor/src/github.com/docker/engine-api/client/swarm_leave.go18
-rw-r--r--vendor/src/github.com/docker/engine-api/client/swarm_update.go18
-rw-r--r--vendor/src/github.com/docker/engine-api/client/task_inspect.go34
-rw-r--r--vendor/src/github.com/docker/engine-api/client/task_list.go35
-rw-r--r--vendor/src/github.com/docker/engine-api/types/client.go22
-rw-r--r--vendor/src/github.com/docker/engine-api/types/network/network.go5
-rw-r--r--vendor/src/github.com/docker/engine-api/types/swarm/common.go21
-rw-r--r--vendor/src/github.com/docker/engine-api/types/swarm/container.go67
-rw-r--r--vendor/src/github.com/docker/engine-api/types/swarm/network.go99
-rw-r--r--vendor/src/github.com/docker/engine-api/types/swarm/node.go118
-rw-r--r--vendor/src/github.com/docker/engine-api/types/swarm/service.go44
-rw-r--r--vendor/src/github.com/docker/engine-api/types/swarm/swarm.go107
-rw-r--r--vendor/src/github.com/docker/engine-api/types/swarm/task.go110
-rw-r--r--vendor/src/github.com/docker/engine-api/types/types.go4
-rw-r--r--vendor/src/github.com/docker/go-connections/nat/nat.go162
-rw-r--r--vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go8
-rw-r--r--vendor/src/github.com/docker/go-connections/tlsconfig/config.go7
-rw-r--r--vendor/src/github.com/docker/go-events/retry.go107
-rw-r--r--vendor/src/github.com/docker/libnetwork/Dockerfile.build2
-rw-r--r--vendor/src/github.com/docker/libnetwork/Makefile2
-rw-r--r--vendor/src/github.com/docker/libnetwork/ROADMAP.md2
-rw-r--r--vendor/src/github.com/docker/libnetwork/agent.go267
-rw-r--r--vendor/src/github.com/docker/libnetwork/agent.pb.go893
-rw-r--r--vendor/src/github.com/docker/libnetwork/agent.proto66
-rw-r--r--vendor/src/github.com/docker/libnetwork/circle.yml1
-rw-r--r--vendor/src/github.com/docker/libnetwork/cluster/provider.go10
-rw-r--r--vendor/src/github.com/docker/libnetwork/config/config.go42
-rw-r--r--vendor/src/github.com/docker/libnetwork/controller.go279
-rw-r--r--vendor/src/github.com/docker/libnetwork/discoverapi/discoverapi.go24
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go51
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_store.go2
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go4
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go24
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go12
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go4
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go2
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go6
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_endpoint.go6
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_joinleave.go3
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go27
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go6
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_joinleave.go3
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go27
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/overlay/encryption.go578
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/overlay/joinleave.go64
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go8
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_network.go214
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_utils.go75
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.go43
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.pb.go468
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.proto27
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/overlay/ovmanager/ovmanager.go248
-rw-r--r--vendor/src/github.com/docker/libnetwork/drivers/overlay/peerdb.go12
-rw-r--r--vendor/src/github.com/docker/libnetwork/drvregistry/drvregistry.go2
-rw-r--r--vendor/src/github.com/docker/libnetwork/endpoint.go44
-rw-r--r--vendor/src/github.com/docker/libnetwork/endpoint_info.go33
-rw-r--r--vendor/src/github.com/docker/libnetwork/error.go12
-rw-r--r--vendor/src/github.com/docker/libnetwork/ipamapi/contract.go5
-rw-r--r--vendor/src/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go4
-rw-r--r--vendor/src/github.com/docker/libnetwork/ipams/remote/api/api.go8
-rw-r--r--vendor/src/github.com/docker/libnetwork/ipvs/constants.go130
-rw-r--r--vendor/src/github.com/docker/libnetwork/ipvs/ipvs.go113
-rw-r--r--vendor/src/github.com/docker/libnetwork/ipvs/netlink.go234
-rw-r--r--vendor/src/github.com/docker/libnetwork/netutils/utils_linux.go22
-rw-r--r--vendor/src/github.com/docker/libnetwork/network.go53
-rw-r--r--vendor/src/github.com/docker/libnetwork/networkdb/broadcast.go66
-rw-r--r--vendor/src/github.com/docker/libnetwork/networkdb/cluster.go121
-rw-r--r--vendor/src/github.com/docker/libnetwork/networkdb/delegate.go158
-rw-r--r--vendor/src/github.com/docker/libnetwork/networkdb/message.go148
-rw-r--r--vendor/src/github.com/docker/libnetwork/networkdb/networkdb.go19
-rw-r--r--vendor/src/github.com/docker/libnetwork/networkdb/networkdb.pb.go2266
-rw-r--r--vendor/src/github.com/docker/libnetwork/networkdb/networkdb.proto156
-rw-r--r--vendor/src/github.com/docker/libnetwork/ns/init_linux.go29
-rw-r--r--vendor/src/github.com/docker/libnetwork/osl/interface_linux.go250
-rw-r--r--vendor/src/github.com/docker/libnetwork/osl/namespace_linux.go80
-rw-r--r--vendor/src/github.com/docker/libnetwork/osl/neigh_linux.go126
-rw-r--r--vendor/src/github.com/docker/libnetwork/osl/options_linux.go6
-rw-r--r--vendor/src/github.com/docker/libnetwork/osl/route_linux.go84
-rw-r--r--vendor/src/github.com/docker/libnetwork/osl/sandbox.go12
-rw-r--r--vendor/src/github.com/docker/libnetwork/resolver.go43
-rw-r--r--vendor/src/github.com/docker/libnetwork/sandbox.go89
-rw-r--r--vendor/src/github.com/docker/libnetwork/sandbox_dns_unix.go2
-rw-r--r--vendor/src/github.com/docker/libnetwork/service.go95
-rw-r--r--vendor/src/github.com/docker/libnetwork/service_linux.go646
-rw-r--r--vendor/src/github.com/docker/libnetwork/service_unsupported.go19
-rw-r--r--vendor/src/github.com/docker/libnetwork/types/types.go15
-rw-r--r--vendor/src/github.com/docker/swarmkit/LICENSE201
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/agent.go354
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/config.go49
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/errors.go24
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/exec/controller.go267
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/exec/controller_test.mock.go143
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/exec/errors.go96
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/exec/executor.go23
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/helpers.go13
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/node.go738
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/reporter.go124
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/session.go265
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/storage.go224
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/task.go243
-rw-r--r--vendor/src/github.com/docker/swarmkit/agent/worker.go260
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/ca.pb.go1616
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/ca.proto51
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/control.pb.go10185
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/control.proto275
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/dispatcher.pb.go2440
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/dispatcher.proto154
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/duration/duration.pb.go456
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/duration/duration.proto100
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/duration/gen.go3
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/gen.go3
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/objects.pb.go3469
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/objects.proto207
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/raft.pb.go2764
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/raft.proto100
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/snapshot.pb.go1115
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/snapshot.proto40
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/specs.pb.go3882
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/specs.proto231
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/timestamp/gen.go3
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.pb.go469
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.proto121
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/types.pb.go10052
-rw-r--r--vendor/src/github.com/docker/swarmkit/api/types.proto564
-rw-r--r--vendor/src/github.com/docker/swarmkit/ca/auth.go213
-rw-r--r--vendor/src/github.com/docker/swarmkit/ca/certificates.go711
-rw-r--r--vendor/src/github.com/docker/swarmkit/ca/config.go513
-rw-r--r--vendor/src/github.com/docker/swarmkit/ca/forward.go67
-rw-r--r--vendor/src/github.com/docker/swarmkit/ca/server.go648
-rw-r--r--vendor/src/github.com/docker/swarmkit/ca/transport.go194
-rw-r--r--vendor/src/github.com/docker/swarmkit/identity/doc.go17
-rw-r--r--vendor/src/github.com/docker/swarmkit/identity/randomid.go83
-rw-r--r--vendor/src/github.com/docker/swarmkit/ioutils/ioutils.go40
-rw-r--r--vendor/src/github.com/docker/swarmkit/log/context.go37
-rw-r--r--vendor/src/github.com/docker/swarmkit/log/grpc.go8
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/allocator/allocator.go221
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/allocator/doc.go18
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/allocator/network.go777
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go635
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/portallocator.go244
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/controlapi/cluster.go197
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/controlapi/common.go86
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/controlapi/network.go244
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/controlapi/node.go243
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/controlapi/server.go27
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/controlapi/service.go245
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/controlapi/task.go136
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go760
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/dispatcher/heartbeat/heartbeat.go39
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/dispatcher/nodes.go162
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/dispatcher/period.go28
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/doc.go1
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/keymanager/keymanager.go229
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/manager.go670
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/orchestrator/global.go408
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/orchestrator/replicated.go170
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/orchestrator/restart.go383
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/orchestrator/services.go163
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/orchestrator/task_reaper.go203
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/orchestrator/tasks.go233
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/orchestrator/updater.go228
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/raftpicker/cluster.go12
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/raftpicker/raftpicker.go133
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/scheduler/constraint.go74
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/scheduler/expr.go95
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/scheduler/filter.go131
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/scheduler/indexed_node_heap.go165
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go64
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/scheduler/pipeline.go54
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/scheduler/scheduler.go433
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/doc.go32
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/proposer.go17
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go208
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/raft/raft.go1161
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/raft/storage.go325
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/raft/util.go82
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/raft/wait.go70
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/store/apply.go48
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/store/by.go113
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/store/clusters.go227
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/store/combinators.go14
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/store/memory.go731
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/store/networks.go221
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/store/nodes.go254
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/store/object.go29
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/store/services.go221
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/store/tasks.go296
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/watch.go488
-rw-r--r--vendor/src/github.com/docker/swarmkit/manager/state/watch/watch.go48
-rw-r--r--vendor/src/github.com/docker/swarmkit/picker/picker.go330
-rw-r--r--vendor/src/github.com/docker/swarmkit/protobuf/plugin/gen.go3
-rw-r--r--vendor/src/github.com/docker/swarmkit/protobuf/plugin/helpers.go11
-rw-r--r--vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go464
-rw-r--r--vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.proto25
-rw-r--r--vendor/src/github.com/docker/swarmkit/protobuf/ptypes/doc.go9
-rw-r--r--vendor/src/github.com/docker/swarmkit/protobuf/ptypes/duration.go102
-rw-r--r--vendor/src/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go135
-rw-r--r--vendor/src/github.com/gogo/protobuf/LICENSE36
-rw-r--r--vendor/src/github.com/gogo/protobuf/gogoproto/Makefile36
-rw-r--r--vendor/src/github.com/gogo/protobuf/gogoproto/doc.go168
-rw-r--r--vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go661
-rw-r--r--vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.golden45
-rw-r--r--vendor/src/github.com/gogo/protobuf/gogoproto/gogo.proto120
-rw-r--r--vendor/src/github.com/gogo/protobuf/gogoproto/helper.go308
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/Makefile43
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/clone.go228
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/decode.go872
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/decode_gogo.go169
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/encode.go1325
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/encode_gogo.go354
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/equal.go276
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/extensions.go518
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/extensions_gogo.go236
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/lib.go894
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/lib_gogo.go40
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/message_set.go280
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/pointer_reflect.go479
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe.go266
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go108
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/properties.go923
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/properties_gogo.go64
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/skip_gogo.go117
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/text.go793
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/text_gogo.go55
-rw-r--r--vendor/src/github.com/gogo/protobuf/proto/text_parser.go849
-rw-r--r--vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile33
-rw-r--r--vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go2017
-rw-r--r--vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go635
-rw-r--r--vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go355
-rw-r--r--vendor/src/github.com/gogo/protobuf/sortkeys/sortkeys.go99
-rw-r--r--vendor/src/github.com/golang/mock/LICENSE202
-rw-r--r--vendor/src/github.com/golang/mock/gomock/call.go268
-rw-r--r--vendor/src/github.com/golang/mock/gomock/callset.go76
-rw-r--r--vendor/src/github.com/golang/mock/gomock/controller.go167
-rw-r--r--vendor/src/github.com/golang/mock/gomock/matchers.go97
-rw-r--r--vendor/src/github.com/google/certificate-transparency/LICENSE202
-rw-r--r--vendor/src/github.com/google/certificate-transparency/go/README.md25
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/asn1/asn1.go956
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/asn1/common.go163
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/asn1/marshal.go581
-rw-r--r--vendor/src/github.com/google/certificate-transparency/go/client/logclient.go357
-rw-r--r--vendor/src/github.com/google/certificate-transparency/go/serialization.go512
-rw-r--r--vendor/src/github.com/google/certificate-transparency/go/signatures.go131
-rw-r--r--vendor/src/github.com/google/certificate-transparency/go/types.go363
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/cert_pool.go116
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/pem_decrypt.go233
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/pkcs1.go124
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/pkcs8.go56
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/pkix/pkix.go173
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/root.go17
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/root_darwin.go83
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/root_plan9.go33
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/root_stub.go14
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/root_unix.go37
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/root_windows.go229
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/sec1.go85
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/verify.go476
-rwxr-xr-xvendor/src/github.com/google/certificate-transparency/go/x509/x509.go1622
-rw-r--r--vendor/src/github.com/hashicorp/go-immutable-radix/.gitignore24
-rw-r--r--vendor/src/github.com/hashicorp/go-immutable-radix/.travis.yml3
-rw-r--r--vendor/src/github.com/hashicorp/go-immutable-radix/LICENSE363
-rw-r--r--vendor/src/github.com/hashicorp/go-immutable-radix/README.md41
-rw-r--r--vendor/src/github.com/hashicorp/go-immutable-radix/edges.go21
-rw-r--r--vendor/src/github.com/hashicorp/go-immutable-radix/iradix.go333
-rw-r--r--vendor/src/github.com/hashicorp/go-immutable-radix/iter.go81
-rw-r--r--vendor/src/github.com/hashicorp/go-immutable-radix/node.go289
-rw-r--r--vendor/src/github.com/hashicorp/go-memdb/.gitignore24
-rw-r--r--vendor/src/github.com/hashicorp/go-memdb/LICENSE363
-rw-r--r--vendor/src/github.com/hashicorp/go-memdb/README.md93
-rw-r--r--vendor/src/github.com/hashicorp/go-memdb/index.go330
-rw-r--r--vendor/src/github.com/hashicorp/go-memdb/memdb.go89
-rw-r--r--vendor/src/github.com/hashicorp/go-memdb/schema.go76
-rw-r--r--vendor/src/github.com/hashicorp/go-memdb/txn.go475
-rw-r--r--vendor/src/github.com/hashicorp/golang-lru/LICENSE362
-rw-r--r--vendor/src/github.com/hashicorp/golang-lru/simplelru/lru.go160
-rw-r--r--vendor/src/github.com/matttproud/golang_protobuf_extensions/LICENSE201
-rw-r--r--vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go75
-rw-r--r--vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go16
-rw-r--r--vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go46
-rw-r--r--vendor/src/github.com/mreiferson/go-httpclient/.gitignore1
-rw-r--r--vendor/src/github.com/mreiferson/go-httpclient/.travis.yml11
-rw-r--r--vendor/src/github.com/mreiferson/go-httpclient/LICENSE17
-rw-r--r--vendor/src/github.com/mreiferson/go-httpclient/README.md41
-rw-r--r--vendor/src/github.com/mreiferson/go-httpclient/httpclient.go237
-rw-r--r--vendor/src/github.com/pivotal-golang/clock/LICENSE202
-rw-r--r--vendor/src/github.com/pivotal-golang/clock/README.md1
-rw-r--r--vendor/src/github.com/pivotal-golang/clock/clock.go42
-rw-r--r--vendor/src/github.com/pivotal-golang/clock/ticker.go20
-rw-r--r--vendor/src/github.com/pivotal-golang/clock/timer.go25
-rw-r--r--vendor/src/github.com/prometheus/client_golang/LICENSE201
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/.gitignore1
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/README.md53
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/collector.go75
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/counter.go175
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/desc.go201
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/doc.go109
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/expvar.go119
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/gauge.go147
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/go_collector.go263
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/histogram.go450
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/http.go361
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/metric.go166
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/process_collector.go142
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/push.go65
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/registry.go726
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/summary.go540
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/untyped.go145
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/value.go234
-rw-r--r--vendor/src/github.com/prometheus/client_golang/prometheus/vec.go247
-rw-r--r--vendor/src/github.com/prometheus/client_model/LICENSE201
-rw-r--r--vendor/src/github.com/prometheus/client_model/go/metrics.pb.go364
-rw-r--r--vendor/src/github.com/prometheus/client_model/ruby/LICENSE201
-rw-r--r--vendor/src/github.com/prometheus/common/LICENSE201
-rw-r--r--vendor/src/github.com/prometheus/common/expfmt/decode.go411
-rw-r--r--vendor/src/github.com/prometheus/common/expfmt/encode.go88
-rw-r--r--vendor/src/github.com/prometheus/common/expfmt/expfmt.go40
-rw-r--r--vendor/src/github.com/prometheus/common/expfmt/fuzz.go36
-rw-r--r--vendor/src/github.com/prometheus/common/expfmt/json_decode.go162
-rw-r--r--vendor/src/github.com/prometheus/common/expfmt/text_create.go305
-rw-r--r--vendor/src/github.com/prometheus/common/expfmt/text_parse.go746
-rw-r--r--vendor/src/github.com/prometheus/common/model/alert.go109
-rw-r--r--vendor/src/github.com/prometheus/common/model/fingerprinting.go105
-rw-r--r--vendor/src/github.com/prometheus/common/model/labels.go188
-rw-r--r--vendor/src/github.com/prometheus/common/model/labelset.go153
-rw-r--r--vendor/src/github.com/prometheus/common/model/metric.go81
-rw-r--r--vendor/src/github.com/prometheus/common/model/model.go16
-rw-r--r--vendor/src/github.com/prometheus/common/model/signature.go190
-rw-r--r--vendor/src/github.com/prometheus/common/model/silence.go60
-rw-r--r--vendor/src/github.com/prometheus/common/model/time.go230
-rw-r--r--vendor/src/github.com/prometheus/common/model/value.go395
-rw-r--r--vendor/src/github.com/prometheus/procfs/.travis.yml7
-rw-r--r--vendor/src/github.com/prometheus/procfs/AUTHORS.md20
-rw-r--r--vendor/src/github.com/prometheus/procfs/CONTRIBUTING.md18
-rw-r--r--vendor/src/github.com/prometheus/procfs/LICENSE201
-rw-r--r--vendor/src/github.com/prometheus/procfs/Makefile6
-rw-r--r--vendor/src/github.com/prometheus/procfs/NOTICE7
-rw-r--r--vendor/src/github.com/prometheus/procfs/README.md10
-rw-r--r--vendor/src/github.com/prometheus/procfs/doc.go45
-rw-r--r--vendor/src/github.com/prometheus/procfs/fs.go40
-rw-r--r--vendor/src/github.com/prometheus/procfs/ipvs.go223
-rw-r--r--vendor/src/github.com/prometheus/procfs/mdstat.go158
-rw-r--r--vendor/src/github.com/prometheus/procfs/proc.go202
-rw-r--r--vendor/src/github.com/prometheus/procfs/proc_io.go54
-rw-r--r--vendor/src/github.com/prometheus/procfs/proc_limits.go111
-rw-r--r--vendor/src/github.com/prometheus/procfs/proc_stat.go175
-rw-r--r--vendor/src/github.com/prometheus/procfs/stat.go55
-rw-r--r--vendor/src/github.com/vishvananda/netlink/Makefile4
-rw-r--r--vendor/src/github.com/vishvananda/netlink/addr_linux.go182
-rw-r--r--vendor/src/github.com/vishvananda/netlink/bpf_linux.go60
-rw-r--r--vendor/src/github.com/vishvananda/netlink/class.go12
-rw-r--r--vendor/src/github.com/vishvananda/netlink/class_linux.go64
-rw-r--r--vendor/src/github.com/vishvananda/netlink/filter.go201
-rw-r--r--vendor/src/github.com/vishvananda/netlink/filter_linux.go271
-rw-r--r--vendor/src/github.com/vishvananda/netlink/handle.go86
-rw-r--r--vendor/src/github.com/vishvananda/netlink/link.go34
-rw-r--r--vendor/src/github.com/vishvananda/netlink/link_linux.go277
-rw-r--r--vendor/src/github.com/vishvananda/netlink/neigh_linux.go51
-rw-r--r--vendor/src/github.com/vishvananda/netlink/netlink.go2
-rw-r--r--vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go76
-rw-r--r--vendor/src/github.com/vishvananda/netlink/nl/tc_linux.go124
-rw-r--r--vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go18
-rw-r--r--vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go33
-rw-r--r--vendor/src/github.com/vishvananda/netlink/protinfo_linux.go8
-rw-r--r--vendor/src/github.com/vishvananda/netlink/qdisc.go47
-rw-r--r--vendor/src/github.com/vishvananda/netlink/qdisc_linux.go55
-rw-r--r--vendor/src/github.com/vishvananda/netlink/route.go8
-rw-r--r--vendor/src/github.com/vishvananda/netlink/route_linux.go48
-rw-r--r--vendor/src/github.com/vishvananda/netlink/rule_linux.go24
-rw-r--r--vendor/src/github.com/vishvananda/netlink/xfrm.go10
-rw-r--r--vendor/src/github.com/vishvananda/netlink/xfrm_policy.go15
-rw-r--r--vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go222
-rw-r--r--vendor/src/github.com/vishvananda/netlink/xfrm_state.go53
-rw-r--r--vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go302
-rw-r--r--vendor/src/golang.org/x/crypto/LICENSE27
-rw-r--r--vendor/src/golang.org/x/crypto/bcrypt/base64.go35
-rw-r--r--vendor/src/golang.org/x/crypto/bcrypt/bcrypt.go294
-rw-r--r--vendor/src/golang.org/x/crypto/blowfish/block.go159
-rw-r--r--vendor/src/golang.org/x/crypto/blowfish/cipher.go91
-rw-r--r--vendor/src/golang.org/x/crypto/blowfish/const.go199
-rw-r--r--vendor/src/golang.org/x/crypto/pkcs12/bmp-string.go50
-rw-r--r--vendor/src/golang.org/x/crypto/pkcs12/crypto.go131
-rw-r--r--vendor/src/golang.org/x/crypto/pkcs12/errors.go23
-rw-r--r--vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go274
-rw-r--r--vendor/src/golang.org/x/crypto/pkcs12/mac.go45
-rw-r--r--vendor/src/golang.org/x/crypto/pkcs12/pbkdf.go170
-rw-r--r--vendor/src/golang.org/x/crypto/pkcs12/pkcs12.go342
-rw-r--r--vendor/src/golang.org/x/crypto/pkcs12/safebags.go57
-rw-r--r--volume/volume.go11
600 files changed, 131469 insertions(+), 1471 deletions(-)
diff --git a/api/client/idresolver/idresolver.go b/api/client/idresolver/idresolver.go
new file mode 100644
index 0000000000..05c4c9c366
--- /dev/null
+++ b/api/client/idresolver/idresolver.go
@@ -0,0 +1,70 @@
+package idresolver
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/engine-api/client"
+ "github.com/docker/engine-api/types/swarm"
+)
+
+// IDResolver provides ID to Name resolution.
+type IDResolver struct {
+ client client.APIClient
+ noResolve bool
+ cache map[string]string
+}
+
+// New creates a new IDResolver.
+func New(client client.APIClient, noResolve bool) *IDResolver {
+ return &IDResolver{
+ client: client,
+ noResolve: noResolve,
+ cache: make(map[string]string),
+ }
+}
+
+func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) {
+ switch t.(type) {
+ case swarm.Node:
+ node, err := r.client.NodeInspect(ctx, id)
+ if err != nil {
+ return id, nil
+ }
+ if node.Spec.Annotations.Name != "" {
+ return node.Spec.Annotations.Name, nil
+ }
+ if node.Description.Hostname != "" {
+ return node.Description.Hostname, nil
+ }
+ return id, nil
+ case swarm.Service:
+ service, err := r.client.ServiceInspect(ctx, id)
+ if err != nil {
+ return id, nil
+ }
+ return service.Spec.Annotations.Name, nil
+ default:
+ return "", fmt.Errorf("unsupported type")
+ }
+
+}
+
+// Resolve will attempt to resolve an ID to a Name by querying the manager.
+// Results are stored in a cache.
+// If the `-n` flag is used on the command line, resolution is disabled.
+func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) {
+ if r.noResolve {
+ return id, nil
+ }
+ if name, ok := r.cache[id]; ok {
+ return name, nil
+ }
+ name, err := r.get(ctx, t, id)
+ if err != nil {
+ return "", err
+ }
+ r.cache[id] = name
+ return name, nil
+}
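
A minimal usage sketch for the new idresolver package (illustrative only, not part of this diff; assumes an initialized engine-api client apiClient, a context ctx, and a task t with a NodeID field):

	resolver := idresolver.New(apiClient, false)
	// Passing a swarm.Node value selects the node branch of get();
	// on lookup failure the raw ID is returned unchanged.
	name, err := resolver.Resolve(ctx, swarm.Node{}, t.NodeID)
	if err != nil {
		return err
	}
	fmt.Println(name)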
diff --git a/api/client/info.go b/api/client/info.go
index 283b77b3df..4566f6c1a6 100644
--- a/api/client/info.go
+++ b/api/client/info.go
@@ -10,6 +10,7 @@ import (
"github.com/docker/docker/pkg/ioutils"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/utils"
+ "github.com/docker/engine-api/types/swarm"
"github.com/docker/go-units"
)
@@ -68,6 +69,21 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
fmt.Fprintf(cli.out, "\n")
}
+ fmt.Fprintf(cli.out, "Swarm: %v\n", info.Swarm.LocalNodeState)
+ if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive {
+ fmt.Fprintf(cli.out, " NodeID: %s\n", info.Swarm.NodeID)
+ if info.Swarm.Error != "" {
+ fmt.Fprintf(cli.out, " Error: %v\n", info.Swarm.Error)
+ }
+ if info.Swarm.ControlAvailable {
+ fmt.Fprintf(cli.out, " IsManager: Yes\n")
+ fmt.Fprintf(cli.out, " Managers: %d\n", info.Swarm.Managers)
+ fmt.Fprintf(cli.out, " Nodes: %d\n", info.Swarm.Nodes)
+ ioutils.FprintfIfNotEmpty(cli.out, " CACertHash: %s\n", info.Swarm.CACertHash)
+ } else {
+ fmt.Fprintf(cli.out, " IsManager: No\n")
+ }
+ }
ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion)
ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem)
ioutils.FprintfIfNotEmpty(cli.out, "OSType: %s\n", info.OSType)
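
For reference, on a single-node manager the new block renders along these lines (all values hypothetical):

	Swarm: active
	 NodeID: 0gac67oclbxq7
	 IsManager: Yes
	 Managers: 1
	 Nodes: 1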
diff --git a/api/client/inspect.go b/api/client/inspect.go
index cb16b1bb52..3b107a7e43 100644
--- a/api/client/inspect.go
+++ b/api/client/inspect.go
@@ -11,19 +11,19 @@ import (
"github.com/docker/engine-api/client"
)
-// CmdInspect displays low-level information on one or more containers or images.
+// CmdInspect displays low-level information on one or more containers, images or tasks.
//
-// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...]
+// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE|TASK [CONTAINER|IMAGE|TASK...]
func (cli *DockerCli) CmdInspect(args ...string) error {
- cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, Cli.DockerCommands["inspect"].Description, true)
+ cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE|TASK [CONTAINER|IMAGE|TASK...]"}, Cli.DockerCommands["inspect"].Description, true)
tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template")
- inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type, (e.g image or container)")
+ inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type (e.g. image, container or task)")
size := cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes if the type is container")
cmd.Require(flag.Min, 1)
cmd.ParseFlags(args, true)
- if *inspectType != "" && *inspectType != "container" && *inspectType != "image" {
+ if *inspectType != "" && *inspectType != "container" && *inspectType != "image" && *inspectType != "task" {
return fmt.Errorf("%q is not a valid value for --type", *inspectType)
}
@@ -35,6 +35,11 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
elementSearcher = cli.inspectContainers(ctx, *size)
case "image":
elementSearcher = cli.inspectImages(ctx, *size)
+ case "task":
+ if *size {
+ fmt.Fprintln(cli.err, "WARNING: --size ignored for tasks")
+ }
+ elementSearcher = cli.inspectTasks(ctx)
default:
elementSearcher = cli.inspectAll(ctx, *size)
}
@@ -54,6 +59,12 @@ func (cli *DockerCli) inspectImages(ctx context.Context, getSize bool) inspect.G
}
}
+func (cli *DockerCli) inspectTasks(ctx context.Context) inspect.GetRefFunc {
+ return func(ref string) (interface{}, []byte, error) {
+ return cli.client.TaskInspectWithRaw(ctx, ref)
+ }
+}
+
func (cli *DockerCli) inspectAll(ctx context.Context, getSize bool) inspect.GetRefFunc {
return func(ref string) (interface{}, []byte, error) {
c, rawContainer, err := cli.client.ContainerInspectWithRaw(ctx, ref, getSize)
@@ -63,7 +74,15 @@ func (cli *DockerCli) inspectAll(ctx context.Context, getSize bool) inspect.GetR
i, rawImage, err := cli.client.ImageInspectWithRaw(ctx, ref, getSize)
if err != nil {
if client.IsErrImageNotFound(err) {
- return nil, nil, fmt.Errorf("Error: No such image or container: %s", ref)
+ // Search for a task with that ID if an image doesn't exist.
+ t, rawTask, err := cli.client.TaskInspectWithRaw(ctx, ref)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Error: No such image, container or task: %s", ref)
+ }
+ if getSize {
+ fmt.Fprintln(cli.err, "WARNING: --size ignored for tasks")
+ }
+ return t, rawTask, nil
}
return nil, nil, err
}
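
With the task type wired in, a task can also be inspected explicitly instead of relying on the container/image fallback (sketch; TASK is a placeholder ID, and the Status.State field follows types/swarm/task.go from this diff):

	$ docker inspect --type task TASK
	$ docker inspect --type task -f '{{.Status.State}}' TASK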
diff --git a/api/client/network/list.go b/api/client/network/list.go
index 3e113b7275..292b4f62b0 100644
--- a/api/client/network/list.go
+++ b/api/client/network/list.go
@@ -71,7 +71,7 @@ func runList(dockerCli *client.DockerCli, opts listOptions) error {
w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0)
if !opts.quiet {
- fmt.Fprintf(w, "NETWORK ID\tNAME\tDRIVER")
+ fmt.Fprintf(w, "NETWORK ID\tNAME\tDRIVER\tSCOPE")
fmt.Fprintf(w, "\n")
}
@@ -79,6 +79,8 @@ func runList(dockerCli *client.DockerCli, opts listOptions) error {
for _, networkResource := range networkResources {
ID := networkResource.ID
netName := networkResource.Name
+ driver := networkResource.Driver
+ scope := networkResource.Scope
if !opts.noTrunc {
ID = stringid.TruncateID(ID)
}
@@ -86,11 +88,11 @@ func runList(dockerCli *client.DockerCli, opts listOptions) error {
fmt.Fprintln(w, ID)
continue
}
- driver := networkResource.Driver
- fmt.Fprintf(w, "%s\t%s\t%s\t",
+ fmt.Fprintf(w, "%s\t%s\t%s\t%s\t",
ID,
netName,
- driver)
+ driver,
+ scope)
fmt.Fprint(w, "\n")
}
w.Flush()
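
A sample listing with the new SCOPE column (IDs and names hypothetical):

	NETWORK ID          NAME                DRIVER              SCOPE
	4fdba49e9247        bridge              bridge              local
	9gbsplq2ldvx        my-overlay          overlay             swarm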
diff --git a/api/client/node/accept.go b/api/client/node/accept.go
new file mode 100644
index 0000000000..ae672ffe90
--- /dev/null
+++ b/api/client/node/accept.go
@@ -0,0 +1,40 @@
+package node
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+)
+
+func newAcceptCommand(dockerCli *client.DockerCli) *cobra.Command {
+ var flags *pflag.FlagSet
+
+ cmd := &cobra.Command{
+ Use: "accept NODE [NODE...]",
+ Short: "Accept a node in the swarm",
+ Args: cli.RequiresMinArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runAccept(dockerCli, flags, args)
+ },
+ }
+
+ flags = cmd.Flags()
+ return cmd
+}
+
+func runAccept(dockerCli *client.DockerCli, flags *pflag.FlagSet, args []string) error {
+ for _, id := range args {
+ if err := runUpdate(dockerCli, id, func(node *swarm.Node) {
+ node.Spec.Membership = swarm.NodeMembershipAccepted
+ }); err != nil {
+ return err
+ }
+ fmt.Println(id, "attempting to accept a node in the swarm.")
+ }
+
+ return nil
+}
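
Typical invocation, with the confirmation line coming from the fmt.Println above (node name hypothetical):

	$ docker node accept node-2
	node-2 attempting to accept a node in the swarm.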
diff --git a/api/client/node/cmd.go b/api/client/node/cmd.go
new file mode 100644
index 0000000000..d951043f78
--- /dev/null
+++ b/api/client/node/cmd.go
@@ -0,0 +1,49 @@
+package node
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "github.com/spf13/cobra"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ apiclient "github.com/docker/engine-api/client"
+)
+
+// NewNodeCommand returns a cobra command for `node` subcommands
+func NewNodeCommand(dockerCli *client.DockerCli) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "node",
+ Short: "Manage docker swarm nodes",
+ Args: cli.NoArgs,
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
+ },
+ }
+ cmd.AddCommand(
+ newAcceptCommand(dockerCli),
+ newDemoteCommand(dockerCli),
+ newInspectCommand(dockerCli),
+ newListCommand(dockerCli),
+ newPromoteCommand(dockerCli),
+ newRemoveCommand(dockerCli),
+ newTasksCommand(dockerCli),
+ newUpdateCommand(dockerCli),
+ )
+ return cmd
+}
+
+func nodeReference(client apiclient.APIClient, ctx context.Context, ref string) (string, error) {
+ // The special value "self" for a node reference is mapped to the current
+ // node, hence the node ID is retrieved using the `/info` endpoint.
+ if ref == "self" {
+ info, err := client.Info(ctx)
+ if err != nil {
+ return "", err
+ }
+ return info.Swarm.NodeID, nil
+ }
+ return ref, nil
+}
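
Because nodeReference maps the special value "self" to the local node ID via the /info endpoint, a node can be addressed without knowing its ID:

	$ docker node inspect self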
diff --git a/api/client/node/demote.go b/api/client/node/demote.go
new file mode 100644
index 0000000000..25f2073ee6
--- /dev/null
+++ b/api/client/node/demote.go
@@ -0,0 +1,40 @@
+package node
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+)
+
+func newDemoteCommand(dockerCli *client.DockerCli) *cobra.Command {
+ var flags *pflag.FlagSet
+
+ cmd := &cobra.Command{
+ Use: "demote NODE [NODE...]",
+ Short: "Demote a node from manager in the swarm",
+ Args: cli.RequiresMinArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runDemote(dockerCli, flags, args)
+ },
+ }
+
+ flags = cmd.Flags()
+ return cmd
+}
+
+func runDemote(dockerCli *client.DockerCli, flags *pflag.FlagSet, args []string) error {
+ for _, id := range args {
+ if err := runUpdate(dockerCli, id, func(node *swarm.Node) {
+ node.Spec.Role = swarm.NodeRoleWorker
+ }); err != nil {
+ return err
+ }
+ fmt.Println(id, "attempting to demote a manager in the swarm.")
+ }
+
+ return nil
+}
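
demote mirrors docker node promote (api/client/node/promote.go in this diff): both go through runUpdate and only flip Spec.Role. For example (node name hypothetical):

	$ docker node demote node-2
	node-2 attempting to demote a manager in the swarm.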
diff --git a/api/client/node/inspect.go b/api/client/node/inspect.go
new file mode 100644
index 0000000000..a4a7291f4b
--- /dev/null
+++ b/api/client/node/inspect.go
@@ -0,0 +1,141 @@
+package node
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/api/client/inspect"
+ "github.com/docker/docker/cli"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/docker/go-units"
+ "github.com/spf13/cobra"
+ "golang.org/x/net/context"
+)
+
+type inspectOptions struct {
+ nodeIds []string
+ format string
+ pretty bool
+}
+
+func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command {
+ var opts inspectOptions
+
+ cmd := &cobra.Command{
+ Use: "inspect [OPTIONS] self|NODE [NODE...]",
+ Short: "Inspect a node in the swarm",
+ Args: cli.RequiresMinArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ opts.nodeIds = args
+ return runInspect(dockerCli, opts)
+ },
+ }
+
+ flags := cmd.Flags()
+ flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template")
+ flags.BoolVarP(&opts.pretty, "pretty", "p", false, "Print the information in a human-friendly format")
+ return cmd
+}
+
+func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+ getRef := func(ref string) (interface{}, []byte, error) {
+ nodeRef, err := nodeReference(client, ctx, ref)
+ if err != nil {
+ return nil, nil, err
+ }
+ node, err := client.NodeInspect(ctx, nodeRef)
+ return node, nil, err
+ }
+
+ if !opts.pretty {
+ return inspect.Inspect(dockerCli.Out(), opts.nodeIds, opts.format, getRef)
+ }
+ return printHumanFriendly(dockerCli.Out(), opts.nodeIds, getRef)
+}
+
+func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error {
+ for idx, ref := range refs {
+ obj, _, err := getRef(ref)
+ if err != nil {
+ return err
+ }
+ printNode(out, obj.(swarm.Node))
+
+ // TODO: better way to do this?
+ // print extra space between objects, but not after the last one
+ if idx+1 != len(refs) {
+ fmt.Fprintf(out, "\n\n")
+ }
+ }
+ return nil
+}
+
+// TODO: use a template
+func printNode(out io.Writer, node swarm.Node) {
+ fmt.Fprintf(out, "ID:\t\t\t%s\n", node.ID)
+ ioutils.FprintfIfNotEmpty(out, "Name:\t\t\t%s\n", node.Spec.Name)
+ if node.Spec.Labels != nil {
+ fmt.Fprintln(out, "Labels:")
+ for k, v := range node.Spec.Labels {
+ fmt.Fprintf(out, " - %s = %s\n", k, v)
+ }
+ }
+
+ ioutils.FprintfIfNotEmpty(out, "Hostname:\t\t%s\n", node.Description.Hostname)
+ fmt.Fprintln(out, "Status:")
+ fmt.Fprintf(out, " State:\t\t\t%s\n", client.PrettyPrint(node.Status.State))
+ ioutils.FprintfIfNotEmpty(out, " Message:\t\t%s\n", client.PrettyPrint(node.Status.Message))
+ fmt.Fprintf(out, " Availability:\t\t%s\n", client.PrettyPrint(node.Spec.Availability))
+
+ if node.ManagerStatus != nil {
+ fmt.Fprintln(out, "Manager Status:")
+ fmt.Fprintf(out, " Address:\t\t%s\n", node.ManagerStatus.Addr)
+ fmt.Fprintf(out, " Raft status:\t\t%s\n", client.PrettyPrint(node.ManagerStatus.Reachability))
+ leader := "No"
+ if node.ManagerStatus.Leader {
+ leader = "Yes"
+ }
+ fmt.Fprintf(out, " Leader:\t\t%s\n", leader)
+ }
+
+ fmt.Fprintln(out, "Platform:")
+ fmt.Fprintf(out, " Operating System:\t%s\n", node.Description.Platform.OS)
+ fmt.Fprintf(out, " Architecture:\t\t%s\n", node.Description.Platform.Architecture)
+
+ fmt.Fprintln(out, "Resources:")
+ fmt.Fprintf(out, " CPUs:\t\t\t%d\n", node.Description.Resources.NanoCPUs/1e9)
+ fmt.Fprintf(out, " Memory:\t\t%s\n", units.BytesSize(float64(node.Description.Resources.MemoryBytes)))
+
+ var pluginTypes []string
+ pluginNamesByType := map[string][]string{}
+ for _, p := range node.Description.Engine.Plugins {
+ // append to pluginTypes only if not done previously
+ if _, ok := pluginNamesByType[p.Type]; !ok {
+ pluginTypes = append(pluginTypes, p.Type)
+ }
+ pluginNamesByType[p.Type] = append(pluginNamesByType[p.Type], p.Name)
+ }
+
+ if len(pluginTypes) > 0 {
+ fmt.Fprintln(out, "Plugins:")
+ sort.Strings(pluginTypes) // ensure stable output
+ for _, pluginType := range pluginTypes {
+ fmt.Fprintf(out, " %s:\t\t%s\n", pluginType, strings.Join(pluginNamesByType[pluginType], ", "))
+ }
+ }
+ fmt.Fprintf(out, "Engine Version:\t\t%s\n", node.Description.Engine.EngineVersion)
+
+ if len(node.Description.Engine.Labels) != 0 {
+ fmt.Fprintln(out, "Engine Labels:")
+ for k, v := range node.Description.Engine.Labels {
+ fmt.Fprintf(out, " - %s = %s\n", k, v)
+ }
+ }
+
+}
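
With --pretty, printNode emits output shaped roughly like this (all values hypothetical; exact alignment comes from the tab stops above):

	ID:			1x2wjroyjvvo0
	Hostname:		node-1
	Status:
	 State:			Ready
	 Availability:		Active
	Platform:
	 Operating System:	linux
	 Architecture:		x86_64
	Resources:
	 CPUs:			2
	 Memory:		1.954GiB
	Engine Version:		1.12.0-dev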
diff --git a/api/client/node/list.go b/api/client/node/list.go
new file mode 100644
index 0000000000..c21cb943d9
--- /dev/null
+++ b/api/client/node/list.go
@@ -0,0 +1,119 @@
+package node
+
+import (
+ "fmt"
+ "io"
+ "text/tabwriter"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/docker/docker/opts"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/spf13/cobra"
+)
+
+const (
+ listItemFmt = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
+)
+
+type listOptions struct {
+ quiet bool
+ filter opts.FilterOpt
+}
+
+func newListCommand(dockerCli *client.DockerCli) *cobra.Command {
+ opts := listOptions{filter: opts.NewFilterOpt()}
+
+ cmd := &cobra.Command{
+ Use: "ls",
+ Aliases: []string{"list"},
+ Short: "List nodes in the swarm",
+ Args: cli.NoArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runList(dockerCli, opts)
+ },
+ }
+ flags := cmd.Flags()
+ flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs")
+ flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided")
+
+ return cmd
+}
+
+func runList(dockerCli *client.DockerCli, opts listOptions) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+
+ nodes, err := client.NodeList(
+ ctx,
+ types.NodeListOptions{Filter: opts.filter.Value()})
+ if err != nil {
+ return err
+ }
+
+ info, err := client.Info(ctx)
+ if err != nil {
+ return err
+ }
+
+ out := dockerCli.Out()
+ if opts.quiet {
+ printQuiet(out, nodes)
+ } else {
+ printTable(out, nodes, info)
+ }
+ return nil
+}
+
+func printTable(out io.Writer, nodes []swarm.Node, info types.Info) {
+ writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0)
+
+ // Ignore flushing errors
+ defer writer.Flush()
+
+ fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "MEMBERSHIP", "STATUS", "AVAILABILITY", "MANAGER STATUS", "LEADER")
+ for _, node := range nodes {
+ name := node.Spec.Name
+ availability := string(node.Spec.Availability)
+ membership := string(node.Spec.Membership)
+
+ if name == "" {
+ name = node.Description.Hostname
+ }
+
+ leader := ""
+ if node.ManagerStatus != nil && node.ManagerStatus.Leader {
+ leader = "Yes"
+ }
+
+ reachability := ""
+ if node.ManagerStatus != nil {
+ reachability = string(node.ManagerStatus.Reachability)
+ }
+
+ ID := node.ID
+ if node.ID == info.Swarm.NodeID {
+ ID = ID + " *"
+ }
+
+ fmt.Fprintf(
+ writer,
+ listItemFmt,
+ ID,
+ name,
+ client.PrettyPrint(membership),
+ client.PrettyPrint(string(node.Status.State)),
+ client.PrettyPrint(availability),
+ client.PrettyPrint(reachability),
+ leader)
+ }
+}
+
+func printQuiet(out io.Writer, nodes []swarm.Node) {
+ for _, node := range nodes {
+ fmt.Fprintln(out, node.ID)
+ }
+}
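The column alignment in printTable comes entirely from text/tabwriter; listItemFmt only supplies the tab stops. A standalone, runnable sketch using the same writer parameters (the row values are illustrative, not real output):

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

const listItemFmt = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n"

func main() {
	// Same parameters as printTable: minwidth 0, tabwidth 4, padding 2.
	w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0)
	defer w.Flush()
	fmt.Fprintf(w, listItemFmt, "ID", "NAME", "MEMBERSHIP", "STATUS", "AVAILABILITY", "MANAGER STATUS", "LEADER")
	fmt.Fprintf(w, listItemFmt, "1a2b3c *", "node-1", "Accepted", "Ready", "Active", "Reachable", "Yes")
}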
diff --git a/api/client/node/opts.go b/api/client/node/opts.go
new file mode 100644
index 0000000000..cd160252d9
--- /dev/null
+++ b/api/client/node/opts.go
@@ -0,0 +1,50 @@
+package node
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/docker/engine-api/types/swarm"
+)
+
+type nodeOptions struct {
+ role string
+ membership string
+ availability string
+}
+
+func (opts *nodeOptions) ToNodeSpec() (swarm.NodeSpec, error) {
+ var spec swarm.NodeSpec
+
+ switch swarm.NodeRole(strings.ToLower(opts.role)) {
+ case swarm.NodeRoleWorker:
+ spec.Role = swarm.NodeRoleWorker
+ case swarm.NodeRoleManager:
+ spec.Role = swarm.NodeRoleManager
+ case "":
+ default:
+ return swarm.NodeSpec{}, fmt.Errorf("invalid role %q, only worker and manager are supported", opts.role)
+ }
+
+ switch swarm.NodeMembership(strings.ToLower(opts.membership)) {
+ case swarm.NodeMembershipAccepted:
+ spec.Membership = swarm.NodeMembershipAccepted
+ case "":
+ default:
+ return swarm.NodeSpec{}, fmt.Errorf("invalid membership %q, only accepted is supported", opts.membership)
+ }
+
+ switch swarm.NodeAvailability(strings.ToLower(opts.availability)) {
+ case swarm.NodeAvailabilityActive:
+ spec.Availability = swarm.NodeAvailabilityActive
+ case swarm.NodeAvailabilityPause:
+ spec.Availability = swarm.NodeAvailabilityPause
+ case swarm.NodeAvailabilityDrain:
+ spec.Availability = swarm.NodeAvailabilityDrain
+ case "":
+ default:
+ return swarm.NodeSpec{}, fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability)
+ }
+
+ return spec, nil
+}
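ToNodeSpec lower-cases its inputs, so the flag values are case-insensitive, and anything outside the known enum values falls through to the default case and errors out. A hypothetical test (not part of this patch) sketching that contract:

// Hypothetical test for package node; assumes the standard "testing" import.
func TestToNodeSpecValidation(t *testing.T) {
	opts := nodeOptions{role: "Manager", availability: "drain"}
	spec, err := opts.ToNodeSpec()
	if err != nil {
		t.Fatal(err)
	}
	if spec.Role != swarm.NodeRoleManager || spec.Availability != swarm.NodeAvailabilityDrain {
		t.Fatalf("unexpected spec: %+v", spec)
	}

	// Unknown values are rejected rather than passed through to the daemon.
	opts = nodeOptions{role: "boss"}
	if _, err := opts.ToNodeSpec(); err == nil {
		t.Fatal("expected an error for an unsupported role")
	}
}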
diff --git a/api/client/node/promote.go b/api/client/node/promote.go
new file mode 100644
index 0000000000..858b36a8c7
--- /dev/null
+++ b/api/client/node/promote.go
@@ -0,0 +1,40 @@
+package node
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+)
+
+func newPromoteCommand(dockerCli *client.DockerCli) *cobra.Command {
+ var flags *pflag.FlagSet
+
+ cmd := &cobra.Command{
+ Use: "promote NODE [NODE...]",
+ Short: "Promote a node to a manager in the swarm",
+ Args: cli.RequiresMinArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runPromote(dockerCli, flags, args)
+ },
+ }
+
+ flags = cmd.Flags()
+ return cmd
+}
+
+func runPromote(dockerCli *client.DockerCli, flags *pflag.FlagSet, args []string) error {
+ for _, id := range args {
+ if err := runUpdate(dockerCli, id, func(node *swarm.Node) {
+ node.Spec.Role = swarm.NodeRoleManager
+ }); err != nil {
+ return err
+ }
+ fmt.Println(id, "attempting to promote a node to a manager in the swarm.")
+ }
+
+ return nil
+}
diff --git a/api/client/node/remove.go b/api/client/node/remove.go
new file mode 100644
index 0000000000..540194062d
--- /dev/null
+++ b/api/client/node/remove.go
@@ -0,0 +1,36 @@
+package node
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/spf13/cobra"
+)
+
+func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command {
+ return &cobra.Command{
+ Use: "rm NODE [NODE...]",
+ Aliases: []string{"remove"},
+ Short: "Remove a node from the swarm",
+ Args: cli.RequiresMinArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runRemove(dockerCli, args)
+ },
+ }
+}
+
+func runRemove(dockerCli *client.DockerCli, args []string) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+ for _, nodeID := range args {
+ err := client.NodeRemove(ctx, nodeID)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID)
+ }
+ return nil
+}
diff --git a/api/client/node/tasks.go b/api/client/node/tasks.go
new file mode 100644
index 0000000000..0c044e3dd6
--- /dev/null
+++ b/api/client/node/tasks.go
@@ -0,0 +1,72 @@
+package node
+
+import (
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/api/client/idresolver"
+ "github.com/docker/docker/api/client/task"
+ "github.com/docker/docker/cli"
+ "github.com/docker/docker/opts"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/spf13/cobra"
+)
+
+type tasksOptions struct {
+ nodeID string
+ all bool
+ noResolve bool
+ filter opts.FilterOpt
+}
+
+func newTasksCommand(dockerCli *client.DockerCli) *cobra.Command {
+ opts := tasksOptions{filter: opts.NewFilterOpt()}
+
+ cmd := &cobra.Command{
+ Use: "tasks [OPTIONS] self|NODE",
+ Short: "List tasks running on a node",
+ Args: cli.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ opts.nodeID = args[0]
+ return runTasks(dockerCli, opts)
+ },
+ }
+ flags := cmd.Flags()
+ flags.BoolVarP(&opts.all, "all", "a", false, "Display all instances")
+ flags.BoolVarP(&opts.noResolve, "no-resolve", "n", false, "Do not map IDs to Names")
+ flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided")
+
+ return cmd
+}
+
+func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+
+ nodeRef, err := nodeReference(client, ctx, opts.nodeID)
+ if err != nil {
+ return err
+ }
+ node, err := client.NodeInspect(ctx, nodeRef)
+ if err != nil {
+ return err
+ }
+
+ filter := opts.filter.Value()
+ filter.Add("node", node.ID)
+ if !opts.all {
+ filter.Add("desired_state", string(swarm.TaskStateRunning))
+ filter.Add("desired_state", string(swarm.TaskStateAccepted))
+
+ }
+
+ tasks, err := client.TaskList(
+ ctx,
+ types.TaskListOptions{Filter: filter})
+ if err != nil {
+ return err
+ }
+
+ return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve))
+}
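runTasks calls nodeReference, which is defined elsewhere in this package and not shown in this hunk. Presumably it resolves the literal "self" to the local node through the daemon's Info endpoint; a sketch under that assumption (apiclient standing in for the engine-api client interface):

// Assumed shape of the helper used above: map "self" to the current node's
// ID and pass every other reference through untouched.
func nodeReference(client apiclient.APIClient, ctx context.Context, ref string) (string, error) {
	if ref == "self" {
		info, err := client.Info(ctx)
		if err != nil {
			return "", err
		}
		return info.Swarm.NodeID, nil
	}
	return ref, nil
}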
diff --git a/api/client/node/update.go b/api/client/node/update.go
new file mode 100644
index 0000000000..c0e8d88f35
--- /dev/null
+++ b/api/client/node/update.go
@@ -0,0 +1,100 @@
+package node
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ runconfigopts "github.com/docker/docker/runconfig/opts"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "golang.org/x/net/context"
+)
+
+func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {
+ var opts nodeOptions
+ var flags *pflag.FlagSet
+
+ cmd := &cobra.Command{
+ Use: "update [OPTIONS] NODE",
+ Short: "Update a node",
+ Args: cli.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runUpdate(dockerCli, args[0], mergeNodeUpdate(flags))
+ },
+ }
+
+ flags = cmd.Flags()
+ flags.StringVar(&opts.role, "role", "", "Role of the node (worker/manager)")
+ flags.StringVar(&opts.membership, "membership", "", "Membership of the node (accepted/rejected)")
+ flags.StringVar(&opts.availability, "availability", "", "Availability of the node (active/pause/drain)")
+ return cmd
+}
+
+func runUpdate(dockerCli *client.DockerCli, nodeID string, mergeNode func(node *swarm.Node)) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+
+ node, err := client.NodeInspect(ctx, nodeID)
+ if err != nil {
+ return err
+ }
+
+ mergeNode(&node)
+ err = client.NodeUpdate(ctx, nodeID, node.Version, node.Spec)
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID)
+ return nil
+}
+
+func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) {
+ return func(node *swarm.Node) {
+ mergeString := func(flag string, field *string) {
+ if flags.Changed(flag) {
+ *field, _ = flags.GetString(flag)
+ }
+ }
+
+ mergeRole := func(flag string, field *swarm.NodeRole) {
+ if flags.Changed(flag) {
+ str, _ := flags.GetString(flag)
+ *field = swarm.NodeRole(str)
+ }
+ }
+
+ mergeMembership := func(flag string, field *swarm.NodeMembership) {
+ if flags.Changed(flag) {
+ str, _ := flags.GetString(flag)
+ *field = swarm.NodeMembership(str)
+ }
+ }
+
+ mergeAvailability := func(flag string, field *swarm.NodeAvailability) {
+ if flags.Changed(flag) {
+ str, _ := flags.GetString(flag)
+ *field = swarm.NodeAvailability(str)
+ }
+ }
+
+ mergeLabels := func(flag string, field *map[string]string) {
+ if flags.Changed(flag) {
+ values, _ := flags.GetStringSlice(flag)
+ for key, value := range runconfigopts.ConvertKVStringsToMap(values) {
+ (*field)[key] = value
+ }
+ }
+ }
+
+ spec := &node.Spec
+ mergeString("name", &spec.Name)
+ // TODO: setting labels is not working
+ mergeLabels("label", &spec.Labels)
+ mergeRole("role", &spec.Role)
+ mergeMembership("membership", &spec.Membership)
+ mergeAvailability("availability", &spec.Availability)
+ }
+}
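All of the merge helpers above share one mechanism: flags.Changed gates every write, so only flags the user explicitly set overwrite the spec returned by NodeInspect, and the update behaves as a partial patch. A runnable sketch of that mechanism in isolation (flag value is illustrative):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("update", pflag.ContinueOnError)
	flags.String("availability", "", "Availability of the node")

	// Simulate: docker node update --availability drain NODE
	_ = flags.Parse([]string{"--availability", "drain"})

	// Unset flags leave the inspected spec untouched; only explicitly
	// provided values are merged in.
	if flags.Changed("availability") {
		value, _ := flags.GetString("availability")
		fmt.Println("merging availability:", value)
	}
}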
diff --git a/api/client/service/cmd.go b/api/client/service/cmd.go
new file mode 100644
index 0000000000..b660c19f6f
--- /dev/null
+++ b/api/client/service/cmd.go
@@ -0,0 +1,32 @@
+package service
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+)
+
+// NewServiceCommand returns a cobra command for `service` subcommands
+func NewServiceCommand(dockerCli *client.DockerCli) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "service",
+ Short: "Manage docker services",
+ Args: cli.NoArgs,
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
+ },
+ }
+ cmd.AddCommand(
+ newCreateCommand(dockerCli),
+ newInspectCommand(dockerCli),
+ newTasksCommand(dockerCli),
+ newListCommand(dockerCli),
+ newRemoveCommand(dockerCli),
+ newScaleCommand(dockerCli),
+ newUpdateCommand(dockerCli),
+ )
+ return cmd
+}
diff --git a/api/client/service/create.go b/api/client/service/create.go
new file mode 100644
index 0000000000..2141ca5966
--- /dev/null
+++ b/api/client/service/create.go
@@ -0,0 +1,47 @@
+package service
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/spf13/cobra"
+ "golang.org/x/net/context"
+)
+
+func newCreateCommand(dockerCli *client.DockerCli) *cobra.Command {
+ opts := newServiceOptions()
+
+ cmd := &cobra.Command{
+ Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]",
+ Short: "Create a new service",
+ Args: cli.RequiresMinArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ opts.image = args[0]
+ if len(args) > 1 {
+ opts.args = args[1:]
+ }
+ return runCreate(dockerCli, opts)
+ },
+ }
+ addServiceFlags(cmd, opts)
+ cmd.Flags().SetInterspersed(false)
+ return cmd
+}
+
+func runCreate(dockerCli *client.DockerCli, opts *serviceOptions) error {
+ client := dockerCli.Client()
+
+ service, err := opts.ToService()
+ if err != nil {
+ return err
+ }
+
+ response, err := client.ServiceCreate(context.Background(), service)
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID)
+ return nil
+}
diff --git a/api/client/service/inspect.go b/api/client/service/inspect.go
new file mode 100644
index 0000000000..a75e4e7cc1
--- /dev/null
+++ b/api/client/service/inspect.go
@@ -0,0 +1,127 @@
+package service
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/api/client/inspect"
+ "github.com/docker/docker/cli"
+ "github.com/docker/docker/pkg/ioutils"
+ apiclient "github.com/docker/engine-api/client"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/spf13/cobra"
+)
+
+type inspectOptions struct {
+ refs []string
+ format string
+ pretty bool
+}
+
+func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command {
+ var opts inspectOptions
+
+ cmd := &cobra.Command{
+ Use: "inspect [OPTIONS] SERVICE [SERVICE...]",
+ Short: "Inspect a service",
+ Args: cli.RequiresMinArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ opts.refs = args
+
+ if opts.pretty && len(opts.format) > 0 {
+ return fmt.Errorf("--format is incompatible with human friendly format")
+ }
+ return runInspect(dockerCli, opts)
+ },
+ }
+
+ flags := cmd.Flags()
+ flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template")
+ flags.BoolVarP(&opts.pretty, "pretty", "p", false, "Print the information in a human friendly format.")
+ return cmd
+}
+
+func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+
+ getRef := func(ref string) (interface{}, []byte, error) {
+ service, err := client.ServiceInspect(ctx, ref)
+ if err == nil || !apiclient.IsErrServiceNotFound(err) {
+ return service, nil, err
+ }
+ return nil, nil, fmt.Errorf("Error: no such service: %s", ref)
+ }
+
+ if !opts.pretty {
+ return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRef)
+ }
+
+ return printHumanFriendly(dockerCli.Out(), opts.refs, getRef)
+}
+
+func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error {
+ for idx, ref := range refs {
+ obj, _, err := getRef(ref)
+ if err != nil {
+ return err
+ }
+ printService(out, obj.(swarm.Service))
+
+ // TODO: better way to do this?
+ // print extra space between objects, but not after the last one
+ if idx+1 != len(refs) {
+ fmt.Fprintf(out, "\n\n")
+ }
+ }
+ return nil
+}
+
+// TODO: use a template
+func printService(out io.Writer, service swarm.Service) {
+ fmt.Fprintf(out, "ID:\t\t%s\n", service.ID)
+ fmt.Fprintf(out, "Name:\t\t%s\n", service.Spec.Name)
+ if service.Spec.Labels != nil {
+ fmt.Fprintln(out, "Labels:")
+ for k, v := range service.Spec.Labels {
+ fmt.Fprintf(out, " - %s=%s\n", k, v)
+ }
+ }
+
+ if service.Spec.Mode.Global != nil {
+ fmt.Fprintln(out, "Mode:\t\tGLOBAL")
+ } else {
+ fmt.Fprintln(out, "Mode:\t\tREPLICATED")
+ if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
+ fmt.Fprintf(out, " Replicas:\t\t%d\n", *service.Spec.Mode.Replicated.Replicas)
+ }
+ }
+ fmt.Fprintln(out, "Placement:")
+ fmt.Fprintln(out, " Strategy:\tSPREAD")
+ fmt.Fprintf(out, "UpateConfig:\n")
+ fmt.Fprintf(out, " Parallelism:\t%d\n", service.Spec.UpdateConfig.Parallelism)
+ if service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 {
+ fmt.Fprintf(out, " Delay:\t\t%s\n", service.Spec.UpdateConfig.Delay)
+ }
+ fmt.Fprintf(out, "ContainerSpec:\n")
+ printContainerSpec(out, service.Spec.TaskTemplate.ContainerSpec)
+}
+
+func printContainerSpec(out io.Writer, containerSpec swarm.ContainerSpec) {
+ fmt.Fprintf(out, " Image:\t\t%s\n", containerSpec.Image)
+ if len(containerSpec.Command) > 0 {
+ fmt.Fprintf(out, " Command:\t%s\n", strings.Join(containerSpec.Command, " "))
+ }
+ if len(containerSpec.Args) > 0 {
+ fmt.Fprintf(out, " Args:\t%s\n", strings.Join(containerSpec.Args, " "))
+ }
+ if len(containerSpec.Env) > 0 {
+ fmt.Fprintf(out, " Env:\t\t%s\n", strings.Join(containerSpec.Env, " "))
+ }
+ ioutils.FprintfIfNotEmpty(out, " Dir\t\t%s\n", containerSpec.Dir)
+ ioutils.FprintfIfNotEmpty(out, " User\t\t%s\n", containerSpec.User)
+}
diff --git a/api/client/service/list.go b/api/client/service/list.go
new file mode 100644
index 0000000000..c1246c86d5
--- /dev/null
+++ b/api/client/service/list.go
@@ -0,0 +1,97 @@
+package service
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "text/tabwriter"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/docker/docker/opts"
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/spf13/cobra"
+)
+
+const (
+ listItemFmt = "%s\t%s\t%s\t%s\t%s\n"
+)
+
+type listOptions struct {
+ quiet bool
+ filter opts.FilterOpt
+}
+
+func newListCommand(dockerCli *client.DockerCli) *cobra.Command {
+ opts := listOptions{filter: opts.NewFilterOpt()}
+
+ cmd := &cobra.Command{
+ Use: "ls",
+ Aliases: []string{"list"},
+ Short: "List services",
+ Args: cli.NoArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runList(dockerCli, opts)
+ },
+ }
+
+ flags := cmd.Flags()
+ flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs")
+ flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided")
+
+ return cmd
+}
+
+func runList(dockerCli *client.DockerCli, opts listOptions) error {
+ client := dockerCli.Client()
+
+ services, err := client.ServiceList(
+ context.Background(),
+ types.ServiceListOptions{Filter: opts.filter.Value()})
+ if err != nil {
+ return err
+ }
+
+ out := dockerCli.Out()
+ if opts.quiet {
+ printQuiet(out, services)
+ } else {
+ printTable(out, services)
+ }
+ return nil
+}
+
+func printTable(out io.Writer, services []swarm.Service) {
+ writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0)
+
+ // Ignore flushing errors
+ defer writer.Flush()
+
+ fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "SCALE", "IMAGE", "COMMAND")
+ for _, service := range services {
+ scale := ""
+ if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
+ scale = fmt.Sprintf("%d", *service.Spec.Mode.Replicated.Replicas)
+ } else if service.Spec.Mode.Global != nil {
+ scale = "global"
+ }
+ fmt.Fprintf(
+ writer,
+ listItemFmt,
+ stringid.TruncateID(service.ID),
+ service.Spec.Name,
+ scale,
+ service.Spec.TaskTemplate.ContainerSpec.Image,
+ strings.Join(service.Spec.TaskTemplate.ContainerSpec.Args, " "))
+ }
+}
+
+func printQuiet(out io.Writer, services []swarm.Service) {
+ for _, service := range services {
+ fmt.Fprintln(out, service.ID)
+ }
+}
diff --git a/api/client/service/opts.go b/api/client/service/opts.go
new file mode 100644
index 0000000000..f54c81b00a
--- /dev/null
+++ b/api/client/service/opts.go
@@ -0,0 +1,462 @@
+package service
+
+import (
+ "encoding/csv"
+ "fmt"
+ "math/big"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/opts"
+ runconfigopts "github.com/docker/docker/runconfig/opts"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/docker/go-connections/nat"
+ units "github.com/docker/go-units"
+ "github.com/spf13/cobra"
+)
+
+var (
+ // DefaultReplicas is the default replicas to use for a replicated service
+ DefaultReplicas uint64 = 1
+)
+
+type int64Value interface {
+ Value() int64
+}
+
+type memBytes int64
+
+func (m *memBytes) String() string {
+ return strconv.FormatInt(m.Value(), 10)
+}
+
+func (m *memBytes) Set(value string) error {
+ val, err := units.RAMInBytes(value)
+ *m = memBytes(val)
+ return err
+}
+
+func (m *memBytes) Type() string {
+ return "MemoryBytes"
+}
+
+func (m *memBytes) Value() int64 {
+ return int64(*m)
+}
+
+type nanoCPUs int64
+
+func (c *nanoCPUs) String() string {
+ return strconv.FormatInt(c.Value(), 10)
+}
+
+func (c *nanoCPUs) Set(value string) error {
+ cpu, ok := new(big.Rat).SetString(value)
+ if !ok {
+ return fmt.Errorf("Failed to parse %v as a rational number", value)
+ }
+ nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
+ if !nano.IsInt() {
+ return fmt.Errorf("value is too precise")
+ }
+ *c = nanoCPUs(nano.Num().Int64())
+ return nil
+}
+
+func (c *nanoCPUs) Type() string {
+ return "NanoCPUs"
+}
+
+func (c *nanoCPUs) Value() int64 {
+ return int64(*c)
+}
+
+// DurationOpt is an option type for time.Duration that uses a pointer. This
+// allows us to get nil values outside, instead of defaulting to 0
+type DurationOpt struct {
+ value *time.Duration
+}
+
+// Set a new value on the option
+func (d *DurationOpt) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ d.value = &v
+ return err
+}
+
+// Type returns the type of this option
+func (d *DurationOpt) Type() string {
+ return "duration-ptr"
+}
+
+// String returns a string repr of this option
+func (d *DurationOpt) String() string {
+ if d.value != nil {
+ return d.value.String()
+ }
+ return "none"
+}
+
+// Value returns the time.Duration
+func (d *DurationOpt) Value() *time.Duration {
+ return d.value
+}
+
+// Uint64Opt represents a uint64.
+type Uint64Opt struct {
+ value *uint64
+}
+
+// Set a new value on the option
+func (i *Uint64Opt) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ i.value = &v
+ return err
+}
+
+// Type returns the type of this option
+func (i *Uint64Opt) Type() string {
+ return "uint64-ptr"
+}
+
+// String returns a string repr of this option
+func (i *Uint64Opt) String() string {
+ if i.value != nil {
+ return fmt.Sprintf("%v", *i.value)
+ }
+ return "none"
+}
+
+// Value returns the uint64
+func (i *Uint64Opt) Value() *uint64 {
+ return i.value
+}
+
+// MountOpt is a Value type for parsing mounts
+type MountOpt struct {
+ values []swarm.Mount
+}
+
+// Set a new mount value
+func (m *MountOpt) Set(value string) error {
+ csvReader := csv.NewReader(strings.NewReader(value))
+ fields, err := csvReader.Read()
+ if err != nil {
+ return err
+ }
+
+ mount := swarm.Mount{}
+
+ volumeOptions := func() *swarm.VolumeOptions {
+ if mount.VolumeOptions == nil {
+ mount.VolumeOptions = &swarm.VolumeOptions{
+ Labels: make(map[string]string),
+ }
+ }
+ return mount.VolumeOptions
+ }
+
+ setValueOnMap := func(target map[string]string, value string) {
+ parts := strings.SplitN(value, "=", 2)
+ if len(parts) == 1 {
+ target[value] = ""
+ } else {
+ target[parts[0]] = parts[1]
+ }
+ }
+
+ for _, field := range fields {
+ parts := strings.SplitN(field, "=", 2)
+ if len(parts) == 1 && strings.ToLower(parts[0]) == "writable" {
+ mount.Writable = true
+ continue
+ }
+
+ if len(parts) != 2 {
+ return fmt.Errorf("invald field '%s' must be a key=value pair", field)
+ }
+
+ key, value := parts[0], parts[1]
+ switch strings.ToLower(key) {
+ case "type":
+ mount.Type = swarm.MountType(strings.ToUpper(value))
+ case "source":
+ mount.Source = value
+ case "target":
+ mount.Target = value
+ case "writable":
+ mount.Writable, err = strconv.ParseBool(value)
+ if err != nil {
+ return fmt.Errorf("invald value for writable: %s", err.Error())
+ }
+ case "bind-propagation":
+ mount.BindOptions.Propagation = swarm.MountPropagation(strings.ToUpper(value))
+ case "volume-populate":
+ volumeOptions().Populate, err = strconv.ParseBool(value)
+ if err != nil {
+ return fmt.Errorf("invald value for populate: %s", err.Error())
+ }
+ case "volume-label":
+ setValueOnMap(volumeOptions().Labels, value)
+ case "volume-driver":
+ volumeOptions().DriverConfig.Name = value
+ case "volume-driver-opt":
+ if volumeOptions().DriverConfig.Options == nil {
+ volumeOptions().DriverConfig.Options = make(map[string]string)
+ }
+ setValueOnMap(volumeOptions().DriverConfig.Options, value)
+ default:
+ return fmt.Errorf("unexpected key '%s' in '%s'", key, value)
+ }
+ }
+
+ if mount.Type == "" {
+ return fmt.Errorf("type is required")
+ }
+
+ if mount.Target == "" {
+ return fmt.Errorf("target is required")
+ }
+
+ m.values = append(m.values, mount)
+ return nil
+}
+
+// Type returns the type of this option
+func (m *MountOpt) Type() string {
+ return "mount"
+}
+
+// String returns a string repr of this option
+func (m *MountOpt) String() string {
+ mounts := []string{}
+ for _, mount := range m.values {
+ mounts = append(mounts, fmt.Sprintf("%v", mount))
+ }
+ return strings.Join(mounts, ", ")
+}
+
+// Value returns the mounts
+func (m *MountOpt) Value() []swarm.Mount {
+ return m.values
+}
+
+type updateOptions struct {
+ parallelism uint64
+ delay time.Duration
+}
+
+type resourceOptions struct {
+ limitCPU nanoCPUs
+ limitMemBytes memBytes
+ resCPU nanoCPUs
+ resMemBytes memBytes
+}
+
+func (r *resourceOptions) ToResourceRequirements() *swarm.ResourceRequirements {
+ return &swarm.ResourceRequirements{
+ Limits: &swarm.Resources{
+ NanoCPUs: r.limitCPU.Value(),
+ MemoryBytes: r.limitMemBytes.Value(),
+ },
+ Reservations: &swarm.Resources{
+ NanoCPUs: r.resCPU.Value(),
+ MemoryBytes: r.resMemBytes.Value(),
+ },
+ }
+}
+
+type restartPolicyOptions struct {
+ condition string
+ delay DurationOpt
+ maxAttempts Uint64Opt
+ window DurationOpt
+}
+
+func (r *restartPolicyOptions) ToRestartPolicy() *swarm.RestartPolicy {
+ return &swarm.RestartPolicy{
+ Condition: swarm.RestartPolicyCondition(r.condition),
+ Delay: r.delay.Value(),
+ MaxAttempts: r.maxAttempts.Value(),
+ Window: r.window.Value(),
+ }
+}
+
+func convertNetworks(networks []string) []swarm.NetworkAttachmentConfig {
+ nets := []swarm.NetworkAttachmentConfig{}
+ for _, network := range networks {
+ nets = append(nets, swarm.NetworkAttachmentConfig{Target: network})
+ }
+ return nets
+}
+
+type endpointOptions struct {
+ mode string
+ ports opts.ListOpts
+}
+
+func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec {
+ portConfigs := []swarm.PortConfig{}
+ // We can ignore errors because the format was already validated by ValidatePort
+ ports, portBindings, _ := nat.ParsePortSpecs(e.ports.GetAll())
+
+ for port := range ports {
+ portConfigs = append(portConfigs, convertPortToPortConfig(port, portBindings)...)
+ }
+
+ return &swarm.EndpointSpec{
+ Mode: swarm.ResolutionMode(e.mode),
+ Ports: portConfigs,
+ }
+}
+
+func convertPortToPortConfig(
+ port nat.Port,
+ portBindings map[nat.Port][]nat.PortBinding,
+) []swarm.PortConfig {
+ ports := []swarm.PortConfig{}
+
+ for _, binding := range portBindings[port] {
+ hostPort, _ := strconv.ParseUint(binding.HostPort, 10, 16)
+ ports = append(ports, swarm.PortConfig{
+ //TODO Name: ?
+ Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
+ TargetPort: uint32(port.Int()),
+ PublishedPort: uint32(hostPort),
+ })
+ }
+ return ports
+}
+
+// ValidatePort validates a string is in the expected format for a port definition
+func ValidatePort(value string) (string, error) {
+ portMappings, err := nat.ParsePortSpec(value)
+ for _, portMapping := range portMappings {
+ if portMapping.Binding.HostIP != "" {
+ return "", fmt.Errorf("HostIP is not supported by a service.")
+ }
+ }
+ return value, err
+}
+
+type serviceOptions struct {
+ name string
+ labels opts.ListOpts
+ image string
+ command []string
+ args []string
+ env opts.ListOpts
+ workdir string
+ user string
+ mounts MountOpt
+
+ resources resourceOptions
+ stopGrace DurationOpt
+
+ replicas Uint64Opt
+ mode string
+
+ restartPolicy restartPolicyOptions
+ constraints []string
+ update updateOptions
+ networks []string
+ endpoint endpointOptions
+}
+
+func newServiceOptions() *serviceOptions {
+ return &serviceOptions{
+ labels: opts.NewListOpts(runconfigopts.ValidateEnv),
+ env: opts.NewListOpts(runconfigopts.ValidateEnv),
+ endpoint: endpointOptions{
+ ports: opts.NewListOpts(ValidatePort),
+ },
+ }
+}
+
+func (opts *serviceOptions) ToService() (swarm.ServiceSpec, error) {
+ var service swarm.ServiceSpec
+
+ service = swarm.ServiceSpec{
+ Annotations: swarm.Annotations{
+ Name: opts.name,
+ Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()),
+ },
+ TaskTemplate: swarm.TaskSpec{
+ ContainerSpec: swarm.ContainerSpec{
+ Image: opts.image,
+ Command: opts.command,
+ Args: opts.args,
+ Env: opts.env.GetAll(),
+ Dir: opts.workdir,
+ User: opts.user,
+ Mounts: opts.mounts.Value(),
+ StopGracePeriod: opts.stopGrace.Value(),
+ },
+ Resources: opts.resources.ToResourceRequirements(),
+ RestartPolicy: opts.restartPolicy.ToRestartPolicy(),
+ Placement: &swarm.Placement{
+ Constraints: opts.constraints,
+ },
+ },
+ Mode: swarm.ServiceMode{},
+ UpdateConfig: &swarm.UpdateConfig{
+ Parallelism: opts.update.parallelism,
+ Delay: opts.update.delay,
+ },
+ Networks: convertNetworks(opts.networks),
+ EndpointSpec: opts.endpoint.ToEndpointSpec(),
+ }
+
+ switch opts.mode {
+ case "global":
+ if opts.replicas.Value() != nil {
+ return service, fmt.Errorf("replicas can only be used with replicated mode")
+ }
+
+ service.Mode.Global = &swarm.GlobalService{}
+ case "replicated":
+ service.Mode.Replicated = &swarm.ReplicatedService{
+ Replicas: opts.replicas.Value(),
+ }
+ default:
+ return service, fmt.Errorf("Unknown mode: %s", opts.mode)
+ }
+ return service, nil
+}
+
+// addServiceFlags adds all flags that are common to both `create` and `update`.
+// Any flags that are not common are added separately in the individual commands
+func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) {
+ flags := cmd.Flags()
+ flags.StringVar(&opts.name, "name", "", "Service name")
+ flags.VarP(&opts.labels, "label", "l", "Service labels")
+
+ flags.VarP(&opts.env, "env", "e", "Set environment variables")
+ flags.StringVarP(&opts.workdir, "workdir", "w", "", "Working directory inside the container")
+ flags.StringVarP(&opts.user, "user", "u", "", "Username or UID")
+ flags.VarP(&opts.mounts, "mount", "m", "Attach a mount to the service")
+
+ flags.Var(&opts.resources.limitCPU, "limit-cpu", "Limit CPUs")
+ flags.Var(&opts.resources.limitMemBytes, "limit-memory", "Limit Memory")
+ flags.Var(&opts.resources.resCPU, "reserve-cpu", "Reserve CPUs")
+ flags.Var(&opts.resources.resMemBytes, "reserve-memory", "Reserve Memory")
+ flags.Var(&opts.stopGrace, "stop-grace-period", "Time to wait before force killing a container")
+
+ flags.StringVar(&opts.mode, "mode", "replicated", "Service mode (replicated or global)")
+ flags.Var(&opts.replicas, "replicas", "Number of tasks")
+
+ flags.StringVar(&opts.restartPolicy.condition, "restart-condition", "", "Restart when condition is met (none, on_failure, or any)")
+ flags.Var(&opts.restartPolicy.delay, "restart-delay", "Delay between restart attempts")
+ flags.Var(&opts.restartPolicy.maxAttempts, "restart-max-attempts", "Maximum number of restarts before giving up")
+ flags.Var(&opts.restartPolicy.window, "restart-window", "Window used to evalulate the restart policy")
+
+ flags.StringSliceVar(&opts.constraints, "constraint", []string{}, "Placement constraints")
+
+ flags.Uint64Var(&opts.update.parallelism, "update-parallelism", 1, "Maximum number of tasks updated simultaneously")
+ flags.DurationVar(&opts.update.delay, "update-delay", time.Duration(0), "Delay between updates")
+
+ flags.StringSliceVar(&opts.networks, "network", []string{}, "Network attachments")
+ flags.StringVar(&opts.endpoint.mode, "endpoint-mode", "", "Endpoint mode(Valid values: VIP, DNSRR)")
+ flags.VarP(&opts.endpoint.ports, "publish", "p", "Publish a port as a node port")
+}
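create and update both funnel flag values through serviceOptions, so ToService is the single place where CLI input becomes a swarm.ServiceSpec. A hypothetical walk-through of the common case (image name and mount values are illustrative):

func exampleToService() (swarm.ServiceSpec, error) {
	// Roughly the state runCreate sees after:
	//   docker service create --replicas 3 \
	//     --mount type=volume,source=data,target=/data,writable nginx:latest
	opts := newServiceOptions()
	opts.image = "nginx:latest"
	opts.mode = "replicated"
	if err := opts.replicas.Set("3"); err != nil {
		return swarm.ServiceSpec{}, err
	}
	// MountOpt parses each value as one CSV record; the bare "writable"
	// token takes the len(parts) == 1 shortcut in Set, yielding
	// swarm.Mount{Type: "VOLUME", Source: "data", Target: "/data", Writable: true}.
	if err := opts.mounts.Set("type=volume,source=data,target=/data,writable"); err != nil {
		return swarm.ServiceSpec{}, err
	}
	// The result carries Mode.Replicated with *Replicas == 3.
	return opts.ToService()
}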
diff --git a/api/client/service/remove.go b/api/client/service/remove.go
new file mode 100644
index 0000000000..acbdae0f2f
--- /dev/null
+++ b/api/client/service/remove.go
@@ -0,0 +1,47 @@
+package service
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/spf13/cobra"
+ "golang.org/x/net/context"
+)
+
+func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command {
+
+ cmd := &cobra.Command{
+ Use: "rm [OPTIONS] SERVICE",
+ Aliases: []string{"remove"},
+ Short: "Remove a service",
+ Args: cli.RequiresMinArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runRemove(dockerCli, args)
+ },
+ }
+ cmd.Flags()
+
+ return cmd
+}
+
+func runRemove(dockerCli *client.DockerCli, sids []string) error {
+ client := dockerCli.Client()
+
+ ctx := context.Background()
+
+ var errs []string
+ for _, sid := range sids {
+ err := client.ServiceRemove(ctx, sid)
+ if err != nil {
+ errs = append(errs, err.Error())
+ continue
+ }
+ fmt.Fprintf(dockerCli.Out(), "%s\n", sid)
+ }
+ if len(errs) > 0 {
+ return fmt.Errorf(strings.Join(errs, "\n"))
+ }
+ return nil
+}
diff --git a/api/client/service/scale.go b/api/client/service/scale.go
new file mode 100644
index 0000000000..ae528b55d3
--- /dev/null
+++ b/api/client/service/scale.go
@@ -0,0 +1,86 @@
+package service
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/spf13/cobra"
+)
+
+func newScaleCommand(dockerCli *client.DockerCli) *cobra.Command {
+ return &cobra.Command{
+ Use: "scale SERVICE=SCALE [SERVICE=SCALE...]",
+ Short: "Scale one or multiple services",
+ Args: scaleArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runScale(dockerCli, args)
+ },
+ }
+}
+
+func scaleArgs(cmd *cobra.Command, args []string) error {
+ if err := cli.RequiresMinArgs(1)(cmd, args); err != nil {
+ return err
+ }
+ for _, arg := range args {
+ if parts := strings.SplitN(arg, "=", 2); len(parts) != 2 {
+ return fmt.Errorf(
+ "Invalid scale specifier '%s'.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
+ arg,
+ cmd.CommandPath(),
+ cmd.UseLine(),
+ cmd.Short,
+ )
+ }
+ }
+ return nil
+}
+
+func runScale(dockerCli *client.DockerCli, args []string) error {
+ var errors []string
+ for _, arg := range args {
+ parts := strings.SplitN(arg, "=", 2)
+ serviceID, scale := parts[0], parts[1]
+ if err := runServiceScale(dockerCli, serviceID, scale); err != nil {
+ errors = append(errors, fmt.Sprintf("%s: %s", serviceID, err.Error()))
+ }
+ }
+
+ if len(errors) == 0 {
+ return nil
+ }
+ return fmt.Errorf(strings.Join(errors, "\n"))
+}
+
+func runServiceScale(dockerCli *client.DockerCli, serviceID string, scale string) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+
+ service, err := client.ServiceInspect(ctx, serviceID)
+ if err != nil {
+ return err
+ }
+
+ serviceMode := &service.Spec.Mode
+ if serviceMode.Replicated == nil {
+ return fmt.Errorf("scale can only be used with replicated mode")
+ }
+ uintScale, err := strconv.ParseUint(scale, 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid replicas value %s: %s", scale, err.Error())
+ }
+ serviceMode.Replicated.Replicas = &uintScale
+
+ err = client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec)
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(dockerCli.Out(), "%s scaled to %s\n", serviceID, scale)
+ return nil
+}
diff --git a/api/client/service/tasks.go b/api/client/service/tasks.go
new file mode 100644
index 0000000000..6169d8bdb7
--- /dev/null
+++ b/api/client/service/tasks.go
@@ -0,0 +1,65 @@
+package service
+
+import (
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/api/client/idresolver"
+ "github.com/docker/docker/api/client/task"
+ "github.com/docker/docker/cli"
+ "github.com/docker/docker/opts"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/spf13/cobra"
+)
+
+type tasksOptions struct {
+ serviceID string
+ all bool
+ noResolve bool
+ filter opts.FilterOpt
+}
+
+func newTasksCommand(dockerCli *client.DockerCli) *cobra.Command {
+ opts := tasksOptions{filter: opts.NewFilterOpt()}
+
+ cmd := &cobra.Command{
+ Use: "tasks [OPTIONS] SERVICE",
+ Short: "List the tasks of a service",
+ Args: cli.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ opts.serviceID = args[0]
+ return runTasks(dockerCli, opts)
+ },
+ }
+ flags := cmd.Flags()
+ flags.BoolVarP(&opts.all, "all", "a", false, "Display all tasks")
+ flags.BoolVarP(&opts.noResolve, "no-resolve", "n", false, "Do not map IDs to Names")
+ flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided")
+
+ return cmd
+}
+
+func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+
+ service, err := client.ServiceInspect(ctx, opts.serviceID)
+ if err != nil {
+ return err
+ }
+
+ filter := opts.filter.Value()
+ filter.Add("service", service.ID)
+ if !opts.all && !filter.Include("desired_state") {
+ filter.Add("desired_state", string(swarm.TaskStateRunning))
+ filter.Add("desired_state", string(swarm.TaskStateAccepted))
+ }
+
+ tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: filter})
+ if err != nil {
+ return err
+ }
+
+ return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve))
+}
diff --git a/api/client/service/update.go b/api/client/service/update.go
new file mode 100644
index 0000000000..f5483235e8
--- /dev/null
+++ b/api/client/service/update.go
@@ -0,0 +1,244 @@
+package service
+
+import (
+ "fmt"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/docker/docker/opts"
+ runconfigopts "github.com/docker/docker/runconfig/opts"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/docker/go-connections/nat"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+)
+
+func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {
+ opts := newServiceOptions()
+ var flags *pflag.FlagSet
+
+ cmd := &cobra.Command{
+ Use: "update [OPTIONS] SERVICE",
+ Short: "Update a service",
+ Args: cli.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runUpdate(dockerCli, flags, args[0])
+ },
+ }
+
+ flags = cmd.Flags()
+ flags.String("image", "", "Service image tag")
+ flags.StringSlice("command", []string{}, "Service command")
+ flags.StringSlice("arg", []string{}, "Service command args")
+ addServiceFlags(cmd, opts)
+ return cmd
+}
+
+func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, serviceID string) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+
+ service, err := client.ServiceInspect(ctx, serviceID)
+ if err != nil {
+ return err
+ }
+
+ err = mergeService(&service.Spec, flags)
+ if err != nil {
+ return err
+ }
+ err = client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec)
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID)
+ return nil
+}
+
+func mergeService(spec *swarm.ServiceSpec, flags *pflag.FlagSet) error {
+
+ mergeString := func(flag string, field *string) {
+ if flags.Changed(flag) {
+ *field, _ = flags.GetString(flag)
+ }
+ }
+
+ mergeListOpts := func(flag string, field *[]string) {
+ if flags.Changed(flag) {
+ value := flags.Lookup(flag).Value.(*opts.ListOpts)
+ *field = value.GetAll()
+ }
+ }
+
+ mergeSlice := func(flag string, field *[]string) {
+ if flags.Changed(flag) {
+ *field, _ = flags.GetStringSlice(flag)
+ }
+ }
+
+ mergeInt64Value := func(flag string, field *int64) {
+ if flags.Changed(flag) {
+ *field = flags.Lookup(flag).Value.(int64Value).Value()
+ }
+ }
+
+ mergeDuration := func(flag string, field *time.Duration) {
+ if flags.Changed(flag) {
+ *field, _ = flags.GetDuration(flag)
+ }
+ }
+
+ mergeDurationOpt := func(flag string, field *time.Duration) {
+ if flags.Changed(flag) {
+ *field = *flags.Lookup(flag).Value.(*DurationOpt).Value()
+ }
+ }
+
+ mergeUint64 := func(flag string, field *uint64) {
+ if flags.Changed(flag) {
+ *field, _ = flags.GetUint64(flag)
+ }
+ }
+
+ mergeUint64Opt := func(flag string, field *uint64) {
+ if flags.Changed(flag) {
+ *field = *flags.Lookup(flag).Value.(*Uint64Opt).Value()
+ }
+ }
+
+ cspec := &spec.TaskTemplate.ContainerSpec
+ task := &spec.TaskTemplate
+ mergeString("name", &spec.Name)
+ mergeLabels(flags, &spec.Labels)
+ mergeString("image", &cspec.Image)
+ mergeSlice("command", &cspec.Command)
+ mergeSlice("arg", &cspec.Command)
+ mergeListOpts("env", &cspec.Env)
+ mergeString("workdir", &cspec.Dir)
+ mergeString("user", &cspec.User)
+ mergeMounts(flags, &cspec.Mounts)
+
+ mergeInt64Value("limit-cpu", &task.Resources.Limits.NanoCPUs)
+ mergeInt64Value("limit-memory", &task.Resources.Limits.MemoryBytes)
+ mergeInt64Value("reserve-cpu", &task.Resources.Reservations.NanoCPUs)
+ mergeInt64Value("reserve-memory", &task.Resources.Reservations.MemoryBytes)
+
+ mergeDurationOpt("stop-grace-period", cspec.StopGracePeriod)
+
+ if flags.Changed("restart-policy-condition") {
+ value, _ := flags.GetString("restart-policy-condition")
+ task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value)
+ }
+ mergeDurationOpt("restart-policy-delay", task.RestartPolicy.Delay)
+ mergeUint64Opt("restart-policy-max-attempts", task.RestartPolicy.MaxAttempts)
+ mergeDurationOpt("restart-policy-window", task.RestartPolicy.Window)
+ mergeSlice("constraint", &task.Placement.Constraints)
+
+ if err := mergeMode(flags, &spec.Mode); err != nil {
+ return err
+ }
+
+ mergeUint64("updateconfig-parallelism", &spec.UpdateConfig.Parallelism)
+ mergeDuration("updateconfig-delay", &spec.UpdateConfig.Delay)
+
+ mergeNetworks(flags, &spec.Networks)
+ if flags.Changed("endpoint-mode") {
+ value, _ := flags.GetString("endpoint-mode")
+ spec.EndpointSpec.Mode = swarm.ResolutionMode(value)
+ }
+
+ mergePorts(flags, &spec.EndpointSpec.Ports)
+
+ return nil
+}
+
+func mergeLabels(flags *pflag.FlagSet, field *map[string]string) {
+ if !flags.Changed("label") {
+ return
+ }
+
+ if *field == nil {
+ *field = make(map[string]string)
+ }
+
+ values := flags.Lookup("label").Value.(*opts.ListOpts).GetAll()
+ for key, value := range runconfigopts.ConvertKVStringsToMap(values) {
+ (*field)[key] = value
+ }
+}
+
+// TODO: should this override by destination path, or does swarm handle that?
+func mergeMounts(flags *pflag.FlagSet, mounts *[]swarm.Mount) {
+ if !flags.Changed("mount") {
+ return
+ }
+
+ values := flags.Lookup("mount").Value.(*MountOpt).Value()
+ *mounts = append(*mounts, values...)
+}
+
+// TODO: should this override by name, or does swarm handle that?
+func mergePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) {
+ if !flags.Changed("ports") {
+ return
+ }
+
+ values := flags.Lookup("ports").Value.(*opts.ListOpts).GetAll()
+ ports, portBindings, _ := nat.ParsePortSpecs(values)
+
+ for port := range ports {
+ *portConfig = append(*portConfig, convertPortToPortConfig(port, portBindings)...)
+ }
+}
+
+func mergeNetworks(flags *pflag.FlagSet, attachments *[]swarm.NetworkAttachmentConfig) {
+ if !flags.Changed("network") {
+ return
+ }
+ networks, _ := flags.GetStringSlice("network")
+ for _, network := range networks {
+ *attachments = append(*attachments, swarm.NetworkAttachmentConfig{Target: network})
+ }
+}
+
+func mergeMode(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error {
+ if !flags.Changed("mode") && !flags.Changed("scale") {
+ return nil
+ }
+
+ var mode string
+ if flags.Changed("mode") {
+ mode, _ = flags.GetString("mode")
+ }
+
+ if !(mode == "replicated" || serviceMode.Replicated != nil) && flags.Changed("replicas") {
+ return fmt.Errorf("replicas can only be used with replicated mode")
+ }
+
+ if mode == "global" {
+ serviceMode.Replicated = nil
+ serviceMode.Global = &swarm.GlobalService{}
+ return nil
+ }
+
+ if flags.Changed("replicas") {
+ replicas := flags.Lookup("replicas").Value.(*Uint64Opt).Value()
+ serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas}
+ serviceMode.Global = nil
+ return nil
+ }
+
+ if mode == "replicated" {
+ if serviceMode.Replicated != nil {
+ return nil
+ }
+ serviceMode.Replicated = &swarm.ReplicatedService{Replicas: &DefaultReplicas}
+ serviceMode.Global = nil
+ }
+
+ return nil
+}
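mergeMode is the only merge helper with cross-field rules: --replicas is rejected unless the service is (or becomes) replicated, switching to global clears the replicated config, and switching to replicated without --replicas falls back to DefaultReplicas. A sketch of the common scale-style update (flag values are illustrative):

func exampleMergeMode() (swarm.ServiceMode, error) {
	flags := pflag.NewFlagSet("update", pflag.ContinueOnError)
	flags.String("mode", "replicated", "")
	flags.Var(&Uint64Opt{}, "replicas", "")
	if err := flags.Parse([]string{"--replicas", "5"}); err != nil {
		return swarm.ServiceMode{}, err
	}

	// The service is already replicated, so only the count changes:
	// mode.Replicated.Replicas ends up pointing at 5, Global stays nil.
	mode := swarm.ServiceMode{Replicated: &swarm.ReplicatedService{}}
	err := mergeMode(flags, &mode)
	return mode, err
}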
diff --git a/api/client/swarm/cmd.go b/api/client/swarm/cmd.go
new file mode 100644
index 0000000000..0c40d20d9c
--- /dev/null
+++ b/api/client/swarm/cmd.go
@@ -0,0 +1,30 @@
+package swarm
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+)
+
+// NewSwarmCommand returns a cobra command for `swarm` subcommands
+func NewSwarmCommand(dockerCli *client.DockerCli) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "swarm",
+ Short: "Manage docker swarm",
+ Args: cli.NoArgs,
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
+ },
+ }
+ cmd.AddCommand(
+ newInitCommand(dockerCli),
+ newJoinCommand(dockerCli),
+ newUpdateCommand(dockerCli),
+ newLeaveCommand(dockerCli),
+ newInspectCommand(dockerCli),
+ )
+ return cmd
+}
diff --git a/api/client/swarm/init.go b/api/client/swarm/init.go
new file mode 100644
index 0000000000..0c66246390
--- /dev/null
+++ b/api/client/swarm/init.go
@@ -0,0 +1,61 @@
+package swarm
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/spf13/cobra"
+)
+
+type initOptions struct {
+ listenAddr NodeAddrOption
+ autoAccept AutoAcceptOption
+ forceNewCluster bool
+ secret string
+}
+
+func newInitCommand(dockerCli *client.DockerCli) *cobra.Command {
+ opts := initOptions{
+ listenAddr: NewNodeAddrOption(),
+ autoAccept: NewAutoAcceptOption(),
+ }
+
+ cmd := &cobra.Command{
+ Use: "init",
+ Short: "Initialize a Swarm.",
+ Args: cli.NoArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runInit(dockerCli, opts)
+ },
+ }
+
+ flags := cmd.Flags()
+ flags.Var(&opts.listenAddr, "listen-addr", "Listen address")
+ flags.Var(&opts.autoAccept, "auto-accept", "Auto acceptance policy (worker, manager, or none)")
+ flags.StringVar(&opts.secret, "secret", "", "Set secret value needed to accept nodes into cluster")
+ flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state.")
+ return cmd
+}
+
+func runInit(dockerCli *client.DockerCli, opts initOptions) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+
+ req := swarm.InitRequest{
+ ListenAddr: opts.listenAddr.String(),
+ ForceNewCluster: opts.forceNewCluster,
+ }
+
+ req.Spec.AcceptancePolicy.Policies = opts.autoAccept.Policies(opts.secret)
+
+ nodeID, err := client.SwarmInit(ctx, req)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("Swarm initialized: current node (%s) is now a manager.\n", nodeID)
+ return nil
+}
diff --git a/api/client/swarm/inspect.go b/api/client/swarm/inspect.go
new file mode 100644
index 0000000000..407a0bfb83
--- /dev/null
+++ b/api/client/swarm/inspect.go
@@ -0,0 +1,56 @@
+package swarm
+
+import (
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/api/client/inspect"
+ "github.com/docker/docker/cli"
+ "github.com/spf13/cobra"
+)
+
+type inspectOptions struct {
+ format string
+ // pretty bool
+}
+
+func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command {
+ var opts inspectOptions
+
+ cmd := &cobra.Command{
+ Use: "inspect [OPTIONS]",
+ Short: "Inspect the Swarm",
+ Args: cli.NoArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ // if opts.pretty && len(opts.format) > 0 {
+ // return fmt.Errorf("--format is incompatible with human friendly format")
+ // }
+ return runInspect(dockerCli, opts)
+ },
+ }
+
+ flags := cmd.Flags()
+ flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template")
+ //flags.BoolVarP(&opts.pretty, "pretty", "h", false, "Print the information in a human friendly format.")
+ return cmd
+}
+
+func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+
+ swarm, err := client.SwarmInspect(ctx)
+ if err != nil {
+ return err
+ }
+
+ getRef := func(_ string) (interface{}, []byte, error) {
+ return swarm, nil, nil
+ }
+
+ // if !opts.pretty {
+ return inspect.Inspect(dockerCli.Out(), []string{""}, opts.format, getRef)
+ // }
+
+ //return printHumanFriendly(dockerCli.Out(), opts.refs, getRef)
+}
diff --git a/api/client/swarm/join.go b/api/client/swarm/join.go
new file mode 100644
index 0000000000..346445f783
--- /dev/null
+++ b/api/client/swarm/join.go
@@ -0,0 +1,65 @@
+package swarm
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/spf13/cobra"
+ "golang.org/x/net/context"
+)
+
+type joinOptions struct {
+ remote string
+ listenAddr NodeAddrOption
+ manager bool
+ secret string
+ CACertHash string
+}
+
+func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command {
+ opts := joinOptions{
+ listenAddr: NodeAddrOption{addr: defaultListenAddr},
+ }
+
+ cmd := &cobra.Command{
+ Use: "join [OPTIONS] HOST:PORT",
+ Short: "Join a Swarm as a node and/or manager.",
+ Args: cli.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ opts.remote = args[0]
+ return runJoin(dockerCli, opts)
+ },
+ }
+
+ flags := cmd.Flags()
+ flags.Var(&opts.listenAddr, "listen-addr", "Listen address")
+ flags.BoolVar(&opts.manager, "manager", false, "Try joining as a manager.")
+ flags.StringVar(&opts.secret, "secret", "", "Secret for node acceptance")
+ flags.StringVar(&opts.CACertHash, "ca-hash", "", "Hash of the Root Certificate Authority certificate used for trusted join")
+ return cmd
+}
+
+func runJoin(dockerCli *client.DockerCli, opts joinOptions) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+
+ req := swarm.JoinRequest{
+ Manager: opts.manager,
+ Secret: opts.secret,
+ ListenAddr: opts.listenAddr.String(),
+ RemoteAddrs: []string{opts.remote},
+ CACertHash: opts.CACertHash,
+ }
+ err := client.SwarmJoin(ctx, req)
+ if err != nil {
+ return err
+ }
+ if opts.manager {
+ fmt.Fprintln(dockerCli.Out(), "This node joined a Swarm as a manager.")
+ } else {
+ fmt.Fprintln(dockerCli.Out(), "This node joined a Swarm as a worker.")
+ }
+ return nil
+}
diff --git a/api/client/swarm/leave.go b/api/client/swarm/leave.go
new file mode 100644
index 0000000000..e3f8fbfa57
--- /dev/null
+++ b/api/client/swarm/leave.go
@@ -0,0 +1,44 @@
+package swarm
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/spf13/cobra"
+)
+
+type leaveOptions struct {
+ force bool
+}
+
+func newLeaveCommand(dockerCli *client.DockerCli) *cobra.Command {
+ opts := leaveOptions{}
+
+ cmd := &cobra.Command{
+ Use: "leave",
+ Short: "Leave a Swarm.",
+ Args: cli.NoArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runLeave(dockerCli, opts)
+ },
+ }
+
+ flags := cmd.Flags()
+ flags.BoolVar(&opts.force, "force", false, "Force leave ignoring warnings.")
+ return cmd
+}
+
+func runLeave(dockerCli *client.DockerCli, opts leaveOptions) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+
+ if err := client.SwarmLeave(ctx, opts.force); err != nil {
+ return err
+ }
+
+ fmt.Fprintln(dockerCli.Out(), "Node left the default swarm.")
+ return nil
+}
diff --git a/api/client/swarm/opts.go b/api/client/swarm/opts.go
new file mode 100644
index 0000000000..fa543b6596
--- /dev/null
+++ b/api/client/swarm/opts.go
@@ -0,0 +1,120 @@
+package swarm
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/docker/engine-api/types/swarm"
+)
+
+const (
+ defaultListenAddr = "0.0.0.0:2377"
+ // WORKER constant for worker name
+ WORKER = "WORKER"
+ // MANAGER constant for manager name
+ MANAGER = "MANAGER"
+)
+
+var (
+ defaultPolicies = []swarm.Policy{
+ {Role: WORKER, Autoaccept: true},
+ {Role: MANAGER, Autoaccept: false},
+ }
+)
+
+// NodeAddrOption is a pflag.Value for listen and remote addresses
+type NodeAddrOption struct {
+ addr string
+}
+
+// String prints the representation of this flag
+func (a *NodeAddrOption) String() string {
+ return a.addr
+}
+
+// Set the value for this flag
+func (a *NodeAddrOption) Set(value string) error {
+ if !strings.Contains(value, ":") {
+ return fmt.Errorf("Invalud url, a host and port are required")
+ }
+
+ parts := strings.Split(value, ":")
+ if len(parts) != 2 {
+ return fmt.Errorf("Invalud url, too many colons")
+ }
+
+ a.addr = value
+ return nil
+}
+
+// Type returns the type of this flag
+func (a *NodeAddrOption) Type() string {
+ return "node-addr"
+}
+
+// NewNodeAddrOption returns a new node address option
+func NewNodeAddrOption() NodeAddrOption {
+ return NodeAddrOption{addr: defaultListenAddr}
+}
+
+// AutoAcceptOption is a value type for auto-accept policy
+type AutoAcceptOption struct {
+ values map[string]bool
+}
+
+// String prints a string representation of this option
+func (o *AutoAcceptOption) String() string {
+ keys := []string{}
+ for key := range o.values {
+ keys = append(keys, key)
+ }
+ return strings.Join(keys, " ")
+}
+
+// Set sets a new value on this option
+func (o *AutoAcceptOption) Set(value string) error {
+ value = strings.ToUpper(value)
+ switch value {
+ case "", "NONE":
+ if accept, ok := o.values[WORKER]; ok && accept {
+ return fmt.Errorf("value NONE is incompatible with %s", WORKER)
+ }
+ if accept, ok := o.values[MANAGER]; ok && accept {
+ return fmt.Errorf("value NONE is incompatible with %s", MANAGER)
+ }
+ o.values[WORKER] = false
+ o.values[MANAGER] = false
+ case WORKER, MANAGER:
+ if accept, ok := o.values[value]; ok && !accept {
+ return fmt.Errorf("value NONE is incompatible with %s", value)
+ }
+ o.values[value] = true
+ default:
+ return fmt.Errorf("must be one of %s, %s, NONE", WORKER, MANAGER)
+ }
+
+ return nil
+}
+
+// Type returns the type of this option
+func (o *AutoAcceptOption) Type() string {
+ return "auto-accept"
+}
+
+// Policies returns a representation of this option for the api
+func (o *AutoAcceptOption) Policies(secret string) []swarm.Policy {
+ policies := []swarm.Policy{}
+ for _, p := range defaultPolicies {
+ if len(o.values) != 0 {
+ p.Autoaccept = o.values[string(p.Role)]
+ }
+ p.Secret = secret
+ policies = append(policies, p)
+ }
+ return policies
+}
+
+// NewAutoAcceptOption returns a new auto-accept option
+func NewAutoAcceptOption() AutoAcceptOption {
+ return AutoAcceptOption{values: make(map[string]bool)}
+}
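Policies starts from defaultPolicies and overlays whatever the user set, attaching the secret to every policy it returns. A hypothetical invocation (secret value is illustrative):

func exampleAutoAccept() ([]swarm.Policy, error) {
	opt := NewAutoAcceptOption()
	// Equivalent to: --auto-accept worker
	if err := opt.Set("worker"); err != nil {
		return nil, err
	}
	// WORKER now auto-accepts, MANAGER keeps its default of manual
	// acceptance, and both returned policies carry the secret.
	return opt.Policies("my-secret"), nil
}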
diff --git a/api/client/swarm/update.go b/api/client/swarm/update.go
new file mode 100644
index 0000000000..59aef4c8af
--- /dev/null
+++ b/api/client/swarm/update.go
@@ -0,0 +1,93 @@
+package swarm
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+)
+
+type updateOptions struct {
+ autoAccept AutoAcceptOption
+ secret string
+ taskHistoryLimit int64
+ heartbeatPeriod uint64
+}
+
+func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {
+ opts := updateOptions{autoAccept: NewAutoAcceptOption()}
+ var flags *pflag.FlagSet
+
+ cmd := &cobra.Command{
+ Use: "update",
+ Short: "update the Swarm.",
+ Args: cli.NoArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runUpdate(dockerCli, flags, opts)
+ },
+ }
+
+ flags = cmd.Flags()
+ flags.Var(&opts.autoAccept, "auto-accept", "Auto acceptance policy (worker, manager or none)")
+ flags.StringVar(&opts.secret, "secret", "", "Set secret value needed to accept nodes into cluster")
+ flags.Int64Var(&opts.taskHistoryLimit, "task-history-limit", 10, "Task history retention limit")
+ flags.Uint64Var(&opts.heartbeatPeriod, "dispatcher-heartbeat-period", 5000000000, "Dispatcher heartbeat period")
+ return cmd
+}
+
+func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts updateOptions) error {
+ client := dockerCli.Client()
+ ctx := context.Background()
+
+ swarm, err := client.SwarmInspect(ctx)
+ if err != nil {
+ return err
+ }
+
+ err = mergeSwarm(&swarm, flags)
+ if err != nil {
+ return err
+ }
+ err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("Swarm updated.")
+ return nil
+}
+
+func mergeSwarm(swarm *swarm.Swarm, flags *pflag.FlagSet) error {
+ spec := &swarm.Spec
+
+ if flags.Changed("auto-accept") {
+ value := flags.Lookup("auto-accept").Value.(*AutoAcceptOption)
+ if len(spec.AcceptancePolicy.Policies) > 0 {
+ spec.AcceptancePolicy.Policies = value.Policies(spec.AcceptancePolicy.Policies[0].Secret)
+ } else {
+ spec.AcceptancePolicy.Policies = value.Policies("")
+ }
+ }
+
+ if flags.Changed("secret") {
+ secret, _ := flags.GetString("secret")
+ // Assign through the index: ranging by value would update a copy of
+ // each policy and silently drop the new secret.
+ for i := range spec.AcceptancePolicy.Policies {
+ spec.AcceptancePolicy.Policies[i].Secret = secret
+ }
+ }
+
+ if flags.Changed("task-history-limit") {
+ spec.Orchestration.TaskHistoryRetentionLimit, _ = flags.GetInt64("task-history-limit")
+ }
+
+ if flags.Changed("dispatcher-heartbeat-period") {
+ spec.Dispatcher.HeartbeatPeriod, _ = flags.GetUint64("dispatcher-heartbeat-period")
+ }
+
+ return nil
+}
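
The update path is a read-merge-write cycle against the swarm's version. A condensed sketch of the same flow, assuming apiClient is an engine-api client such as the one returned by dockerCli.Client() above:

    func bumpTaskHistory(ctx context.Context, apiClient client.APIClient) error {
        sw, err := apiClient.SwarmInspect(ctx) // read spec plus current version
        if err != nil {
            return err
        }
        sw.Spec.Orchestration.TaskHistoryRetentionLimit = 20 // merge a change
        // The write carries the version read above; the daemon rejects it
        // if the spec changed in between (optimistic concurrency).
        return apiClient.SwarmUpdate(ctx, sw.Version, sw.Spec)
    }
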
diff --git a/api/client/tag.go b/api/client/tag.go
new file mode 100644
index 0000000000..0b6a073ccb
--- /dev/null
+++ b/api/client/tag.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "golang.org/x/net/context"
+
+ Cli "github.com/docker/docker/cli"
+ flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdTag tags an image into a repository.
+//
+// Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]
+func (cli *DockerCli) CmdTag(args ...string) error {
+ cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, Cli.DockerCommands["tag"].Description, true)
+ cmd.Require(flag.Exact, 2)
+
+ cmd.ParseFlags(args, true)
+
+ return cli.client.ImageTag(context.Background(), cmd.Arg(0), cmd.Arg(1))
+}
diff --git a/api/client/task/print.go b/api/client/task/print.go
new file mode 100644
index 0000000000..4909f159d5
--- /dev/null
+++ b/api/client/task/print.go
@@ -0,0 +1,79 @@
+package task
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "text/tabwriter"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/api/client/idresolver"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/docker/go-units"
+)
+
+const (
+ psTaskItemFmt = "%s\t%s\t%s\t%s\t%s %s\t%s\t%s\n"
+)
+
+type tasksBySlot []swarm.Task
+
+func (t tasksBySlot) Len() int {
+ return len(t)
+}
+
+func (t tasksBySlot) Swap(i, j int) {
+ t[i], t[j] = t[j], t[i]
+}
+
+func (t tasksBySlot) Less(i, j int) bool {
+ // Sort by slot.
+ if t[i].Slot != t[j].Slot {
+ return t[i].Slot < t[j].Slot
+ }
+
+ // If same slot, sort by most recent.
+ return t[j].Meta.CreatedAt.Before(t[i].CreatedAt)
+}
+
+// Print prints task information in a table format
+func Print(dockerCli *client.DockerCli, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver) error {
+ sort.Stable(tasksBySlot(tasks))
+
+ writer := tabwriter.NewWriter(dockerCli.Out(), 0, 4, 2, ' ', 0)
+
+ // Ignore flushing errors
+ defer writer.Flush()
+ fmt.Fprintln(writer, strings.Join([]string{"ID", "NAME", "SERVICE", "IMAGE", "LAST STATE", "DESIRED STATE", "NODE"}, "\t"))
+ for _, task := range tasks {
+ serviceValue, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID)
+ if err != nil {
+ return err
+ }
+ nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID)
+ if err != nil {
+ return err
+ }
+ name := serviceValue
+ if task.Slot > 0 {
+ name = fmt.Sprintf("%s.%d", name, task.Slot)
+ }
+ fmt.Fprintf(
+ writer,
+ psTaskItemFmt,
+ task.ID,
+ name,
+ serviceValue,
+ task.Spec.ContainerSpec.Image,
+ client.PrettyPrint(task.Status.State),
+ units.HumanDuration(time.Since(task.Status.Timestamp)),
+ client.PrettyPrint(task.DesiredState),
+ nodeValue,
+ )
+ }
+
+ return nil
+}
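
The slot ordering means replicas print as service.1, service.2, and so on, with the newest task first within a slot. A quick illustration of the comparator, assuming the engine-api swarm types:

    tasks := []swarm.Task{
        {ID: "b", Slot: 2},
        {ID: "a", Slot: 1},
        {ID: "c", Slot: 1, Meta: swarm.Meta{CreatedAt: time.Now()}},
    }
    sort.Stable(tasksBySlot(tasks))
    // Order is now: c (slot 1, newest), a (slot 1), b (slot 2)
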
diff --git a/api/client/utils.go b/api/client/utils.go
index e7497ac41a..043444eae5 100644
--- a/api/client/utils.go
+++ b/api/client/utils.go
@@ -8,6 +8,7 @@ import (
gosignal "os/signal"
"path/filepath"
"runtime"
+ "strings"
"time"
"golang.org/x/net/context"
@@ -163,3 +164,27 @@ func (cli *DockerCli) ForwardAllSignals(ctx context.Context, cid string) chan os
}()
return sigc
}
+
+// capitalizeFirst capitalizes the first character of a string
+func capitalizeFirst(s string) string {
+ switch l := len(s); l {
+ case 0:
+ return s
+ case 1:
+ return strings.ToUpper(s)
+ default:
+ return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:])
+ }
+}
+
+// PrettyPrint outputs arbitrary data in a human-readable format by capitalizing the first letter.
+func PrettyPrint(i interface{}) string {
+ switch t := i.(type) {
+ case nil:
+ return "None"
+ case string:
+ return capitalizeFirst(t)
+ default:
+ return capitalizeFirst(fmt.Sprintf("%s", t))
+ }
+}
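
A few illustrative inputs and outputs for PrettyPrint, as used by the task printer above (a sketch, called from outside the client package):

    fmt.Println(client.PrettyPrint("running")) // Running
    fmt.Println(client.PrettyPrint(nil))       // None
    // Any other value is stringified via %s first, then capitalized.
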
diff --git a/api/server/httputils/errors.go b/api/server/httputils/errors.go
index 0f89dd90fc..da4db97915 100644
--- a/api/server/httputils/errors.go
+++ b/api/server/httputils/errors.go
@@ -8,6 +8,7 @@ import (
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/versions"
"github.com/gorilla/mux"
+ "google.golang.org/grpc"
)
// httpStatusError is an interface
@@ -58,6 +59,7 @@ func GetHTTPErrorStatusCode(err error) int {
"wrong login/password": http.StatusUnauthorized,
"unauthorized": http.StatusUnauthorized,
"hasn't been activated": http.StatusForbidden,
+ "this node": http.StatusNotAcceptable,
} {
if strings.Contains(errStr, keyword) {
statusCode = status
@@ -85,7 +87,7 @@ func MakeErrorHandler(err error) http.HandlerFunc {
}
WriteJSON(w, statusCode, response)
} else {
- http.Error(w, err.Error(), statusCode)
+ http.Error(w, grpc.ErrorDesc(err), statusCode)
}
}
}
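
grpc.ErrorDesc extracts just the human-readable description from a gRPC error, so API clients see the message itself rather than the full wrapped string; for a plain non-gRPC error it falls back to err.Error(). A sketch of the difference:

    err := grpc.Errorf(codes.NotFound, "node xyz not found")
    fmt.Println(err.Error())         // "rpc error: code = ... desc = node xyz not found"
    fmt.Println(grpc.ErrorDesc(err)) // "node xyz not found"
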
diff --git a/api/server/router/network/backend.go b/api/server/router/network/backend.go
index 4873e1ea28..6e322fa378 100644
--- a/api/server/router/network/backend.go
+++ b/api/server/router/network/backend.go
@@ -2,7 +2,6 @@ package network
import (
"github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/filters"
"github.com/docker/engine-api/types/network"
"github.com/docker/libnetwork"
)
@@ -13,7 +12,7 @@ type Backend interface {
FindNetwork(idName string) (libnetwork.Network, error)
GetNetworkByName(idName string) (libnetwork.Network, error)
GetNetworksByID(partialID string) []libnetwork.Network
- FilterNetworks(netFilters filters.Args) ([]libnetwork.Network, error)
+ GetNetworks() []libnetwork.Network
CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
DisconnectContainerFromNetwork(containerName string, network libnetwork.Network, force bool) error
diff --git a/api/server/router/network/filter.go b/api/server/router/network/filter.go
new file mode 100644
index 0000000000..b1c1dd187d
--- /dev/null
+++ b/api/server/router/network/filter.go
@@ -0,0 +1,98 @@
+package network
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/runconfig"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+)
+
+type filterHandler func([]types.NetworkResource, string) ([]types.NetworkResource, error)
+
+var (
+ // AcceptedFilters is the set of filters accepted for validation
+ AcceptedFilters = map[string]bool{
+ "driver": true,
+ "type": true,
+ "name": true,
+ "id": true,
+ "label": true,
+ }
+)
+
+func filterNetworkByType(nws []types.NetworkResource, netType string) (retNws []types.NetworkResource, err error) {
+ switch netType {
+ case "builtin":
+ for _, nw := range nws {
+ if runconfig.IsPreDefinedNetwork(nw.Name) {
+ retNws = append(retNws, nw)
+ }
+ }
+ case "custom":
+ for _, nw := range nws {
+ if !runconfig.IsPreDefinedNetwork(nw.Name) {
+ retNws = append(retNws, nw)
+ }
+ }
+ default:
+ return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType)
+ }
+ return retNws, nil
+}
+
+// filterNetworks filters the network list according to the user-specified filter
+// and returns the selected networks
+func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) {
+ // if filter is empty, return original network list
+ if filter.Len() == 0 {
+ return nws, nil
+ }
+
+ if err := filter.Validate(AcceptedFilters); err != nil {
+ return nil, err
+ }
+
+ var displayNet []types.NetworkResource
+ for _, nw := range nws {
+ if filter.Include("driver") {
+ if !filter.ExactMatch("driver", nw.Driver) {
+ continue
+ }
+ }
+ if filter.Include("name") {
+ if !filter.Match("name", nw.Name) {
+ continue
+ }
+ }
+ if filter.Include("id") {
+ if !filter.Match("id", nw.ID) {
+ continue
+ }
+ }
+ if filter.Include("label") {
+ if !filter.MatchKVList("label", nw.Labels) {
+ continue
+ }
+ }
+ displayNet = append(displayNet, nw)
+ }
+
+ if filter.Include("type") {
+ var typeNet []types.NetworkResource
+ errFilter := filter.WalkValues("type", func(fval string) error {
+ passList, err := filterNetworkByType(displayNet, fval)
+ if err != nil {
+ return err
+ }
+ typeNet = append(typeNet, passList...)
+ return nil
+ })
+ if errFilter != nil {
+ return nil, errFilter
+ }
+ displayNet = typeNet
+ }
+
+ return displayNet, nil
+}
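
A sketch of how a query such as filters={"driver":{"overlay":true},"type":{"custom":true}} flows through this helper, assuming the engine-api filters package:

    args := filters.NewArgs()
    args.Add("driver", "overlay")
    args.Add("type", "custom")
    // Keeps only user-defined overlay networks; an unknown filter key
    // would fail args.Validate(AcceptedFilters) above.
    filtered, err := filterNetworks(networks, args)
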
diff --git a/api/server/router/network/network.go b/api/server/router/network/network.go
index 7c88089623..8688c3ed1f 100644
--- a/api/server/router/network/network.go
+++ b/api/server/router/network/network.go
@@ -1,17 +1,22 @@
package network
-import "github.com/docker/docker/api/server/router"
+import (
+ "github.com/docker/docker/api/server/router"
+ "github.com/docker/docker/daemon/cluster"
+)
// networkRouter is a router to talk with the network controller
type networkRouter struct {
- backend Backend
- routes []router.Route
+ backend Backend
+ clusterProvider *cluster.Cluster
+ routes []router.Route
}
// NewRouter initializes a new network router
-func NewRouter(b Backend) router.Router {
+func NewRouter(b Backend, c *cluster.Cluster) router.Router {
r := &networkRouter{
- backend: b,
+ backend: b,
+ clusterProvider: c,
}
r.initRoutes()
return r
diff --git a/api/server/router/network/network_routes.go b/api/server/router/network/network_routes.go
index 8da6de1577..7e5b94cb91 100644
--- a/api/server/router/network/network_routes.go
+++ b/api/server/router/network/network_routes.go
@@ -24,17 +24,30 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
return err
}
- list := []*types.NetworkResource{}
+ list := []types.NetworkResource{}
- nwList, err := n.backend.FilterNetworks(netFilters)
- if err != nil {
- return err
+ if nr, err := n.clusterProvider.GetNetworks(); err == nil {
+ for _, nw := range nr {
+ list = append(list, nw)
+ }
}
- for _, nw := range nwList {
- list = append(list, buildNetworkResource(nw))
+ // Add the networks returned by the Docker daemon that are not already
+ // present in the list returned by the cluster manager
+SKIP:
+ for _, nw := range n.backend.GetNetworks() {
+ for _, nl := range list {
+ if nl.ID == nw.ID() {
+ continue SKIP
+ }
+ }
+ list = append(list, *n.buildNetworkResource(nw))
}
+ list, err = filterNetworks(list, netFilters)
+ if err != nil {
+ return err
+ }
return httputils.WriteJSON(w, http.StatusOK, list)
}
@@ -45,9 +58,12 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
nw, err := n.backend.FindNetwork(vars["id"])
if err != nil {
+ if nr, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil {
+ return httputils.WriteJSON(w, http.StatusOK, nr)
+ }
return err
}
- return httputils.WriteJSON(w, http.StatusOK, buildNetworkResource(nw))
+ return httputils.WriteJSON(w, http.StatusOK, n.buildNetworkResource(nw))
}
func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -67,7 +83,14 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
nw, err := n.backend.CreateNetwork(create)
if err != nil {
- return err
+ if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
+ return err
+ }
+ id, err := n.clusterProvider.CreateNetwork(create)
+ if err != nil {
+ return err
+ }
+ nw = &types.NetworkCreateResponse{ID: id}
}
return httputils.WriteJSON(w, http.StatusCreated, nw)
@@ -121,6 +144,9 @@ func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter
if err := httputils.ParseForm(r); err != nil {
return err
}
+ if _, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil {
+ return n.clusterProvider.RemoveNetwork(vars["id"])
+ }
if err := n.backend.DeleteNetwork(vars["id"]); err != nil {
return err
}
@@ -128,7 +154,7 @@ func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter
return nil
}
-func buildNetworkResource(nw libnetwork.Network) *types.NetworkResource {
+func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource {
r := &types.NetworkResource{}
if nw == nil {
return r
@@ -138,6 +164,13 @@ func buildNetworkResource(nw libnetwork.Network) *types.NetworkResource {
r.Name = nw.Name()
r.ID = nw.ID()
r.Scope = info.Scope()
+ if n.clusterProvider.IsManager() {
+ if _, err := n.clusterProvider.GetNetwork(nw.Name()); err == nil {
+ r.Scope = "swarm"
+ }
+ } else if info.Dynamic() {
+ r.Scope = "swarm"
+ }
r.Driver = nw.Type()
r.EnableIPv6 = info.IPv6Enabled()
r.Internal = info.Internal()
diff --git a/api/server/router/swarm/backend.go b/api/server/router/swarm/backend.go
new file mode 100644
index 0000000000..05fe00a0c2
--- /dev/null
+++ b/api/server/router/swarm/backend.go
@@ -0,0 +1,26 @@
+package swarm
+
+import (
+ basictypes "github.com/docker/engine-api/types"
+ types "github.com/docker/engine-api/types/swarm"
+)
+
+// Backend abstracts a swarm manager.
+type Backend interface {
+ Init(req types.InitRequest) (string, error)
+ Join(req types.JoinRequest) error
+ Leave(force bool) error
+ Inspect() (types.Swarm, error)
+ Update(uint64, types.Spec) error
+ GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
+ GetService(string) (types.Service, error)
+ CreateService(types.ServiceSpec) (string, error)
+ UpdateService(string, uint64, types.ServiceSpec) error
+ RemoveService(string) error
+ GetNodes(basictypes.NodeListOptions) ([]types.Node, error)
+ GetNode(string) (types.Node, error)
+ UpdateNode(string, uint64, types.NodeSpec) error
+ RemoveNode(string) error
+ GetTasks(basictypes.TaskListOptions) ([]types.Task, error)
+ GetTask(string) (types.Task, error)
+}
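
The daemon wires this interface to the cluster component added in this PR (see cmd/dockerd/daemon.go below, where swarmrouter.NewRouter receives a *cluster.Cluster). A compile-time assertion sketch of that relationship:

    package swarm

    import "github.com/docker/docker/daemon/cluster"

    // Fails to compile if *cluster.Cluster stops satisfying Backend.
    var _ Backend = (*cluster.Cluster)(nil)
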
diff --git a/api/server/router/swarm/cluster.go b/api/server/router/swarm/cluster.go
new file mode 100644
index 0000000000..a67ffa9632
--- /dev/null
+++ b/api/server/router/swarm/cluster.go
@@ -0,0 +1,44 @@
+package swarm
+
+import "github.com/docker/docker/api/server/router"
+
+// swarmRouter is a router to talk with the swarm controller
+type swarmRouter struct {
+ backend Backend
+ routes []router.Route
+}
+
+// NewRouter initializes a new swarm router
+func NewRouter(b Backend) router.Router {
+ r := &swarmRouter{
+ backend: b,
+ }
+ r.initRoutes()
+ return r
+}
+
+// Routes returns the available routes to the swarm controller
+func (sr *swarmRouter) Routes() []router.Route {
+ return sr.routes
+}
+
+func (sr *swarmRouter) initRoutes() {
+ sr.routes = []router.Route{
+ router.NewPostRoute("/swarm/init", sr.initCluster),
+ router.NewPostRoute("/swarm/join", sr.joinCluster),
+ router.NewPostRoute("/swarm/leave", sr.leaveCluster),
+ router.NewGetRoute("/swarm", sr.inspectCluster),
+ router.NewPostRoute("/swarm/update", sr.updateCluster),
+ router.NewGetRoute("/services", sr.getServices),
+ router.NewGetRoute("/services/{id:.*}", sr.getService),
+ router.NewPostRoute("/services/create", sr.createService),
+ router.NewPostRoute("/services/{id:.*}/update", sr.updateService),
+ router.NewDeleteRoute("/services/{id:.*}", sr.removeService),
+ router.NewGetRoute("/nodes", sr.getNodes),
+ router.NewGetRoute("/nodes/{id:.*}", sr.getNode),
+ router.NewDeleteRoute("/nodes/{id:.*}", sr.removeNode),
+ router.NewPostRoute("/nodes/{id:.*}/update", sr.updateNode),
+ router.NewGetRoute("/tasks", sr.getTasks),
+ router.NewGetRoute("/tasks/{id:.*}", sr.getTask),
+ }
+}
diff --git a/api/server/router/swarm/cluster_routes.go b/api/server/router/swarm/cluster_routes.go
new file mode 100644
index 0000000000..50f823e07e
--- /dev/null
+++ b/api/server/router/swarm/cluster_routes.go
@@ -0,0 +1,229 @@
+package swarm
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strconv"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/api/server/httputils"
+ basictypes "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ types "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ var req types.InitRequest
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ return err
+ }
+ nodeID, err := sr.backend.Init(req)
+ if err != nil {
+ logrus.Errorf("Error initializing swarm: %v", err)
+ return err
+ }
+ return httputils.WriteJSON(w, http.StatusOK, nodeID)
+}
+
+func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ var req types.JoinRequest
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ return err
+ }
+ return sr.backend.Join(req)
+}
+
+func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ if err := httputils.ParseForm(r); err != nil {
+ return err
+ }
+
+ force := httputils.BoolValue(r, "force")
+ return sr.backend.Leave(force)
+}
+
+func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ swarm, err := sr.backend.Inspect()
+ if err != nil {
+ logrus.Errorf("Error getting swarm: %v", err)
+ return err
+ }
+
+ return httputils.WriteJSON(w, http.StatusOK, swarm)
+}
+
+func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ var swarm types.Spec
+ if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil {
+ return err
+ }
+
+ rawVersion := r.URL.Query().Get("version")
+ version, err := strconv.ParseUint(rawVersion, 10, 64)
+ if err != nil {
+ return fmt.Errorf("Invalid swarm version '%s': %s", rawVersion, err.Error())
+ }
+
+ if err := sr.backend.Update(version, swarm); err != nil {
+ logrus.Errorf("Error configuring swarm: %v", err)
+ return err
+ }
+ return nil
+}
+
+func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ if err := httputils.ParseForm(r); err != nil {
+ return err
+ }
+ filter, err := filters.FromParam(r.Form.Get("filters"))
+ if err != nil {
+ return err
+ }
+
+ services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filter: filter})
+ if err != nil {
+ logrus.Errorf("Error getting services: %v", err)
+ return err
+ }
+
+ return httputils.WriteJSON(w, http.StatusOK, services)
+}
+
+func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ service, err := sr.backend.GetService(vars["id"])
+ if err != nil {
+ logrus.Errorf("Error getting service %s: %v", vars["id"], err)
+ return err
+ }
+
+ return httputils.WriteJSON(w, http.StatusOK, service)
+}
+
+func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ var service types.ServiceSpec
+ if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
+ return err
+ }
+
+ id, err := sr.backend.CreateService(service)
+ if err != nil {
+ logrus.Errorf("Error reating service %s: %v", id, err)
+ return err
+ }
+
+ return httputils.WriteJSON(w, http.StatusCreated, &basictypes.ServiceCreateResponse{
+ ID: id,
+ })
+}
+
+func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ var service types.ServiceSpec
+ if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
+ return err
+ }
+
+ rawVersion := r.URL.Query().Get("version")
+ version, err := strconv.ParseUint(rawVersion, 10, 64)
+ if err != nil {
+ return fmt.Errorf("Invalid service version '%s': %s", rawVersion, err.Error())
+ }
+
+ if err := sr.backend.UpdateService(vars["id"], version, service); err != nil {
+ logrus.Errorf("Error updating service %s: %v", vars["id"], err)
+ return err
+ }
+ return nil
+}
+
+func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ if err := sr.backend.RemoveService(vars["id"]); err != nil {
+ logrus.Errorf("Error removing service %s: %v", vars["id"], err)
+ return err
+ }
+ return nil
+}
+
+func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ if err := httputils.ParseForm(r); err != nil {
+ return err
+ }
+ filter, err := filters.FromParam(r.Form.Get("filters"))
+ if err != nil {
+ return err
+ }
+
+ nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filter: filter})
+ if err != nil {
+ logrus.Errorf("Error getting nodes: %v", err)
+ return err
+ }
+
+ return httputils.WriteJSON(w, http.StatusOK, nodes)
+}
+
+func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ node, err := sr.backend.GetNode(vars["id"])
+ if err != nil {
+ logrus.Errorf("Error getting node %s: %v", vars["id"], err)
+ return err
+ }
+
+ return httputils.WriteJSON(w, http.StatusOK, node)
+}
+
+func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ var node types.NodeSpec
+ if err := json.NewDecoder(r.Body).Decode(&node); err != nil {
+ return err
+ }
+
+ rawVersion := r.URL.Query().Get("version")
+ version, err := strconv.ParseUint(rawVersion, 10, 64)
+ if err != nil {
+ return fmt.Errorf("Invalid node version '%s': %s", rawVersion, err.Error())
+ }
+
+ if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil {
+ logrus.Errorf("Error updating node %s: %v", vars["id"], err)
+ return err
+ }
+ return nil
+}
+
+func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ if err := sr.backend.RemoveNode(vars["id"]); err != nil {
+ logrus.Errorf("Error removing node %s: %v", vars["id"], err)
+ return err
+ }
+ return nil
+}
+
+func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ if err := httputils.ParseForm(r); err != nil {
+ return err
+ }
+ filter, err := filters.FromParam(r.Form.Get("filters"))
+ if err != nil {
+ return err
+ }
+
+ tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filter: filter})
+ if err != nil {
+ logrus.Errorf("Error getting tasks: %v", err)
+ return err
+ }
+
+ return httputils.WriteJSON(w, http.StatusOK, tasks)
+}
+
+func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ task, err := sr.backend.GetTask(vars["id"])
+ if err != nil {
+ logrus.Errorf("Error getting task %s: %v", vars["id"], err)
+ return err
+ }
+
+ return httputils.WriteJSON(w, http.StatusOK, task)
+}
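
All three update routes (swarm, service, node) take the object version as a query parameter for optimistic concurrency. A sketch of the wire-level shape, assuming a daemon listening on tcp://localhost:2375 (not the default unix socket), with serviceID, version, and a JSON-encoded swarm.ServiceSpec in specJSON coming from a prior GET /services/{id}:

    url := fmt.Sprintf("http://localhost:2375/services/%s/update?version=%d",
        serviceID, version)
    resp, err := http.Post(url, "application/json", bytes.NewReader(specJSON))
    if err != nil {
        return err
    }
    defer resp.Body.Close() // non-2xx here signals a stale version or other failure
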
diff --git a/api/server/router/system/system.go b/api/server/router/system/system.go
index b34b8c3211..e5742c9fe8 100644
--- a/api/server/router/system/system.go
+++ b/api/server/router/system/system.go
@@ -1,18 +1,23 @@
package system
-import "github.com/docker/docker/api/server/router"
+import (
+ "github.com/docker/docker/api/server/router"
+ "github.com/docker/docker/daemon/cluster"
+)
// systemRouter provides information about the Docker system overall.
// It gathers information about host, daemon and container events.
type systemRouter struct {
- backend Backend
- routes []router.Route
+ backend Backend
+ clusterProvider *cluster.Cluster
+ routes []router.Route
}
// NewRouter initializes a new system router
-func NewRouter(b Backend) router.Router {
+func NewRouter(b Backend, c *cluster.Cluster) router.Router {
r := &systemRouter{
- backend: b,
+ backend: b,
+ clusterProvider: c,
}
r.routes = []router.Route{
diff --git a/api/server/router/system/system_routes.go b/api/server/router/system/system_routes.go
index f921de303b..8050301c9d 100644
--- a/api/server/router/system/system_routes.go
+++ b/api/server/router/system/system_routes.go
@@ -33,6 +33,9 @@ func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *ht
if err != nil {
return err
}
+ if s.clusterProvider != nil {
+ info.Swarm = s.clusterProvider.Info()
+ }
return httputils.WriteJSON(w, http.StatusOK, info)
}
diff --git a/cli/cobraadaptor/adaptor.go b/cli/cobraadaptor/adaptor.go
index 4d2958b531..6f1a8876b3 100644
--- a/cli/cobraadaptor/adaptor.go
+++ b/cli/cobraadaptor/adaptor.go
@@ -5,7 +5,10 @@ import (
"github.com/docker/docker/api/client/container"
"github.com/docker/docker/api/client/image"
"github.com/docker/docker/api/client/network"
+ "github.com/docker/docker/api/client/node"
"github.com/docker/docker/api/client/registry"
+ "github.com/docker/docker/api/client/service"
+ "github.com/docker/docker/api/client/swarm"
"github.com/docker/docker/api/client/system"
"github.com/docker/docker/api/client/volume"
"github.com/docker/docker/cli"
@@ -36,6 +39,9 @@ func NewCobraAdaptor(clientFlags *cliflags.ClientFlags) CobraAdaptor {
rootCmd.SetFlagErrorFunc(cli.FlagErrorFunc)
rootCmd.SetOutput(stdout)
rootCmd.AddCommand(
+ node.NewNodeCommand(dockerCli),
+ service.NewServiceCommand(dockerCli),
+ swarm.NewSwarmCommand(dockerCli),
container.NewAttachCommand(dockerCli),
container.NewCommitCommand(dockerCli),
container.NewCreateCommand(dockerCli),
diff --git a/cli/usage.go b/cli/usage.go
index 73fa4f2245..3c3b321be6 100644
--- a/cli/usage.go
+++ b/cli/usage.go
@@ -11,7 +11,7 @@ var DockerCommandUsage = []Command{
{"cp", "Copy files/folders between a container and the local filesystem"},
{"exec", "Run a command in a running container"},
{"info", "Display system-wide information"},
- {"inspect", "Return low-level information on a container or image"},
+ {"inspect", "Return low-level information on a container, image or task"},
{"update", "Update configuration of one or more containers"},
}
diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go
index 63eb22b2b1..2b39aae8c4 100644
--- a/cmd/dockerd/daemon.go
+++ b/cmd/dockerd/daemon.go
@@ -20,12 +20,14 @@ import (
"github.com/docker/docker/api/server/router/container"
"github.com/docker/docker/api/server/router/image"
"github.com/docker/docker/api/server/router/network"
+ swarmrouter "github.com/docker/docker/api/server/router/swarm"
systemrouter "github.com/docker/docker/api/server/router/system"
"github.com/docker/docker/api/server/router/volume"
"github.com/docker/docker/builder/dockerfile"
cliflags "github.com/docker/docker/cli/flags"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/daemon"
+ "github.com/docker/docker/daemon/cluster"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/libcontainerd"
@@ -208,6 +210,7 @@ func (cli *DaemonCli) start() (err error) {
}
api := apiserver.New(serverConfig)
+ cli.api = api
for i := 0; i < len(cli.Config.Hosts); i++ {
var err error
@@ -264,6 +267,17 @@ func (cli *DaemonCli) start() (err error) {
return fmt.Errorf("Error starting daemon: %v", err)
}
+ name, _ := os.Hostname()
+
+ c, err := cluster.New(cluster.Config{
+ Root: cli.Config.Root,
+ Name: name,
+ Backend: d,
+ })
+ if err != nil {
+ logrus.Fatalf("Error creating cluster component: %v", err)
+ }
+
logrus.Info("Daemon has completed initialization")
logrus.WithFields(logrus.Fields{
@@ -273,7 +287,7 @@ func (cli *DaemonCli) start() (err error) {
}).Info("Docker daemon")
cli.initMiddlewares(api, serverConfig)
- initRouter(api, d)
+ initRouter(api, d, c)
cli.d = d
cli.setupConfigReloadTrap()
@@ -290,6 +304,7 @@ func (cli *DaemonCli) start() (err error) {
// Daemon is fully initialized and handling API traffic
// Wait for serve API to complete
errAPI := <-serveAPIWait
+ c.Cleanup()
shutdownDaemon(d, 15)
containerdRemote.Cleanup()
if errAPI != nil {
@@ -385,18 +400,19 @@ func loadDaemonCliConfig(config *daemon.Config, flags *flag.FlagSet, commonConfi
return config, nil
}
-func initRouter(s *apiserver.Server, d *daemon.Daemon) {
+func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) {
decoder := runconfig.ContainerDecoder{}
routers := []router.Router{
container.NewRouter(d, decoder),
image.NewRouter(d, decoder),
- systemrouter.NewRouter(d),
+ systemrouter.NewRouter(d, c),
volume.NewRouter(d),
build.NewRouter(dockerfile.NewBuildManager(d)),
+ swarmrouter.NewRouter(c),
}
if d.NetworkControllerEnabled() {
- routers = append(routers, network.NewRouter(d))
+ routers = append(routers, network.NewRouter(d, c))
}
s.InitRouter(utils.IsDebugEnabled(), routers...)
diff --git a/container/container.go b/container/container.go
index 2ef31d53eb..1300d96d9c 100644
--- a/container/container.go
+++ b/container/container.go
@@ -66,6 +66,7 @@ type CommonContainer struct {
RWLayer layer.RWLayer `json:"-"`
ID string
Created time.Time
+ Managed bool
Path string
Args []string
Config *containertypes.Config
@@ -790,7 +791,7 @@ func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epC
ipam := epConfig.IPAMConfig
if ipam != nil && (ipam.IPv4Address != "" || ipam.IPv6Address != "") {
createOptions = append(createOptions,
- libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), nil))
+ libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), nil, nil))
}
for _, alias := range epConfig.Aliases {
@@ -798,6 +799,27 @@ func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epC
}
}
+ if container.NetworkSettings.Service != nil {
+ svcCfg := container.NetworkSettings.Service
+
+ var vip string
+ if svcCfg.VirtualAddresses[n.ID()] != nil {
+ vip = svcCfg.VirtualAddresses[n.ID()].IPv4
+ }
+
+ var portConfigs []*libnetwork.PortConfig
+ for _, portConfig := range svcCfg.ExposedPorts {
+ portConfigs = append(portConfigs, &libnetwork.PortConfig{
+ Name: portConfig.Name,
+ Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol),
+ TargetPort: portConfig.TargetPort,
+ PublishedPort: portConfig.PublishedPort,
+ })
+ }
+
+ createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs))
+ }
+
if !containertypes.NetworkMode(n.Name()).IsUserDefined() {
createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution())
}
diff --git a/container/state.go b/container/state.go
index 852ca1d0e5..b01bb3806c 100644
--- a/container/state.go
+++ b/container/state.go
@@ -5,6 +5,8 @@ import (
"sync"
"time"
+ "golang.org/x/net/context"
+
"github.com/docker/go-units"
)
@@ -139,6 +141,32 @@ func (s *State) WaitStop(timeout time.Duration) (int, error) {
return s.getExitCode(), nil
}
+// WaitWithContext waits for the container to stop. The given context can be
+// used to cancel the wait.
+func (s *State) WaitWithContext(ctx context.Context) <-chan int {
+ // todo(tonistiigi): make other wait functions use this
+ c := make(chan int)
+ go func() {
+ s.Lock()
+ if !s.Running {
+ exitCode := s.ExitCode
+ s.Unlock()
+ c <- exitCode
+ close(c)
+ return
+ }
+ waitChan := s.waitChan
+ s.Unlock()
+ select {
+ case <-waitChan:
+ c <- s.getExitCode()
+ case <-ctx.Done():
+ }
+ close(c)
+ }()
+ return c
+}
+
// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
func (s *State) IsRunning() bool {
s.Lock()
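
A usage sketch for the new helper, with s a *container.State: the returned channel yields the exit code once and then closes; if the context is canceled first, it closes without sending, so the ok-idiom distinguishes the two outcomes:

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    if code, ok := <-s.WaitWithContext(ctx); ok {
        fmt.Printf("container exited with code %d\n", code)
    } else {
        fmt.Println("wait canceled before the container stopped")
    }
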
diff --git a/daemon/cluster/cluster.go b/daemon/cluster/cluster.go
new file mode 100644
index 0000000000..1746e9b2cf
--- /dev/null
+++ b/daemon/cluster/cluster.go
@@ -0,0 +1,1056 @@
+package cluster
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/daemon/cluster/convert"
+ executorpkg "github.com/docker/docker/daemon/cluster/executor"
+ "github.com/docker/docker/daemon/cluster/executor/container"
+ "github.com/docker/docker/errors"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/runconfig"
+ apitypes "github.com/docker/engine-api/types"
+ types "github.com/docker/engine-api/types/swarm"
+ swarmagent "github.com/docker/swarmkit/agent"
+ swarmapi "github.com/docker/swarmkit/api"
+ "golang.org/x/net/context"
+)
+
+const swarmDirName = "swarm"
+const controlSocket = "control.sock"
+const swarmConnectTimeout = 5 * time.Second
+const stateFile = "docker-state.json"
+
+const (
+ initialReconnectDelay = 100 * time.Millisecond
+ maxReconnectDelay = 10 * time.Second
+)
+
+// ErrNoManager is returned when a manager-only function is called on a non-manager
+var ErrNoManager = fmt.Errorf("this node is not participating as a Swarm manager")
+
+// ErrNoSwarm is returned on leaving a cluster that was never initialized
+var ErrNoSwarm = fmt.Errorf("this node is not part of Swarm")
+
+// ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated
+var ErrSwarmExists = fmt.Errorf("this node is already part of a Swarm")
+
+// ErrSwarmJoinTimeoutReached is returned when a cluster join could not complete before the timeout was reached.
+var ErrSwarmJoinTimeoutReached = fmt.Errorf("timeout reached before node was joined")
+
+type state struct {
+ ListenAddr string
+}
+
+// Config provides values for Cluster.
+type Config struct {
+ Root string
+ Name string
+ Backend executorpkg.Backend
+}
+
+// Cluster provides capabilities to participate in a cluster as a worker, or as a
+// manager and a worker.
+type Cluster struct {
+ sync.RWMutex
+ root string
+ config Config
+ configEvent chan struct{} // todo: make this array and goroutine safe
+ node *swarmagent.Node
+ conn *grpc.ClientConn
+ client swarmapi.ControlClient
+ ready bool
+ listenAddr string
+ err error
+ reconnectDelay time.Duration
+ stop bool
+ cancelDelay func()
+}
+
+// New creates a new Cluster instance using the provided config.
+func New(config Config) (*Cluster, error) {
+ root := filepath.Join(config.Root, swarmDirName)
+ if err := os.MkdirAll(root, 0700); err != nil {
+ return nil, err
+ }
+ c := &Cluster{
+ root: root,
+ config: config,
+ configEvent: make(chan struct{}, 10),
+ reconnectDelay: initialReconnectDelay,
+ }
+
+ dt, err := ioutil.ReadFile(filepath.Join(root, stateFile))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return c, nil
+ }
+ return nil, err
+ }
+
+ var st state
+ if err := json.Unmarshal(dt, &st); err != nil {
+ return nil, err
+ }
+
+ n, ctx, err := c.startNewNode(false, st.ListenAddr, "", "", "", false)
+ if err != nil {
+ return nil, err
+ }
+
+ select {
+ case <-time.After(swarmConnectTimeout):
+ logrus.Errorf("swarm component could not be started before timeout was reached")
+ case <-n.Ready(context.Background()):
+ case <-ctx.Done():
+ }
+ if ctx.Err() != nil {
+ return nil, fmt.Errorf("swarm component could not be started")
+ }
+ go c.reconnectOnFailure(ctx)
+ return c, nil
+}
+
+func (c *Cluster) checkCompatibility() error {
+ info, _ := c.config.Backend.SystemInfo()
+ if info != nil && (info.ClusterStore != "" || info.ClusterAdvertise != "") {
+ return fmt.Errorf("swarm mode is incompatible with `--cluster-store` and `--cluster-advertise daemon configuration")
+ }
+ return nil
+}
+
+func (c *Cluster) saveState() error {
+ dt, err := json.Marshal(state{ListenAddr: c.listenAddr})
+ if err != nil {
+ return err
+ }
+ return ioutils.AtomicWriteFile(filepath.Join(c.root, stateFile), dt, 0600)
+}
+
+func (c *Cluster) reconnectOnFailure(ctx context.Context) {
+ for {
+ <-ctx.Done()
+ c.Lock()
+ if c.stop || c.node != nil {
+ c.Unlock()
+ return
+ }
+ c.reconnectDelay *= 2
+ if c.reconnectDelay > maxReconnectDelay {
+ c.reconnectDelay = maxReconnectDelay
+ }
+ logrus.Warnf("Restarting swarm in %.2f seconds", c.reconnectDelay.Seconds())
+ delayCtx, cancel := context.WithTimeout(context.Background(), c.reconnectDelay)
+ c.cancelDelay = cancel
+ c.Unlock()
+ <-delayCtx.Done()
+ if delayCtx.Err() != context.DeadlineExceeded {
+ return
+ }
+ c.Lock()
+ if c.node != nil {
+ c.Unlock()
+ return
+ }
+ var err error
+ _, ctx, err = c.startNewNode(false, c.listenAddr, c.getRemoteAddress(), "", "", false)
+ if err != nil {
+ c.err = err
+ ctx = delayCtx
+ }
+ c.Unlock()
+ }
+}
+
+func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, secret, cahash string, ismanager bool) (*swarmagent.Node, context.Context, error) {
+ if err := c.checkCompatibility(); err != nil {
+ return nil, nil, err
+ }
+ c.node = nil
+ c.cancelDelay = nil
+ node, err := swarmagent.NewNode(&swarmagent.NodeConfig{
+ Hostname: c.config.Name,
+ ForceNewCluster: forceNewCluster,
+ ListenControlAPI: filepath.Join(c.root, controlSocket),
+ ListenRemoteAPI: listenAddr,
+ JoinAddr: joinAddr,
+ StateDir: c.root,
+ CAHash: cahash,
+ Secret: secret,
+ Executor: container.NewExecutor(c.config.Backend),
+ HeartbeatTick: 1,
+ ElectionTick: 3,
+ IsManager: ismanager,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ if err := node.Start(ctx); err != nil {
+ return nil, nil, err
+ }
+
+ c.node = node
+ c.listenAddr = listenAddr
+ c.saveState()
+ c.config.Backend.SetClusterProvider(c)
+ go func() {
+ err := node.Err(ctx)
+ if err != nil {
+ logrus.Errorf("cluster exited with error: %v", err)
+ }
+ c.Lock()
+ c.conn = nil
+ c.client = nil
+ c.node = nil
+ c.ready = false
+ c.err = err
+ c.Unlock()
+ cancel()
+ }()
+
+ go func() {
+ select {
+ case <-node.Ready(context.Background()):
+ c.Lock()
+ c.reconnectDelay = initialReconnectDelay
+ c.Unlock()
+ case <-ctx.Done():
+ }
+ if ctx.Err() == nil {
+ c.Lock()
+ c.ready = true
+ c.err = nil
+ c.Unlock()
+ }
+ c.configEvent <- struct{}{}
+ }()
+
+ go func() {
+ for conn := range node.ListenControlSocket(ctx) {
+ c.Lock()
+ if c.conn != conn {
+ c.client = swarmapi.NewControlClient(conn)
+ }
+ if c.conn != nil {
+ c.client = nil
+ }
+ c.conn = conn
+ c.Unlock()
+ c.configEvent <- struct{}{}
+ }
+ }()
+
+ return node, ctx, nil
+}
+
+// Init initializes a new cluster from a user-provided request.
+func (c *Cluster) Init(req types.InitRequest) (string, error) {
+ c.Lock()
+ if c.node != nil {
+ c.Unlock()
+ if !req.ForceNewCluster {
+ return "", ErrSwarmExists
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ defer cancel()
+ if err := c.node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
+ return "", err
+ }
+ c.Lock()
+ c.node = nil
+ c.conn = nil
+ c.ready = false
+ }
+ // todo: check current state existing
+ n, ctx, err := c.startNewNode(req.ForceNewCluster, req.ListenAddr, "", "", "", false)
+ if err != nil {
+ c.Unlock()
+ return "", err
+ }
+ c.Unlock()
+
+ select {
+ case <-n.Ready(context.Background()):
+ if err := initAcceptancePolicy(n, req.Spec.AcceptancePolicy); err != nil {
+ return "", err
+ }
+ go c.reconnectOnFailure(ctx)
+ return n.NodeID(), nil
+ case <-ctx.Done():
+ c.RLock()
+ defer c.RUnlock()
+ if c.err != nil {
+ if !req.ForceNewCluster { // if failure on first attempt don't keep state
+ if err := c.clearState(); err != nil {
+ return "", err
+ }
+ }
+ return "", c.err
+ }
+ return "", ctx.Err()
+ }
+}
+
+// Join makes the current Cluster part of an existing swarm cluster.
+func (c *Cluster) Join(req types.JoinRequest) error {
+ c.Lock()
+ if c.node != nil {
+ c.Unlock()
+ return ErrSwarmExists
+ }
+ // todo: check current state existing
+ if len(req.RemoteAddrs) == 0 {
+ return fmt.Errorf("at least 1 RemoteAddr is required to join")
+ }
+ n, ctx, err := c.startNewNode(false, req.ListenAddr, req.RemoteAddrs[0], req.Secret, req.CACertHash, req.Manager)
+ if err != nil {
+ c.Unlock()
+ return err
+ }
+ c.Unlock()
+
+ select {
+ case <-time.After(swarmConnectTimeout):
+ go c.reconnectOnFailure(ctx)
+ if nodeid := n.NodeID(); nodeid != "" {
+ return fmt.Errorf("Timeout reached before node was joined. Your cluster settings may be preventing this node from automatically joining. To accept this node into cluster run `docker node accept %v` in an existing cluster manager", nodeid)
+ }
+ return ErrSwarmJoinTimeoutReached
+ case <-n.Ready(context.Background()):
+ go c.reconnectOnFailure(ctx)
+ return nil
+ case <-ctx.Done():
+ c.RLock()
+ defer c.RUnlock()
+ if c.err != nil {
+ return c.err
+ }
+ return ctx.Err()
+ }
+}
+
+func (c *Cluster) cancelReconnect() {
+ c.stop = true
+ if c.cancelDelay != nil {
+ c.cancelDelay()
+ c.cancelDelay = nil
+ }
+}
+
+// Leave shuts down the Cluster and removes current state.
+func (c *Cluster) Leave(force bool) error {
+ c.Lock()
+ node := c.node
+ if node == nil {
+ c.Unlock()
+ return ErrNoSwarm
+ }
+
+ if node.Manager() != nil && !force {
+ msg := "You are attempting to leave cluster on a node that is participating as a manager. "
+ if c.isActiveManager() {
+ active, reachable, unreachable, err := c.managerStats()
+ if err == nil {
+ if active && reachable-2 <= unreachable {
+ if reachable == 1 && unreachable == 0 {
+ msg += "Leaving last manager will remove all current state of the cluster. Use `--force` to ignore this message. "
+ c.Unlock()
+ return fmt.Errorf(msg)
+ }
+ msg += fmt.Sprintf("Leaving cluster will leave you with %v managers out of %v. This means Raft quorum will be lost and your cluster will become inaccessible. ", reachable-1, reachable+unreachable)
+ }
+ }
+ } else {
+ msg += "Doing so may lose the consenus of your cluster. "
+ }
+
+ msg += "Only way to restore a cluster that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to ignore this message."
+ c.Unlock()
+ return fmt.Errorf(msg)
+ }
+ c.cancelReconnect()
+ c.Unlock()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ defer cancel()
+ if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
+ return err
+ }
+ nodeID := node.NodeID()
+ for _, id := range c.config.Backend.ListContainersForNode(nodeID) {
+ if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil {
+ logrus.Errorf("error removing %v: %v", id, err)
+ }
+ }
+ c.Lock()
+ defer c.Unlock()
+ c.node = nil
+ c.conn = nil
+ c.ready = false
+ c.configEvent <- struct{}{}
+ // todo: cleanup optional?
+ if err := c.clearState(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *Cluster) clearState() error {
+ if err := os.RemoveAll(c.root); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(c.root, 0700); err != nil {
+ return err
+ }
+ c.config.Backend.SetClusterProvider(nil)
+ return nil
+}
+
+func (c *Cluster) getRequestContext() context.Context { // TODO: not needed when requests don't block on quorum loss
+ ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
+ return ctx
+}
+
+// Inspect retrieves the configuration properties of a managed swarm cluster.
+func (c *Cluster) Inspect() (types.Swarm, error) {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return types.Swarm{}, ErrNoManager
+ }
+
+ swarm, err := getSwarm(c.getRequestContext(), c.client)
+ if err != nil {
+ return types.Swarm{}, err
+ }
+
+ return convert.SwarmFromGRPC(*swarm), nil
+}
+
+// Update updates configuration of a managed swarm cluster.
+func (c *Cluster) Update(version uint64, spec types.Spec) error {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return ErrNoManager
+ }
+
+ swarmSpec, err := convert.SwarmSpecToGRPC(spec)
+ if err != nil {
+ return err
+ }
+
+ swarm, err := getSwarm(c.getRequestContext(), c.client)
+ if err != nil {
+ return err
+ }
+
+ _, err = c.client.UpdateCluster(
+ c.getRequestContext(),
+ &swarmapi.UpdateClusterRequest{
+ ClusterID: swarm.ID,
+ Spec: &swarmSpec,
+ ClusterVersion: &swarmapi.Version{
+ Index: version,
+ },
+ },
+ )
+ return err
+}
+
+// IsManager returns true if Cluster is participating as a manager.
+func (c *Cluster) IsManager() bool {
+ c.RLock()
+ defer c.RUnlock()
+ return c.isActiveManager()
+}
+
+// IsAgent returns true if Cluster is participating as a worker/agent.
+func (c *Cluster) IsAgent() bool {
+ c.RLock()
+ defer c.RUnlock()
+ return c.ready
+}
+
+// GetListenAddress returns the listening address for the current manager's
+// consensus and dispatcher APIs.
+func (c *Cluster) GetListenAddress() string {
+ c.RLock()
+ defer c.RUnlock()
+ if c.conn != nil {
+ return c.listenAddr
+ }
+ return ""
+}
+
+// GetRemoteAddress returns a known advertise address of a remote manager if
+// available.
+// todo: change to array/connect with info
+func (c *Cluster) GetRemoteAddress() string {
+ c.RLock()
+ defer c.RUnlock()
+ return c.getRemoteAddress()
+}
+
+func (c *Cluster) getRemoteAddress() string {
+ if c.node == nil {
+ return ""
+ }
+ nodeID := c.node.NodeID()
+ for _, r := range c.node.Remotes() {
+ if r.NodeID != nodeID {
+ return r.Addr
+ }
+ }
+ return ""
+}
+
+// ListenClusterEvents returns a channel that receives messages on cluster
+// participation changes.
+// todo: make cancelable and accessible to multiple callers
+func (c *Cluster) ListenClusterEvents() <-chan struct{} {
+ return c.configEvent
+}
+
+// Info returns information about the current cluster state.
+func (c *Cluster) Info() types.Info {
+ var info types.Info
+ c.RLock()
+ defer c.RUnlock()
+
+ if c.node == nil {
+ info.LocalNodeState = types.LocalNodeStateInactive
+ if c.cancelDelay != nil {
+ info.LocalNodeState = types.LocalNodeStateError
+ }
+ } else {
+ info.LocalNodeState = types.LocalNodeStatePending
+ if c.ready {
+ info.LocalNodeState = types.LocalNodeStateActive
+ }
+ }
+ if c.err != nil {
+ info.Error = c.err.Error()
+ }
+
+ if c.isActiveManager() {
+ info.ControlAvailable = true
+ if r, err := c.client.ListNodes(c.getRequestContext(), &swarmapi.ListNodesRequest{}); err == nil {
+ info.Nodes = len(r.Nodes)
+ for _, n := range r.Nodes {
+ if n.ManagerStatus != nil {
+ info.Managers++
+ }
+ }
+ }
+
+ if swarm, err := getSwarm(c.getRequestContext(), c.client); err == nil && swarm != nil {
+ info.CACertHash = swarm.RootCA.CACertHash
+ }
+ }
+
+ if c.node != nil {
+ for _, r := range c.node.Remotes() {
+ info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr})
+ }
+ info.NodeID = c.node.NodeID()
+ }
+
+ return info
+}
+
+// isActiveManager should not be called without a read lock
+func (c *Cluster) isActiveManager() bool {
+ return c.conn != nil
+}
+
+// GetServices returns all services of a managed swarm cluster.
+func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return nil, ErrNoManager
+ }
+
+ filters, err := newListServicesFilters(options.Filter)
+ if err != nil {
+ return nil, err
+ }
+ r, err := c.client.ListServices(
+ c.getRequestContext(),
+ &swarmapi.ListServicesRequest{Filters: filters})
+ if err != nil {
+ return nil, err
+ }
+
+ var services []types.Service
+
+ for _, service := range r.Services {
+ services = append(services, convert.ServiceFromGRPC(*service))
+ }
+
+ return services, nil
+}
+
+// CreateService creates a new service in a managed swarm cluster.
+func (c *Cluster) CreateService(s types.ServiceSpec) (string, error) {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return "", ErrNoManager
+ }
+
+ ctx := c.getRequestContext()
+
+ err := populateNetworkID(ctx, c.client, &s)
+ if err != nil {
+ return "", err
+ }
+
+ serviceSpec, err := convert.ServiceSpecToGRPC(s)
+ if err != nil {
+ return "", err
+ }
+ r, err := c.client.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
+ if err != nil {
+ return "", err
+ }
+
+ return r.Service.ID, nil
+}
+
+// GetService returns a service based on an ID or name.
+func (c *Cluster) GetService(input string) (types.Service, error) {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return types.Service{}, ErrNoManager
+ }
+
+ service, err := getService(c.getRequestContext(), c.client, input)
+ if err != nil {
+ return types.Service{}, err
+ }
+ return convert.ServiceFromGRPC(*service), nil
+}
+
+// UpdateService updates existing service to match new properties.
+func (c *Cluster) UpdateService(serviceID string, version uint64, spec types.ServiceSpec) error {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return ErrNoManager
+ }
+
+ serviceSpec, err := convert.ServiceSpecToGRPC(spec)
+ if err != nil {
+ return err
+ }
+
+ _, err = c.client.UpdateService(
+ c.getRequestContext(),
+ &swarmapi.UpdateServiceRequest{
+ ServiceID: serviceID,
+ Spec: &serviceSpec,
+ ServiceVersion: &swarmapi.Version{
+ Index: version,
+ },
+ },
+ )
+ return err
+}
+
+// RemoveService removes a service from a managed swarm cluster.
+func (c *Cluster) RemoveService(input string) error {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return ErrNoManager
+ }
+
+ service, err := getService(c.getRequestContext(), c.client, input)
+ if err != nil {
+ return err
+ }
+
+ if _, err := c.client.RemoveService(c.getRequestContext(), &swarmapi.RemoveServiceRequest{ServiceID: service.ID}); err != nil {
+ return err
+ }
+ return nil
+}
+
+// GetNodes returns a list of all nodes known to a cluster.
+func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return nil, ErrNoManager
+ }
+
+ filters, err := newListNodesFilters(options.Filter)
+ if err != nil {
+ return nil, err
+ }
+ r, err := c.client.ListNodes(
+ c.getRequestContext(),
+ &swarmapi.ListNodesRequest{Filters: filters})
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := []types.Node{}
+
+ for _, node := range r.Nodes {
+ nodes = append(nodes, convert.NodeFromGRPC(*node))
+ }
+ return nodes, nil
+}
+
+// GetNode returns a node based on an ID or name.
+func (c *Cluster) GetNode(input string) (types.Node, error) {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return types.Node{}, ErrNoManager
+ }
+
+ node, err := getNode(c.getRequestContext(), c.client, input)
+ if err != nil {
+ return types.Node{}, err
+ }
+ return convert.NodeFromGRPC(*node), nil
+}
+
+// UpdateNode updates an existing node's properties.
+func (c *Cluster) UpdateNode(nodeID string, version uint64, spec types.NodeSpec) error {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return ErrNoManager
+ }
+
+ nodeSpec, err := convert.NodeSpecToGRPC(spec)
+ if err != nil {
+ return err
+ }
+
+ _, err = c.client.UpdateNode(
+ c.getRequestContext(),
+ &swarmapi.UpdateNodeRequest{
+ NodeID: nodeID,
+ Spec: &nodeSpec,
+ NodeVersion: &swarmapi.Version{
+ Index: version,
+ },
+ },
+ )
+ return err
+}
+
+// RemoveNode removes a node from a cluster
+func (c *Cluster) RemoveNode(input string) error {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return ErrNoManager
+ }
+
+ ctx := c.getRequestContext()
+
+ node, err := getNode(ctx, c.client, input)
+ if err != nil {
+ return err
+ }
+
+ if _, err := c.client.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID}); err != nil {
+ return err
+ }
+ return nil
+}
+
+// GetTasks returns a list of tasks matching the filter options.
+func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return nil, ErrNoManager
+ }
+
+ filters, err := newListTasksFilters(options.Filter)
+ if err != nil {
+ return nil, err
+ }
+ r, err := c.client.ListTasks(
+ c.getRequestContext(),
+ &swarmapi.ListTasksRequest{Filters: filters})
+ if err != nil {
+ return nil, err
+ }
+
+ tasks := []types.Task{}
+
+ for _, task := range r.Tasks {
+ tasks = append(tasks, convert.TaskFromGRPC(*task))
+ }
+ return tasks, nil
+}
+
+// GetTask returns a task by an ID.
+func (c *Cluster) GetTask(input string) (types.Task, error) {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return types.Task{}, ErrNoManager
+ }
+
+ task, err := getTask(c.getRequestContext(), c.client, input)
+ if err != nil {
+ return types.Task{}, err
+ }
+ return convert.TaskFromGRPC(*task), nil
+}
+
+// GetNetwork returns a cluster network by ID.
+func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return apitypes.NetworkResource{}, ErrNoManager
+ }
+
+ network, err := getNetwork(c.getRequestContext(), c.client, input)
+ if err != nil {
+ return apitypes.NetworkResource{}, err
+ }
+ return convert.BasicNetworkFromGRPC(*network), nil
+}
+
+// GetNetworks returns all current cluster managed networks.
+func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return nil, ErrNoManager
+ }
+
+ r, err := c.client.ListNetworks(c.getRequestContext(), &swarmapi.ListNetworksRequest{})
+ if err != nil {
+ return nil, err
+ }
+
+ var networks []apitypes.NetworkResource
+
+ for _, network := range r.Networks {
+ networks = append(networks, convert.BasicNetworkFromGRPC(*network))
+ }
+
+ return networks, nil
+}
+
+// CreateNetwork creates a new cluster managed network.
+func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return "", ErrNoManager
+ }
+
+ if runconfig.IsPreDefinedNetwork(s.Name) {
+ err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name)
+ return "", errors.NewRequestForbiddenError(err)
+ }
+
+ networkSpec := convert.BasicNetworkCreateToGRPC(s)
+ r, err := c.client.CreateNetwork(c.getRequestContext(), &swarmapi.CreateNetworkRequest{Spec: &networkSpec})
+ if err != nil {
+ return "", err
+ }
+
+ return r.Network.ID, nil
+}
+
+// RemoveNetwork removes a cluster network.
+func (c *Cluster) RemoveNetwork(input string) error {
+ c.RLock()
+ defer c.RUnlock()
+
+ if !c.isActiveManager() {
+ return ErrNoManager
+ }
+
+ network, err := getNetwork(c.getRequestContext(), c.client, input)
+ if err != nil {
+ return err
+ }
+
+ if _, err := c.client.RemoveNetwork(c.getRequestContext(), &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}); err != nil {
+ return err
+ }
+ return nil
+}
+
+func populateNetworkID(ctx context.Context, c swarmapi.ControlClient, s *types.ServiceSpec) error {
+ for i, n := range s.Networks {
+ apiNetwork, err := getNetwork(ctx, c, n.Target)
+ if err != nil {
+ return err
+ }
+ s.Networks[i] = types.NetworkAttachmentConfig{Target: apiNetwork.ID}
+ }
+ return nil
+}
+
+func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) {
+ // GetNetwork to match via full ID.
+ rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input})
+ if err != nil {
+ // If any error (including NotFound), ListNetworks to match via ID prefix and full name.
+ rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{Names: []string{input}}})
+ if err != nil || len(rl.Networks) == 0 {
+ rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{IDPrefixes: []string{input}}})
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ if len(rl.Networks) == 0 {
+ return nil, fmt.Errorf("network %s not found", input)
+ }
+
+ if l := len(rl.Networks); l > 1 {
+ return nil, fmt.Errorf("network %s is ambigious (%d matches found)", input, l)
+ }
+
+ return rl.Networks[0], nil
+ }
+ return rg.Network, nil
+}
+
+// Cleanup stops the active swarm node. This is run before daemon shutdown.
+func (c *Cluster) Cleanup() {
+ c.Lock()
+ node := c.node
+ if node == nil {
+ c.Unlock()
+ return
+ }
+
+ if c.isActiveManager() {
+ active, reachable, unreachable, err := c.managerStats()
+ if err == nil {
+ singlenode := active && reachable == 1 && unreachable == 0
+ if active && !singlenode && reachable-2 <= unreachable {
+ logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable)
+ }
+ }
+ }
+ c.cancelReconnect()
+ c.Unlock()
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ if err := node.Stop(ctx); err != nil {
+ logrus.Errorf("error cleaning up cluster: %v", err)
+ }
+ c.Lock()
+ c.node = nil
+ c.ready = false
+ c.conn = nil
+ c.Unlock()
+}
+
+func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, err error) {
+ ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
+ nodes, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{})
+ if err != nil {
+ return false, 0, 0, err
+ }
+ for _, n := range nodes.Nodes {
+ if n.ManagerStatus != nil {
+ if n.ManagerStatus.Raft.Status.Reachability == swarmapi.RaftMemberStatus_REACHABLE {
+ reachable++
+ if n.ID == c.node.NodeID() {
+ current = true
+ }
+ }
+ if n.ManagerStatus.Raft.Status.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE {
+ unreachable++
+ }
+ }
+ }
+ return
+}
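+
+// Worked example for the quorum check in Cleanup (hypothetical numbers):
+// with reachable=3 and unreachable=1, reachable-2 <= unreachable (1 <= 1)
+// holds, so leaving triggers the quorum warning; with reachable=4 and
+// unreachable=1, 2 <= 1 is false and no warning is logged.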
+
+func initAcceptancePolicy(node *swarmagent.Node, acceptancePolicy types.AcceptancePolicy) error {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ for conn := range node.ListenControlSocket(ctx) {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ if conn != nil {
+ client := swarmapi.NewControlClient(conn)
+ var cluster *swarmapi.Cluster
+ for i := 0; ; i++ {
+ lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{})
+ if err != nil {
+ return fmt.Errorf("error on listing clusters: %v", err)
+ }
+ if len(lcr.Clusters) == 0 {
+ if i < 10 {
+ time.Sleep(200 * time.Millisecond)
+ continue
+ }
+ return fmt.Errorf("empty list of clusters was returned")
+ }
+ cluster = lcr.Clusters[0]
+ break
+ }
+ spec := &cluster.Spec
+
+ if err := convert.SwarmSpecUpdateAcceptancePolicy(spec, acceptancePolicy); err != nil {
+ return fmt.Errorf("error updating cluster settings: %v", err)
+ }
+ _, err := client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{
+ ClusterID: cluster.ID,
+ ClusterVersion: &cluster.Meta.Version,
+ Spec: spec,
+ })
+ if err != nil {
+ return fmt.Errorf("error updating cluster settings: %v", err)
+ }
+ return nil
+ }
+ }
+ return ctx.Err()
+}
diff --git a/daemon/cluster/convert/container.go b/daemon/cluster/convert/container.go
new file mode 100644
index 0000000000..c943537ad4
--- /dev/null
+++ b/daemon/cluster/convert/container.go
@@ -0,0 +1,116 @@
+package convert
+
+import (
+ "fmt"
+ "strings"
+
+ types "github.com/docker/engine-api/types/swarm"
+ swarmapi "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+)
+
+func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec {
+ containerSpec := types.ContainerSpec{
+ Image: c.Image,
+ Labels: c.Labels,
+ Command: c.Command,
+ Args: c.Args,
+ Env: c.Env,
+ Dir: c.Dir,
+ User: c.User,
+ }
+
+ // Mounts
+ for _, m := range c.Mounts {
+ mount := types.Mount{
+ Target: m.Target,
+ Source: m.Source,
+ Type: types.MountType(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])),
+ Writable: m.Writable,
+ }
+
+ if m.BindOptions != nil {
+ mount.BindOptions = &types.BindOptions{
+ Propagation: types.MountPropagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])),
+ }
+ }
+
+ if m.VolumeOptions != nil {
+ mount.VolumeOptions = &types.VolumeOptions{
+ Populate: m.VolumeOptions.Populate,
+ Labels: m.VolumeOptions.Labels,
+ }
+ if m.VolumeOptions.DriverConfig != nil {
+ mount.VolumeOptions.DriverConfig = &types.Driver{
+ Name: m.VolumeOptions.DriverConfig.Name,
+ Options: m.VolumeOptions.DriverConfig.Options,
+ }
+ }
+ }
+ containerSpec.Mounts = append(containerSpec.Mounts, mount)
+ }
+
+ if c.StopGracePeriod != nil {
+ grace, _ := ptypes.Duration(c.StopGracePeriod)
+ containerSpec.StopGracePeriod = &grace
+ }
+ return containerSpec
+}
+
+func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
+ containerSpec := &swarmapi.ContainerSpec{
+ Image: c.Image,
+ Labels: c.Labels,
+ Command: c.Command,
+ Args: c.Args,
+ Env: c.Env,
+ Dir: c.Dir,
+ User: c.User,
+ }
+
+ if c.StopGracePeriod != nil {
+ containerSpec.StopGracePeriod = ptypes.DurationProto(*c.StopGracePeriod)
+ }
+
+ // Mounts
+ for _, m := range c.Mounts {
+ mount := swarmapi.Mount{
+ Target: m.Target,
+ Source: m.Source,
+ Writable: m.Writable,
+ }
+
+ if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok {
+ mount.Type = swarmapi.Mount_MountType(mountType)
+ } else if string(m.Type) != "" {
+ return nil, fmt.Errorf("invalid MountType: %q", m.Type)
+ }
+
+ if m.BindOptions != nil {
+ if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok {
+ mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)}
+ } else if string(m.BindOptions.Propagation) != "" {
+ return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation)
+
+ }
+
+ }
+
+ if m.VolumeOptions != nil {
+ mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{
+ Populate: m.VolumeOptions.Populate,
+ Labels: m.VolumeOptions.Labels,
+ }
+ if m.VolumeOptions.DriverConfig != nil {
+ mount.VolumeOptions.DriverConfig = &swarmapi.Driver{
+ Name: m.VolumeOptions.DriverConfig.Name,
+ Options: m.VolumeOptions.DriverConfig.Options,
+ }
+ }
+ }
+
+ containerSpec.Mounts = append(containerSpec.Mounts, mount)
+ }
+
+ return containerSpec, nil
+}
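+
+// Note on the enum round trips above (values are illustrative): a mount of
+// Type "bind" is uppercased and looked up in Mount_MountType_value, and the
+// reverse conversion lowercases Mount_MountType_name, so "bind" survives a
+// GRPC round trip unchanged. An unknown non-empty type fails with "invalid
+// MountType", while an empty type silently maps to the zero enum value.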
diff --git a/daemon/cluster/convert/network.go b/daemon/cluster/convert/network.go
new file mode 100644
index 0000000000..53b952427a
--- /dev/null
+++ b/daemon/cluster/convert/network.go
@@ -0,0 +1,194 @@
+package convert
+
+import (
+ "strings"
+
+ basictypes "github.com/docker/engine-api/types"
+ networktypes "github.com/docker/engine-api/types/network"
+ types "github.com/docker/engine-api/types/swarm"
+ swarmapi "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+)
+
+func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment {
+ if na != nil {
+ return types.NetworkAttachment{
+ Network: networkFromGRPC(na.Network),
+ Addresses: na.Addresses,
+ }
+ }
+ return types.NetworkAttachment{}
+}
+
+func networkFromGRPC(n *swarmapi.Network) types.Network {
+ if n != nil {
+ network := types.Network{
+ ID: n.ID,
+ Spec: types.NetworkSpec{
+ IPv6Enabled: n.Spec.Ipv6Enabled,
+ Internal: n.Spec.Internal,
+ IPAMOptions: ipamFromGRPC(n.Spec.IPAM),
+ },
+ IPAMOptions: ipamFromGRPC(n.IPAM),
+ }
+
+ // Meta
+ network.Version.Index = n.Meta.Version.Index
+ network.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt)
+ network.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt)
+
+ // Annotations
+ network.Spec.Name = n.Spec.Annotations.Name
+ network.Spec.Labels = n.Spec.Annotations.Labels
+
+ // DriverConfiguration
+ if n.Spec.DriverConfig != nil {
+ network.Spec.DriverConfiguration = &types.Driver{
+ Name: n.Spec.DriverConfig.Name,
+ Options: n.Spec.DriverConfig.Options,
+ }
+ }
+
+ // DriverState
+ if n.DriverState != nil {
+ network.DriverState = types.Driver{
+ Name: n.DriverState.Name,
+ Options: n.DriverState.Options,
+ }
+ }
+
+ return network
+ }
+ return types.Network{}
+}
+
+func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions {
+ var ipam *types.IPAMOptions
+ if i != nil {
+ ipam = &types.IPAMOptions{}
+ if i.Driver != nil {
+ ipam.Driver.Name = i.Driver.Name
+ ipam.Driver.Options = i.Driver.Options
+ }
+
+ for _, config := range i.Configs {
+ ipam.Configs = append(ipam.Configs, types.IPAMConfig{
+ Subnet: config.Subnet,
+ Range: config.Range,
+ Gateway: config.Gateway,
+ })
+ }
+ }
+ return ipam
+}
+
+func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec {
+ var endpointSpec *types.EndpointSpec
+ if es != nil {
+ endpointSpec = &types.EndpointSpec{}
+ endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String()))
+
+ for _, portState := range es.Ports {
+ endpointSpec.Ports = append(endpointSpec.Ports, types.PortConfig{
+ Name: portState.Name,
+ Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])),
+ TargetPort: portState.TargetPort,
+ PublishedPort: portState.PublishedPort,
+ })
+ }
+ }
+ return endpointSpec
+}
+
+func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint {
+ endpoint := types.Endpoint{}
+ if e != nil {
+ if espec := endpointSpecFromGRPC(e.Spec); espec != nil {
+ endpoint.Spec = *espec
+ }
+
+ for _, portState := range e.Ports {
+ endpoint.Ports = append(endpoint.Ports, types.PortConfig{
+ Name: portState.Name,
+ Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])),
+ TargetPort: portState.TargetPort,
+ PublishedPort: portState.PublishedPort,
+ })
+ }
+
+ for _, v := range e.VirtualIPs {
+ endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{
+ NetworkID: v.NetworkID,
+ Addr: v.Addr,
+ })
+ }
+ }
+
+ return endpoint
+}
+
+// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource.
+func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource {
+ spec := n.Spec
+ var ipam networktypes.IPAM
+ if spec.IPAM != nil {
+ if spec.IPAM.Driver != nil {
+ ipam.Driver = spec.IPAM.Driver.Name
+ ipam.Options = spec.IPAM.Driver.Options
+ }
+ ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs))
+ for _, ic := range spec.IPAM.Configs {
+ ipamConfig := networktypes.IPAMConfig{
+ Subnet: ic.Subnet,
+ IPRange: ic.Range,
+ Gateway: ic.Gateway,
+ AuxAddress: ic.Reserved,
+ }
+ ipam.Config = append(ipam.Config, ipamConfig)
+ }
+ }
+
+ return basictypes.NetworkResource{
+ ID: n.ID,
+ Name: n.Spec.Annotations.Name,
+ Scope: "swarm",
+ Driver: n.DriverState.Name,
+ EnableIPv6: spec.Ipv6Enabled,
+ IPAM: ipam,
+ Internal: spec.Internal,
+ Options: n.DriverState.Options,
+ Labels: n.Spec.Annotations.Labels,
+ }
+}
+
+// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec.
+func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec {
+ ns := swarmapi.NetworkSpec{
+ Annotations: swarmapi.Annotations{
+ Name: create.Name,
+ Labels: create.Labels,
+ },
+ DriverConfig: &swarmapi.Driver{
+ Name: create.Driver,
+ Options: create.Options,
+ },
+ Ipv6Enabled: create.EnableIPv6,
+ Internal: create.Internal,
+ IPAM: &swarmapi.IPAMOptions{
+ Driver: &swarmapi.Driver{
+ Name: create.IPAM.Driver,
+ Options: create.IPAM.Options,
+ },
+ },
+ }
+ ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config))
+ for _, ipamConfig := range create.IPAM.Config {
+ ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{
+ Subnet: ipamConfig.Subnet,
+ Range: ipamConfig.IPRange,
+ Gateway: ipamConfig.Gateway,
+ })
+ }
+ ns.IPAM.Configs = ipamSpec
+ return ns
+}
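+
+// Sketch of a typical conversion (field values are made up): a
+// NetworkCreateRequest with Name "app-net", Driver "overlay", and one IPAM
+// config {Subnet: "10.1.0.0/24"} becomes a NetworkSpec whose
+// Annotations.Name is "app-net", whose DriverConfig.Name is "overlay", and
+// whose IPAM.Configs holds a single IPAMConfig with Subnet "10.1.0.0/24".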
diff --git a/daemon/cluster/convert/node.go b/daemon/cluster/convert/node.go
new file mode 100644
index 0000000000..fb15b2b5fa
--- /dev/null
+++ b/daemon/cluster/convert/node.go
@@ -0,0 +1,95 @@
+package convert
+
+import (
+ "fmt"
+ "strings"
+
+ types "github.com/docker/engine-api/types/swarm"
+ swarmapi "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+)
+
+// NodeFromGRPC converts a grpc Node to a Node.
+func NodeFromGRPC(n swarmapi.Node) types.Node {
+ node := types.Node{
+ ID: n.ID,
+ Spec: types.NodeSpec{
+ Role: types.NodeRole(strings.ToLower(n.Spec.Role.String())),
+ Membership: types.NodeMembership(strings.ToLower(n.Spec.Membership.String())),
+ Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())),
+ },
+ Status: types.NodeStatus{
+ State: types.NodeState(strings.ToLower(n.Status.State.String())),
+ Message: n.Status.Message,
+ },
+ }
+
+ // Meta
+ node.Version.Index = n.Meta.Version.Index
+ node.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt)
+ node.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt)
+
+ // Annotations
+ node.Spec.Name = n.Spec.Annotations.Name
+ node.Spec.Labels = n.Spec.Annotations.Labels
+
+ // Description
+ if n.Description != nil {
+ node.Description.Hostname = n.Description.Hostname
+ if n.Description.Platform != nil {
+ node.Description.Platform.Architecture = n.Description.Platform.Architecture
+ node.Description.Platform.OS = n.Description.Platform.OS
+ }
+ if n.Description.Resources != nil {
+ node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs
+ node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes
+ }
+ if n.Description.Engine != nil {
+ node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion
+ node.Description.Engine.Labels = n.Description.Engine.Labels
+ for _, plugin := range n.Description.Engine.Plugins {
+ node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name})
+ }
+ }
+ }
+
+ // Manager
+ if n.ManagerStatus != nil {
+ node.ManagerStatus = &types.ManagerStatus{
+ Leader: n.ManagerStatus.Raft.Status.Leader,
+ Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Raft.Status.Reachability.String())),
+ Addr: n.ManagerStatus.Raft.Addr,
+ }
+ }
+
+ return node
+}
+
+// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec.
+func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) {
+ spec := swarmapi.NodeSpec{
+ Annotations: swarmapi.Annotations{
+ Name: s.Name,
+ Labels: s.Labels,
+ },
+ }
+ if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok {
+ spec.Role = swarmapi.NodeRole(role)
+ } else {
+ return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role)
+ }
+
+ if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(string(s.Membership))]; ok {
+ spec.Membership = swarmapi.NodeSpec_Membership(membership)
+ } else {
+ return swarmapi.NodeSpec{}, fmt.Errorf("invalid Membership: %q", s.Membership)
+ }
+
+ if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok {
+ spec.Availability = swarmapi.NodeSpec_Availability(availability)
+ } else {
+ return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability)
+ }
+
+ return spec, nil
+}
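+
+// Example (hypothetical spec): NodeSpec{Role: "manager", Membership:
+// "accepted", Availability: "active"} maps through the uppercase enum
+// tables (NodeRole_value and friends); a misspelled value such as Role
+// "mangaer" is rejected with an "invalid Role" error rather than coerced.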
diff --git a/daemon/cluster/convert/service.go b/daemon/cluster/convert/service.go
new file mode 100644
index 0000000000..60df93a59e
--- /dev/null
+++ b/daemon/cluster/convert/service.go
@@ -0,0 +1,252 @@
+package convert
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker/pkg/namesgenerator"
+ types "github.com/docker/engine-api/types/swarm"
+ swarmapi "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+)
+
+// ServiceFromGRPC converts a grpc Service to a Service.
+func ServiceFromGRPC(s swarmapi.Service) types.Service {
+ spec := s.Spec
+ containerConfig := spec.Task.Runtime.(*swarmapi.TaskSpec_Container).Container
+
+ networks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks))
+ for _, n := range spec.Networks {
+ networks = append(networks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
+ }
+ service := types.Service{
+ ID: s.ID,
+
+ Spec: types.ServiceSpec{
+ TaskTemplate: types.TaskSpec{
+ ContainerSpec: containerSpecFromGRPC(containerConfig),
+ Resources: resourcesFromGRPC(s.Spec.Task.Resources),
+ RestartPolicy: restartPolicyFromGRPC(s.Spec.Task.Restart),
+ Placement: placementFromGRPC(s.Spec.Task.Placement),
+ },
+
+ Networks: networks,
+ EndpointSpec: endpointSpecFromGRPC(s.Spec.Endpoint),
+ },
+ Endpoint: endpointFromGRPC(s.Endpoint),
+ }
+
+ // Meta
+ service.Version.Index = s.Meta.Version.Index
+ service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt)
+ service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt)
+
+ // Annotations
+ service.Spec.Name = s.Spec.Annotations.Name
+ service.Spec.Labels = s.Spec.Annotations.Labels
+
+ // UpdateConfig
+ if s.Spec.Update != nil {
+ service.Spec.UpdateConfig = &types.UpdateConfig{
+ Parallelism: s.Spec.Update.Parallelism,
+ }
+
+ service.Spec.UpdateConfig.Delay, _ = ptypes.Duration(&s.Spec.Update.Delay)
+ }
+
+ // Mode
+ switch t := s.Spec.GetMode().(type) {
+ case *swarmapi.ServiceSpec_Global:
+ service.Spec.Mode.Global = &types.GlobalService{}
+ case *swarmapi.ServiceSpec_Replicated:
+ service.Spec.Mode.Replicated = &types.ReplicatedService{
+ Replicas: &t.Replicated.Replicas,
+ }
+ }
+
+ return service
+}
+
+// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec.
+func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) {
+ name := s.Name
+ if name == "" {
+ name = namesgenerator.GetRandomName(0)
+ }
+
+ networks := make([]*swarmapi.ServiceSpec_NetworkAttachmentConfig, 0, len(s.Networks))
+ for _, n := range s.Networks {
+ networks = append(networks, &swarmapi.ServiceSpec_NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
+ }
+
+ spec := swarmapi.ServiceSpec{
+ Annotations: swarmapi.Annotations{
+ Name: name,
+ Labels: s.Labels,
+ },
+ Task: swarmapi.TaskSpec{
+ Resources: resourcesToGRPC(s.TaskTemplate.Resources),
+ },
+ Networks: networks,
+ }
+
+ containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec)
+ if err != nil {
+ return swarmapi.ServiceSpec{}, err
+ }
+ spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec}
+
+ restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy)
+ if err != nil {
+ return swarmapi.ServiceSpec{}, err
+ }
+ spec.Task.Restart = restartPolicy
+
+ if s.TaskTemplate.Placement != nil {
+ spec.Task.Placement = &swarmapi.Placement{
+ Constraints: s.TaskTemplate.Placement.Constraints,
+ }
+ }
+
+ if s.UpdateConfig != nil {
+ spec.Update = &swarmapi.UpdateConfig{
+ Parallelism: s.UpdateConfig.Parallelism,
+ Delay: *ptypes.DurationProto(s.UpdateConfig.Delay),
+ }
+ }
+
+ if s.EndpointSpec != nil {
+ if s.EndpointSpec.Mode != "" &&
+ s.EndpointSpec.Mode != types.ResolutionModeVIP &&
+ s.EndpointSpec.Mode != types.ResolutionModeDNSRR {
+ return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode)
+ }
+
+ spec.Endpoint = &swarmapi.EndpointSpec{}
+
+ spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))])
+
+ for _, portConfig := range s.EndpointSpec.Ports {
+ spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{
+ Name: portConfig.Name,
+ Protocol: swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]),
+ TargetPort: portConfig.TargetPort,
+ PublishedPort: portConfig.PublishedPort,
+ })
+ }
+ }
+
+ // Mode
+ if s.Mode.Global != nil {
+ spec.Mode = &swarmapi.ServiceSpec_Global{
+ Global: &swarmapi.GlobalService{},
+ }
+ } else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil {
+ spec.Mode = &swarmapi.ServiceSpec_Replicated{
+ Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas},
+ }
+ } else {
+ spec.Mode = &swarmapi.ServiceSpec_Replicated{
+ Replicated: &swarmapi.ReplicatedService{Replicas: 1},
+ }
+ }
+
+ return spec, nil
+}
+
+func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements {
+ var resources *types.ResourceRequirements
+ if res != nil {
+ resources = &types.ResourceRequirements{}
+ if res.Limits != nil {
+ resources.Limits = &types.Resources{
+ NanoCPUs: res.Limits.NanoCPUs,
+ MemoryBytes: res.Limits.MemoryBytes,
+ }
+ }
+ if res.Reservations != nil {
+ resources.Reservations = &types.Resources{
+ NanoCPUs: res.Reservations.NanoCPUs,
+ MemoryBytes: res.Reservations.MemoryBytes,
+ }
+ }
+ }
+
+ return resources
+}
+
+func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements {
+ var reqs *swarmapi.ResourceRequirements
+ if res != nil {
+ reqs = &swarmapi.ResourceRequirements{}
+ if res.Limits != nil {
+ reqs.Limits = &swarmapi.Resources{
+ NanoCPUs: res.Limits.NanoCPUs,
+ MemoryBytes: res.Limits.MemoryBytes,
+ }
+ }
+ if res.Reservations != nil {
+ reqs.Reservations = &swarmapi.Resources{
+ NanoCPUs: res.Reservations.NanoCPUs,
+ MemoryBytes: res.Reservations.MemoryBytes,
+ }
+ }
+ }
+ return reqs
+}
+
+func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy {
+ var rp *types.RestartPolicy
+ if p != nil {
+ rp = &types.RestartPolicy{}
+ rp.Condition = types.RestartPolicyCondition(strings.ToLower(p.Condition.String()))
+ if p.Delay != nil {
+ delay, _ := ptypes.Duration(p.Delay)
+ rp.Delay = &delay
+ }
+ if p.Window != nil {
+ window, _ := ptypes.Duration(p.Window)
+ rp.Window = &window
+ }
+
+ rp.MaxAttempts = &p.MaxAttempts
+ }
+ return rp
+}
+
+func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) {
+ var rp *swarmapi.RestartPolicy
+ if p != nil {
+ rp = &swarmapi.RestartPolicy{}
+ if condition, ok := swarmapi.RestartPolicy_RestartCondition_value[strings.ToUpper(string(p.Condition))]; ok {
+ rp.Condition = swarmapi.RestartPolicy_RestartCondition(condition)
+ } else if string(p.Condition) == "" {
+ rp.Condition = swarmapi.RestartOnAny
+ } else {
+ return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition)
+ }
+
+ if p.Delay != nil {
+ rp.Delay = ptypes.DurationProto(*p.Delay)
+ }
+ if p.Window != nil {
+ rp.Window = ptypes.DurationProto(*p.Window)
+ }
+ if p.MaxAttempts != nil {
+ rp.MaxAttempts = *p.MaxAttempts
+ }
+ }
+ return rp, nil
+}
+
+func placementFromGRPC(p *swarmapi.Placement) *types.Placement {
+ var r *types.Placement
+ if p != nil {
+ r = &types.Placement{}
+ r.Constraints = p.Constraints
+ }
+
+ return r
+}
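+
+// Note on mode defaulting in ServiceSpecToGRPC above: a spec that sets
+// neither Global nor Replicated is treated as replicated with one replica,
+// e.g. an empty service mode yields ServiceSpec_Replicated{Replicas: 1}.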
diff --git a/daemon/cluster/convert/swarm.go b/daemon/cluster/convert/swarm.go
new file mode 100644
index 0000000000..cb9d7d0821
--- /dev/null
+++ b/daemon/cluster/convert/swarm.go
@@ -0,0 +1,116 @@
+package convert
+
+import (
+ "fmt"
+ "strings"
+
+ "golang.org/x/crypto/bcrypt"
+
+ types "github.com/docker/engine-api/types/swarm"
+ swarmapi "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+)
+
+// SwarmFromGRPC converts a grpc Cluster to a Swarm.
+func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
+ swarm := types.Swarm{
+ ID: c.ID,
+ Spec: types.Spec{
+ Orchestration: types.OrchestrationConfig{
+ TaskHistoryRetentionLimit: c.Spec.Orchestration.TaskHistoryRetentionLimit,
+ },
+ Raft: types.RaftConfig{
+ SnapshotInterval: c.Spec.Raft.SnapshotInterval,
+ KeepOldSnapshots: c.Spec.Raft.KeepOldSnapshots,
+ LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers,
+ HeartbeatTick: c.Spec.Raft.HeartbeatTick,
+ ElectionTick: c.Spec.Raft.ElectionTick,
+ },
+ Dispatcher: types.DispatcherConfig{
+ HeartbeatPeriod: c.Spec.Dispatcher.HeartbeatPeriod,
+ },
+ },
+ }
+
+ swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry)
+
+ // Meta
+ swarm.Version.Index = c.Meta.Version.Index
+ swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt)
+ swarm.UpdatedAt, _ = ptypes.Timestamp(c.Meta.UpdatedAt)
+
+ // Annotations
+ swarm.Spec.Name = c.Spec.Annotations.Name
+ swarm.Spec.Labels = c.Spec.Annotations.Labels
+
+ for _, policy := range c.Spec.AcceptancePolicy.Policies {
+ p := types.Policy{
+ Role: types.NodeRole(strings.ToLower(policy.Role.String())),
+ Autoaccept: policy.Autoaccept,
+ }
+ if policy.Secret != nil {
+ p.Secret = string(policy.Secret.Data)
+ }
+ swarm.Spec.AcceptancePolicy.Policies = append(swarm.Spec.AcceptancePolicy.Policies, p)
+ }
+
+ return swarm
+}
+
+// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec.
+func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) {
+ spec := swarmapi.ClusterSpec{
+ Annotations: swarmapi.Annotations{
+ Name: s.Name,
+ Labels: s.Labels,
+ },
+ Orchestration: swarmapi.OrchestrationConfig{
+ TaskHistoryRetentionLimit: s.Orchestration.TaskHistoryRetentionLimit,
+ },
+ Raft: swarmapi.RaftConfig{
+ SnapshotInterval: s.Raft.SnapshotInterval,
+ KeepOldSnapshots: s.Raft.KeepOldSnapshots,
+ LogEntriesForSlowFollowers: s.Raft.LogEntriesForSlowFollowers,
+ HeartbeatTick: s.Raft.HeartbeatTick,
+ ElectionTick: s.Raft.ElectionTick,
+ },
+ Dispatcher: swarmapi.DispatcherConfig{
+ HeartbeatPeriod: s.Dispatcher.HeartbeatPeriod,
+ },
+ CAConfig: swarmapi.CAConfig{
+ NodeCertExpiry: ptypes.DurationProto(s.CAConfig.NodeCertExpiry),
+ },
+ }
+
+ if err := SwarmSpecUpdateAcceptancePolicy(&spec, s.AcceptancePolicy); err != nil {
+ return swarmapi.ClusterSpec{}, err
+ }
+ return spec, nil
+}
+
+// SwarmSpecUpdateAcceptancePolicy updates a grpc ClusterSpec using AcceptancePolicy.
+func SwarmSpecUpdateAcceptancePolicy(spec *swarmapi.ClusterSpec, acceptancePolicy types.AcceptancePolicy) error {
+ spec.AcceptancePolicy.Policies = nil
+ for _, p := range acceptancePolicy.Policies {
+ role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(p.Role))]
+ if !ok {
+ return fmt.Errorf("invalid Role: %q", p.Role)
+ }
+
+ policy := &swarmapi.AcceptancePolicy_RoleAdmissionPolicy{
+ Role: swarmapi.NodeRole(role),
+ Autoaccept: p.Autoaccept,
+ }
+
+ if p.Secret != "" {
+ hashPwd, _ := bcrypt.GenerateFromPassword([]byte(p.Secret), 0)
+ policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_HashedSecret{
+ Data: hashPwd,
+ Alg: "bcrypt",
+ }
+ }
+
+ spec.AcceptancePolicy.Policies = append(spec.AcceptancePolicy.Policies, policy)
+ }
+ return nil
+}
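+
+// Example of the secret handling above (illustrative input): a Policy with
+// Secret "s3cret" is stored as a bcrypt hash (cost 0 selects the library's
+// default cost) tagged with Alg "bcrypt", so the plaintext never enters the
+// cluster spec; SwarmFromGRPC later exposes the stored hash bytes, not the
+// original password.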
diff --git a/daemon/cluster/convert/task.go b/daemon/cluster/convert/task.go
new file mode 100644
index 0000000000..b701ae36cf
--- /dev/null
+++ b/daemon/cluster/convert/task.go
@@ -0,0 +1,53 @@
+package convert
+
+import (
+ "strings"
+
+ types "github.com/docker/engine-api/types/swarm"
+ swarmapi "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+)
+
+// TaskFromGRPC converts a grpc Task to a Task.
+func TaskFromGRPC(t swarmapi.Task) types.Task {
+ containerConfig := t.Spec.Runtime.(*swarmapi.TaskSpec_Container).Container
+ containerStatus := t.Status.GetContainer()
+ task := types.Task{
+ ID: t.ID,
+ ServiceID: t.ServiceID,
+ Slot: int(t.Slot),
+ NodeID: t.NodeID,
+ Spec: types.TaskSpec{
+ ContainerSpec: containerSpecFromGRPC(containerConfig),
+ Resources: resourcesFromGRPC(t.Spec.Resources),
+ RestartPolicy: restartPolicyFromGRPC(t.Spec.Restart),
+ Placement: placementFromGRPC(t.Spec.Placement),
+ },
+ Status: types.TaskStatus{
+ State: types.TaskState(strings.ToLower(t.Status.State.String())),
+ Message: t.Status.Message,
+ Err: t.Status.Err,
+ },
+ DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())),
+ }
+
+ // Meta
+ task.Version.Index = t.Meta.Version.Index
+ task.CreatedAt, _ = ptypes.Timestamp(t.Meta.CreatedAt)
+ task.UpdatedAt, _ = ptypes.Timestamp(t.Meta.UpdatedAt)
+
+ task.Status.Timestamp, _ = ptypes.Timestamp(t.Status.Timestamp)
+
+ if containerStatus != nil {
+ task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID
+ task.Status.ContainerStatus.PID = int(containerStatus.PID)
+ task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode)
+ }
+
+ // NetworksAttachments
+ for _, na := range t.Networks {
+ task.NetworksAttachments = append(task.NetworksAttachments, networkAttachmentFromGRPC(na))
+ }
+
+ return task
+}
diff --git a/daemon/cluster/executor/backend.go b/daemon/cluster/executor/backend.go
new file mode 100644
index 0000000000..6b0d0e5a48
--- /dev/null
+++ b/daemon/cluster/executor/backend.go
@@ -0,0 +1,35 @@
+package executor
+
+import (
+ "io"
+
+ clustertypes "github.com/docker/docker/daemon/cluster/provider"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/container"
+ "github.com/docker/engine-api/types/network"
+ "github.com/docker/libnetwork/cluster"
+ networktypes "github.com/docker/libnetwork/types"
+ "golang.org/x/net/context"
+)
+
+// Backend defines the executor component for a swarm agent.
+type Backend interface {
+ CreateManagedNetwork(clustertypes.NetworkCreateRequest) error
+ DeleteManagedNetwork(name string) error
+ SetupIngress(req clustertypes.NetworkCreateRequest, nodeIP string) error
+ PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
+ CreateManagedContainer(types.ContainerCreateConfig) (types.ContainerCreateResponse, error)
+ ContainerStart(name string, hostConfig *container.HostConfig) error
+ ContainerStop(name string, seconds int) error
+ ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
+ UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error
+ ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error)
+ ContainerWaitWithContext(ctx context.Context, name string) (<-chan int, error)
+ ContainerRm(name string, config *types.ContainerRmConfig) error
+ ContainerKill(name string, sig uint64) error
+ SystemInfo() (*types.Info, error)
+ VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error)
+ ListContainersForNode(nodeID string) []string
+ SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error
+ SetClusterProvider(provider cluster.Provider)
+}
diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go
new file mode 100644
index 0000000000..c9751caeff
--- /dev/null
+++ b/daemon/cluster/executor/container/adapter.go
@@ -0,0 +1,229 @@
+package container
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ executorpkg "github.com/docker/docker/daemon/cluster/executor"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/libnetwork"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "golang.org/x/net/context"
+)
+
+// containerAdapter conducts remote operations for a container. Calls are
+// mostly naked calls to the client API, seeded with information from
+// containerConfig.
+type containerAdapter struct {
+ backend executorpkg.Backend
+ container *containerConfig
+}
+
+func newContainerAdapter(b executorpkg.Backend, task *api.Task) (*containerAdapter, error) {
+ ctnr, err := newContainerConfig(task)
+ if err != nil {
+ return nil, err
+ }
+
+ return &containerAdapter{
+ container: ctnr,
+ backend: b,
+ }, nil
+}
+
+func (c *containerAdapter) pullImage(ctx context.Context) error {
+ // if the image needs to be pulled, the auth config will be retrieved and updated
+ encodedAuthConfig := c.container.task.ServiceAnnotations.Labels[fmt.Sprintf("%v.registryauth", systemLabelPrefix)]
+
+ authConfig := &types.AuthConfig{}
+ if encodedAuthConfig != "" {
+ if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
+ logrus.Warnf("invalid authconfig: %v", err)
+ }
+ }
+
+ pr, pw := io.Pipe()
+ metaHeaders := map[string][]string{}
+ go func() {
+ err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw)
+ pw.CloseWithError(err)
+ }()
+
+ dec := json.NewDecoder(pr)
+ m := map[string]interface{}{}
+ for {
+ if err := dec.Decode(&m); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+ // TODO(stevvooe): Report this status somewhere.
+ logrus.Debugln("pull progress", m)
+ }
+ // if the final stream object contained an error, return it
+ if errMsg, ok := m["error"]; ok {
+ return fmt.Errorf("%v", errMsg)
+ }
+ return nil
+}
+
+func (c *containerAdapter) createNetworks(ctx context.Context) error {
+ for _, network := range c.container.networks() {
+ ncr, err := c.container.networkCreateRequest(network)
+ if err != nil {
+ return err
+ }
+
+ if err := c.backend.CreateManagedNetwork(ncr); err != nil { // TODO: include the network name in this error
+ if _, ok := err.(libnetwork.NetworkNameError); ok {
+ continue
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *containerAdapter) removeNetworks(ctx context.Context) error {
+ for _, nid := range c.container.networks() {
+ if err := c.backend.DeleteManagedNetwork(nid); err != nil {
+ if _, ok := err.(*libnetwork.ActiveEndpointsError); ok {
+ continue
+ }
+ log.G(ctx).Errorf("network %s remove failed: %v", nid, err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *containerAdapter) create(ctx context.Context, backend executorpkg.Backend) error {
+ var cr types.ContainerCreateResponse
+ var err error
+ if cr, err = backend.CreateManagedContainer(types.ContainerCreateConfig{
+ Name: c.container.name(),
+ Config: c.container.config(),
+ HostConfig: c.container.hostConfig(),
+ // Use the first network in container create
+ NetworkingConfig: c.container.createNetworkingConfig(),
+ }); err != nil {
+ return err
+ }
+
+ // The Docker daemon currently doesn't support multiple networks in container create,
+ // so connect the container to the remaining networks afterwards.
+ nc := c.container.connectNetworkingConfig()
+
+ if nc != nil {
+ for n, ep := range nc.EndpointsConfig {
+ logrus.Errorf("CONNECT %s : %v", n, ep.IPAMConfig.IPv4Address)
+ if err := backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
+ return err
+ }
+ }
+ }
+
+ if err := backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *containerAdapter) start(ctx context.Context) error {
+ return c.backend.ContainerStart(c.container.name(), nil)
+}
+
+func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
+ cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false)
+ if ctx.Err() != nil {
+ return types.ContainerJSON{}, ctx.Err()
+ }
+ if err != nil {
+ return types.ContainerJSON{}, err
+ }
+ return *cs, nil
+}
+
+// wait blocks until the container exits and reports the exit code on the
+// returned channel. The wait can be aborted by cancelling the context.
+func (c *containerAdapter) wait(ctx context.Context) (<-chan int, error) {
+ return c.backend.ContainerWaitWithContext(ctx, c.container.name())
+}
+
+func (c *containerAdapter) shutdown(ctx context.Context) error {
+ // Default stop grace period to 10s.
+ stopgrace := 10
+ spec := c.container.spec()
+ if spec.StopGracePeriod != nil {
+ stopgrace = int(spec.StopGracePeriod.Seconds)
+ }
+ return c.backend.ContainerStop(c.container.name(), stopgrace)
+}
+
+func (c *containerAdapter) terminate(ctx context.Context) error {
+ return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL))
+}
+
+func (c *containerAdapter) remove(ctx context.Context) error {
+ return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{
+ RemoveVolume: true,
+ ForceRemove: true,
+ })
+}
+
+func (c *containerAdapter) createVolumes(ctx context.Context, backend executorpkg.Backend) error {
+ // Create plugin volumes that are embedded inside a Mount
+ for _, mount := range c.container.task.Spec.GetContainer().Mounts {
+ if mount.Type != api.MountTypeVolume {
+ continue
+ }
+
+ if mount.VolumeOptions == nil {
+ continue
+ }
+
+ if mount.VolumeOptions.DriverConfig == nil {
+ continue
+ }
+
+ req := c.container.volumeCreateRequest(&mount)
+
+ // Check if this volume exists on the engine
+ if _, err := backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil {
+ // TODO(amitshukla): Today, volume create through the engine api does not return an error
+ // when the named volume with the same parameters already exists.
+ // It returns an error if the driver name is different - that is a valid error
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TODO: use typed/wrapped errors instead of matching on error strings.
+func isContainerCreateNameConflict(err error) bool {
+ return strings.Contains(err.Error(), "Conflict. The name")
+}
+
+func isUnknownContainer(err error) bool {
+ return strings.Contains(err.Error(), "No such container:")
+}
+
+func isStoppedContainer(err error) bool {
+ return strings.Contains(err.Error(), "is already stopped")
+}
diff --git a/daemon/cluster/executor/container/container.go b/daemon/cluster/executor/container/container.go
new file mode 100644
index 0000000000..1326bf1a8b
--- /dev/null
+++ b/daemon/cluster/executor/container/container.go
@@ -0,0 +1,415 @@
+package container
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "net"
+ "strings"
+ "time"
+
+ clustertypes "github.com/docker/docker/daemon/cluster/provider"
+ "github.com/docker/docker/reference"
+ "github.com/docker/engine-api/types"
+ enginecontainer "github.com/docker/engine-api/types/container"
+ "github.com/docker/engine-api/types/network"
+ "github.com/docker/swarmkit/agent/exec"
+ "github.com/docker/swarmkit/api"
+)
+
+const (
+ // Explicitly use the kernel's default setting for a CPU quota period of 100ms.
+ // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
+ cpuQuotaPeriod = 100 * time.Millisecond
+
+ // systemLabelPrefix represents the reserved namespace for system labels.
+ systemLabelPrefix = "com.docker.swarm"
+)
+
+// containerConfig converts task properties into docker container compatible
+// components.
+type containerConfig struct {
+ task *api.Task
+ networksAttachments map[string]*api.NetworkAttachment
+}
+
+// newContainerConfig returns a validated container config. No methods should
+// return an error if this function returns without error.
+func newContainerConfig(t *api.Task) (*containerConfig, error) {
+ var c containerConfig
+ return &c, c.setTask(t)
+}
+
+func (c *containerConfig) setTask(t *api.Task) error {
+ container := t.Spec.GetContainer()
+ if container == nil {
+ return exec.ErrRuntimeUnsupported
+ }
+
+ if container.Image == "" {
+ return ErrImageRequired
+ }
+
+ // index the networks by name
+ c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks))
+ for _, attachment := range t.Networks {
+ c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment
+ }
+
+ c.task = t
+ return nil
+}
+
+func (c *containerConfig) endpoint() *api.Endpoint {
+ return c.task.Endpoint
+}
+
+func (c *containerConfig) spec() *api.ContainerSpec {
+ return c.task.Spec.GetContainer()
+}
+
+func (c *containerConfig) name() string {
+ if c.task.Annotations.Name != "" {
+ // if set, use the container Annotations.Name field, set in the orchestrator.
+ return c.task.Annotations.Name
+ }
+
+ // fallback to service.slot.id.
+ return strings.Join([]string{c.task.ServiceAnnotations.Name, fmt.Sprint(c.task.Slot), c.task.ID}, ".")
+}
+
+func (c *containerConfig) image() string {
+ raw := c.spec().Image
+ ref, err := reference.ParseNamed(raw)
+ if err != nil {
+ return raw
+ }
+ return reference.WithDefaultTag(ref).String()
+}
+
+func (c *containerConfig) volumes() map[string]struct{} {
+ r := make(map[string]struct{})
+
+ for _, mount := range c.spec().Mounts {
+ // pick off all the volume mounts.
+ if mount.Type != api.MountTypeVolume {
+ continue
+ }
+
+ r[fmt.Sprintf("%s:%s", mount.Target, getMountMask(&mount))] = struct{}{}
+ }
+
+ return r
+}
+
+func (c *containerConfig) config() *enginecontainer.Config {
+ config := &enginecontainer.Config{
+ Labels: c.labels(),
+ User: c.spec().User,
+ Env: c.spec().Env,
+ WorkingDir: c.spec().Dir,
+ Image: c.image(),
+ Volumes: c.volumes(),
+ }
+
+ if len(c.spec().Command) > 0 {
+ // If Command is provided, we replace the whole invocation with Command
+ // by replacing Entrypoint and specifying Cmd. Args is ignored in this
+ // case.
+ config.Entrypoint = append(config.Entrypoint, c.spec().Command[0])
+ config.Cmd = append(config.Cmd, c.spec().Command[1:]...)
+ } else if len(c.spec().Args) > 0 {
+ // In this case, we assume the image has an Entrypoint and Args
+ // specifies the arguments for that entrypoint.
+ config.Cmd = c.spec().Args
+ }
+
+ return config
+}
+
+func (c *containerConfig) labels() map[string]string {
+ var (
+ system = map[string]string{
+ "task": "", // mark as cluster task
+ "task.id": c.task.ID,
+ "task.name": fmt.Sprintf("%v.%v", c.task.ServiceAnnotations.Name, c.task.Slot),
+ "node.id": c.task.NodeID,
+ "service.id": c.task.ServiceID,
+ "service.name": c.task.ServiceAnnotations.Name,
+ }
+ labels = make(map[string]string)
+ )
+
+ // base labels are those defined in the spec.
+ for k, v := range c.spec().Labels {
+ labels[k] = v
+ }
+
+ // we then apply the overrides from the task, which may be set via the
+ // orchestrator.
+ for k, v := range c.task.Annotations.Labels {
+ labels[k] = v
+ }
+
+ // finally, we apply the system labels, which override all labels.
+ for k, v := range system {
+ labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v
+ }
+
+ return labels
+}
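+
+// Label precedence example (values are made up): a spec label
+// "com.docker.swarm.task.id" set by a user is overwritten by the system
+// label of the same name, because spec labels are applied first, task
+// annotation labels second, and system labels last.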
+
+func (c *containerConfig) bindMounts() []string {
+ var r []string
+
+ for _, val := range c.spec().Mounts {
+ mask := getMountMask(&val)
+ if val.Type == api.MountTypeBind {
+ r = append(r, fmt.Sprintf("%s:%s:%s", val.Source, val.Target, mask))
+ }
+ }
+
+ return r
+}
+
+func getMountMask(m *api.Mount) string {
+ maskOpts := []string{"ro"}
+ if m.Writable {
+ maskOpts[0] = "rw"
+ }
+
+ if m.BindOptions != nil {
+ switch m.BindOptions.Propagation {
+ case api.MountPropagationPrivate:
+ maskOpts = append(maskOpts, "private")
+ case api.MountPropagationRPrivate:
+ maskOpts = append(maskOpts, "rprivate")
+ case api.MountPropagationShared:
+ maskOpts = append(maskOpts, "shared")
+ case api.MountPropagationRShared:
+ maskOpts = append(maskOpts, "rshared")
+ case api.MountPropagationSlave:
+ maskOpts = append(maskOpts, "slave")
+ case api.MountPropagationRSlave:
+ maskOpts = append(maskOpts, "rslave")
+ }
+ }
+
+ if m.VolumeOptions != nil {
+ if !m.VolumeOptions.Populate {
+ maskOpts = append(maskOpts, "nocopy")
+ }
+ }
+ return strings.Join(maskOpts, ",")
+}
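+
+// Example outputs for getMountMask (inputs are hypothetical): a read-only
+// bind mount with rshared propagation yields "ro,rshared"; a writable
+// volume mount with Populate disabled yields "rw,nocopy".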
+
+func (c *containerConfig) hostConfig() *enginecontainer.HostConfig {
+ return &enginecontainer.HostConfig{
+ Resources: c.resources(),
+ Binds: c.bindMounts(),
+ }
+}
+
+// volumeCreateRequest handles the case of volumes that are defined inside a service Mount.
+func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *types.VolumeCreateRequest {
+ var (
+ driverName string
+ driverOpts map[string]string
+ labels map[string]string
+ )
+
+ if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil {
+ driverName = mount.VolumeOptions.DriverConfig.Name
+ driverOpts = mount.VolumeOptions.DriverConfig.Options
+ labels = mount.VolumeOptions.Labels
+ }
+
+ if mount.VolumeOptions != nil {
+ return &types.VolumeCreateRequest{
+ Name: mount.Source,
+ Driver: driverName,
+ DriverOpts: driverOpts,
+ Labels: labels,
+ }
+ }
+ return nil
+}
+
+func (c *containerConfig) resources() enginecontainer.Resources {
+ resources := enginecontainer.Resources{}
+
+ // If no limits are specified let the engine use its defaults.
+ //
+ // TODO(aluzzardi): We might want to set some limits anyway otherwise
+ // "unlimited" tasks will step over the reservation of other tasks.
+ r := c.task.Spec.Resources
+ if r == nil || r.Limits == nil {
+ return resources
+ }
+
+ if r.Limits.MemoryBytes > 0 {
+ resources.Memory = r.Limits.MemoryBytes
+ }
+
+ if r.Limits.NanoCPUs > 0 {
+ // CPU Period must be set in microseconds.
+ resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond)
+ resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9
+ }
+
+ return resources
+}
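+
+// Worked example of the CPU math above (numbers are illustrative): a limit
+// of 1.5 CPUs arrives as NanoCPUs = 1500000000; with the 100ms kernel
+// period, CPUPeriod = 100000 microseconds and CPUQuota = 1500000000 *
+// 100000 / 1e9 = 150000, i.e. the task may use 1.5 CPU-seconds per second.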
+
+// createNetworkingConfig returns the networking configuration for container
+// create. The Docker daemon supports just one network at create time.
+func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig {
+ var networks []*api.NetworkAttachment
+ if c.task.Spec.GetContainer() != nil {
+ networks = c.task.Networks
+ }
+
+ epConfig := make(map[string]*network.EndpointSettings)
+ if len(networks) > 0 {
+ epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0])
+ }
+
+ return &network.NetworkingConfig{EndpointsConfig: epConfig}
+}
+
+// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create
+func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig {
+ var networks []*api.NetworkAttachment
+ if c.task.Spec.GetContainer() != nil {
+ networks = c.task.Networks
+ }
+
+ // The first network is used during container create. Other networks are attached via "docker network connect".
+ if len(networks) < 2 {
+ return nil
+ }
+
+ epConfig := make(map[string]*network.EndpointSettings)
+ for _, na := range networks[1:] {
+ epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na)
+ }
+ return &network.NetworkingConfig{EndpointsConfig: epConfig}
+}
+
+func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings {
+ var ipv4, ipv6 string
+ for _, addr := range na.Addresses {
+ ip, _, err := net.ParseCIDR(addr)
+ if err != nil {
+ continue
+ }
+
+ if ip.To4() != nil {
+ ipv4 = ip.String()
+ continue
+ }
+
+ if ip.To16() != nil {
+ ipv6 = ip.String()
+ }
+ }
+
+ return &network.EndpointSettings{
+ IPAMConfig: &network.EndpointIPAMConfig{
+ IPv4Address: ipv4,
+ IPv6Address: ipv6,
+ },
+ }
+}
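+
+// Example (made-up addresses): attachment addresses ["10.0.1.5/24",
+// "fd00::5/64"] produce an EndpointIPAMConfig with IPv4Address "10.0.1.5"
+// and IPv6Address "fd00::5"; entries that fail net.ParseCIDR are skipped.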
+
+func (c *containerConfig) virtualIP(networkID string) string {
+ if c.task.Endpoint == nil {
+ return ""
+ }
+
+ for _, eVip := range c.task.Endpoint.VirtualIPs {
+ // We only support IPv4 VIPs for now.
+ if eVip.NetworkID == networkID {
+ vip, _, err := net.ParseCIDR(eVip.Addr)
+ if err != nil {
+ return ""
+ }
+
+ return vip.String()
+ }
+ }
+
+ return ""
+}
+
+func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig {
+ if len(c.task.Networks) == 0 {
+ return nil
+ }
+
+ log.Printf("Creating service config in agent for t = %+v", c.task)
+ svcCfg := &clustertypes.ServiceConfig{
+ Name: c.task.ServiceAnnotations.Name,
+ ID: c.task.ServiceID,
+ VirtualAddresses: make(map[string]*clustertypes.VirtualAddress),
+ }
+
+ for _, na := range c.task.Networks {
+ svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{
+ // We support only IPv4 virtual IP for now.
+ IPv4: c.virtualIP(na.Network.ID),
+ }
+ }
+
+ if c.task.Endpoint != nil {
+ for _, ePort := range c.task.Endpoint.Ports {
+ svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{
+ Name: ePort.Name,
+ Protocol: int32(ePort.Protocol),
+ TargetPort: ePort.TargetPort,
+ PublishedPort: ePort.PublishedPort,
+ })
+ }
+ }
+
+ return svcCfg
+}
+
+// networks returns a list of network names attached to the container. The
+// returned names can be used to look up the corresponding network create
+// options.
+func (c *containerConfig) networks() []string {
+ var networks []string
+
+ for name := range c.networksAttachments {
+ networks = append(networks, name)
+ }
+
+ return networks
+}
+
+func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) {
+ na, ok := c.networksAttachments[name]
+ if !ok {
+ return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced")
+ }
+
+ options := types.NetworkCreate{
+ // ID: na.Network.ID,
+ Driver: na.Network.DriverState.Name,
+ IPAM: network.IPAM{
+ Driver: na.Network.IPAM.Driver.Name,
+ },
+ Options: na.Network.DriverState.Options,
+ CheckDuplicate: true,
+ }
+
+ for _, ic := range na.Network.IPAM.Configs {
+ c := network.IPAMConfig{
+ Subnet: ic.Subnet,
+ IPRange: ic.Range,
+ Gateway: ic.Gateway,
+ }
+ options.IPAM.Config = append(options.IPAM.Config, c)
+ }
+
+ return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil
+}
diff --git a/daemon/cluster/executor/container/controller.go b/daemon/cluster/executor/container/controller.go
new file mode 100644
index 0000000000..17aa454093
--- /dev/null
+++ b/daemon/cluster/executor/container/controller.go
@@ -0,0 +1,305 @@
+package container
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ executorpkg "github.com/docker/docker/daemon/cluster/executor"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/swarmkit/agent/exec"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "golang.org/x/net/context"
+)
+
+// controller implements agent.Controller against docker's API.
+//
+// Most operations against docker's API are done through the container name,
+// which is unique to the task.
+type controller struct {
+ backend executorpkg.Backend
+ task *api.Task
+ adapter *containerAdapter
+ closed chan struct{}
+ err error
+}
+
+var _ exec.Controller = &controller{}
+
+// newController returns a dockerexec runner for the provided task.
+func newController(b executorpkg.Backend, task *api.Task) (*controller, error) {
+ adapter, err := newContainerAdapter(b, task)
+ if err != nil {
+ return nil, err
+ }
+
+ return &controller{
+ backend: b,
+ task: task,
+ adapter: adapter,
+ closed: make(chan struct{}),
+ }, nil
+}
+
+func (r *controller) Task() (*api.Task, error) {
+ return r.task, nil
+}
+
+// ContainerStatus returns the container-specific status for the task.
+func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
+ ctnr, err := r.adapter.inspect(ctx)
+ if err != nil {
+ if isUnknownContainer(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return parseContainerStatus(ctnr)
+}
+
+// Update takes a recent task update and applies it to the container.
+func (r *controller) Update(ctx context.Context, t *api.Task) error {
+ log.G(ctx).Warnf("task updates not yet supported")
+ // TODO(stevvooe): While assignment of tasks is idempotent, we do allow
+ // updates of metadata, such as labelling, as well as any other properties
+ // that make sense.
+ return nil
+}
+
+// Prepare creates a container and ensures the image is pulled.
+//
+// If the container has already been created, exec.ErrTaskPrepared is returned.
+func (r *controller) Prepare(ctx context.Context) error {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+
+ // Make sure all the networks that the task needs are created.
+ if err := r.adapter.createNetworks(ctx); err != nil {
+ return err
+ }
+
+ // Make sure all the volumes that the task needs are created.
+ if err := r.adapter.createVolumes(ctx, r.backend); err != nil {
+ return err
+ }
+
+ for {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+ if err := r.adapter.create(ctx, r.backend); err != nil {
+ if isContainerCreateNameConflict(err) {
+ if _, err := r.adapter.inspect(ctx); err != nil {
+ return err
+ }
+
+ // container is already created. success!
+ return exec.ErrTaskPrepared
+ }
+
+ if !strings.Contains(err.Error(), "No such image") { // todo: better error detection
+ return err
+ }
+ if err := r.adapter.pullImage(ctx); err != nil {
+ return err
+ }
+
+ continue // retry to create the container
+ }
+
+ break
+ }
+
+ return nil
+}
+
+// Start the container. An error will be returned if the container is already started.
+func (r *controller) Start(ctx context.Context) error {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+
+ ctnr, err := r.adapter.inspect(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Detect whether the container has *ever* been started. If so, we don't
+ // issue the start.
+ //
+ // TODO(stevvooe): This is very racy. While reading inspect, another could
+ // start the process and we could end up starting it twice.
+ if ctnr.State.Status != "created" {
+ return exec.ErrTaskStarted
+ }
+
+ if err := r.adapter.start(ctx); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Wait on the container to exit.
+func (r *controller) Wait(pctx context.Context) error {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+
+ ctx, cancel := context.WithCancel(pctx)
+ defer cancel()
+
+ c, err := r.adapter.wait(ctx)
+ if err != nil {
+ return err
+ }
+
+ <-c
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ ctnr, err := r.adapter.inspect(ctx)
+ if err != nil {
+ // TODO(stevvooe): Need to handle missing container here. It is likely
+ // that a Wait call with a not found error should result in no waiting
+ // and no error at all.
+ return err
+ }
+
+ if ctnr.State.ExitCode != 0 {
+ var cause error
+ if ctnr.State.Error != "" {
+ cause = errors.New(ctnr.State.Error)
+ }
+ cstatus, _ := parseContainerStatus(ctnr)
+ return &exitError{
+ code: ctnr.State.ExitCode,
+ cause: cause,
+ containerStatus: cstatus,
+ }
+ }
+ return nil
+}
+
+// Shutdown the container cleanly.
+func (r *controller) Shutdown(ctx context.Context) error {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+
+ if err := r.adapter.shutdown(ctx); err != nil {
+ if isUnknownContainer(err) || isStoppedContainer(err) {
+ return nil
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+// Terminate the container, with force.
+func (r *controller) Terminate(ctx context.Context) error {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+
+ if err := r.adapter.terminate(ctx); err != nil {
+ if isUnknownContainer(err) {
+ return nil
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+// Remove the container and its resources.
+func (r *controller) Remove(ctx context.Context) error {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+
+ // It may be necessary to shut down the task before removing it.
+ if err := r.Shutdown(ctx); err != nil {
+ if isUnknownContainer(err) {
+ return nil
+ }
+ // This may fail if the task was already shut down.
+ log.G(ctx).WithError(err).Debug("shutdown failed on removal")
+ }
+
+ // Try removing networks referenced in this task in case this
+ // task is the last one referencing it
+ if err := r.adapter.removeNetworks(ctx); err != nil {
+ if isUnknownContainer(err) {
+ return nil
+ }
+ return err
+ }
+
+ if err := r.adapter.remove(ctx); err != nil {
+ if isUnknownContainer(err) {
+ return nil
+ }
+
+ return err
+ }
+ return nil
+}
+
+// Close the runner and clean up any ephemeral resources.
+func (r *controller) Close() error {
+ select {
+ case <-r.closed:
+ return r.err
+ default:
+ r.err = exec.ErrControllerClosed
+ close(r.closed)
+ }
+ return nil
+}
+
+func (r *controller) checkClosed() error {
+ select {
+ case <-r.closed:
+ return r.err
+ default:
+ return nil
+ }
+}
+
+func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) {
+ status := &api.ContainerStatus{
+ ContainerID: ctnr.ID,
+ PID: int32(ctnr.State.Pid),
+ ExitCode: int32(ctnr.State.ExitCode),
+ }
+
+ return status, nil
+}
+
+type exitError struct {
+ code int
+ cause error
+ containerStatus *api.ContainerStatus
+}
+
+func (e *exitError) Error() string {
+ if e.cause != nil {
+ return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause)
+ }
+
+ return fmt.Sprintf("task: non-zero exit (%v)", e.code)
+}
+
+func (e *exitError) ExitCode() int {
+ return int(e.containerStatus.ExitCode)
+}
+
+func (e *exitError) Cause() error {
+ return e.cause
+}
diff --git a/daemon/cluster/executor/container/errors.go b/daemon/cluster/executor/container/errors.go
new file mode 100644
index 0000000000..6c03d36071
--- /dev/null
+++ b/daemon/cluster/executor/container/errors.go
@@ -0,0 +1,12 @@
+package container
+
+import "fmt"
+
+var (
+ // ErrImageRequired returned if a task is missing the image definition.
+ ErrImageRequired = fmt.Errorf("dockerexec: image required")
+
+ // ErrContainerDestroyed returned when a container is prematurely destroyed
+ // during a wait call.
+ ErrContainerDestroyed = fmt.Errorf("dockerexec: container destroyed")
+)
diff --git a/daemon/cluster/executor/container/executor.go b/daemon/cluster/executor/container/executor.go
new file mode 100644
index 0000000000..bf5e248f62
--- /dev/null
+++ b/daemon/cluster/executor/container/executor.go
@@ -0,0 +1,139 @@
+package container
+
+import (
+ "strings"
+
+ executorpkg "github.com/docker/docker/daemon/cluster/executor"
+ clustertypes "github.com/docker/docker/daemon/cluster/provider"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/network"
+ networktypes "github.com/docker/libnetwork/types"
+ "github.com/docker/swarmkit/agent/exec"
+ "github.com/docker/swarmkit/api"
+ "golang.org/x/net/context"
+)
+
+type executor struct {
+ backend executorpkg.Backend
+}
+
+// NewExecutor returns an executor from the docker client.
+func NewExecutor(b executorpkg.Backend) exec.Executor {
+ return &executor{
+ backend: b,
+ }
+}
+
+// Describe returns the underlying node description from the docker client.
+func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
+ info, err := e.backend.SystemInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ var plugins []api.PluginDescription
+ addPlugins := func(typ string, names []string) {
+ for _, name := range names {
+ plugins = append(plugins, api.PluginDescription{
+ Type: typ,
+ Name: name,
+ })
+ }
+ }
+
+ addPlugins("Volume", info.Plugins.Volume)
+ // Add builtin driver "overlay" (the only builtin multi-host driver) to
+ // the plugin list by default.
+ addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...))
+ addPlugins("Authorization", info.Plugins.Authorization)
+
+ // parse []string labels into a map[string]string
+ labels := map[string]string{}
+ for _, l := range info.Labels {
+ stringSlice := strings.SplitN(l, "=", 2)
+ // this will take the last value in the list for a given key
+ // ideally, one shouldn't assign multiple values to the same key
+ if len(stringSlice) > 1 {
+ labels[stringSlice[0]] = stringSlice[1]
+ }
+ }
+
+ description := &api.NodeDescription{
+ Hostname: info.Name,
+ Platform: &api.Platform{
+ Architecture: info.Architecture,
+ OS: info.OSType,
+ },
+ Engine: &api.EngineDescription{
+ EngineVersion: info.ServerVersion,
+ Labels: labels,
+ Plugins: plugins,
+ },
+ Resources: &api.Resources{
+ NanoCPUs: int64(info.NCPU) * 1e9,
+ MemoryBytes: info.MemTotal,
+ },
+ }
+
+ return description, nil
+}
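+
+// Example of the label parsing above (hypothetical input): engine labels
+// ["zone=us-east", "zone=us-west", "storage=ssd=fast"] yield the map
+// {"zone": "us-west", "storage": "ssd=fast"}; the last value wins for a
+// repeated key, and only the first "=" splits key from value.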
+
+func (e *executor) Configure(ctx context.Context, node *api.Node) error {
+ na := node.Attachment
+ if na == nil {
+ return nil
+ }
+
+ options := types.NetworkCreate{
+ Driver: na.Network.DriverState.Name,
+ IPAM: network.IPAM{
+ Driver: na.Network.IPAM.Driver.Name,
+ },
+ Options: na.Network.DriverState.Options,
+ CheckDuplicate: true,
+ }
+
+ for _, ic := range na.Network.IPAM.Configs {
+ c := network.IPAMConfig{
+ Subnet: ic.Subnet,
+ IPRange: ic.Range,
+ Gateway: ic.Gateway,
+ }
+ options.IPAM.Config = append(options.IPAM.Config, c)
+ }
+
+ return e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
+ ID: na.Network.ID,
+ NetworkCreateRequest: types.NetworkCreateRequest{
+ Name: na.Network.Spec.Annotations.Name,
+ NetworkCreate: options,
+ },
+ }, na.Addresses[0])
+}
+
+// Controller returns a docker container runner.
+func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
+ ctlr, err := newController(e.backend, t)
+ if err != nil {
+ return nil, err
+ }
+
+ return ctlr, nil
+}
+
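+// SetNetworkBootstrapKeys converts the swarm encryption keys into
+// libnetwork's format and hands them to the backend.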
+func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error {
+ nwKeys := []*networktypes.EncryptionKey{}
+ for _, key := range keys {
+ nwKey := &networktypes.EncryptionKey{
+ Subsystem: key.Subsystem,
+ Algorithm: int32(key.Algorithm),
+ Key: make([]byte, len(key.Key)),
+ LamportTime: key.LamportTime,
+ }
+ copy(nwKey.Key, key.Key)
+ nwKeys = append(nwKeys, nwKey)
+ }
+ e.backend.SetNetworkBootstrapKeys(nwKeys)
+
+ return nil
+}
diff --git a/daemon/cluster/filters.go b/daemon/cluster/filters.go
new file mode 100644
index 0000000000..5890698d00
--- /dev/null
+++ b/daemon/cluster/filters.go
@@ -0,0 +1,93 @@
+package cluster
+
+import (
+ "fmt"
+ "strings"
+
+ runconfigopts "github.com/docker/docker/runconfig/opts"
+ "github.com/docker/engine-api/types/filters"
+ swarmapi "github.com/docker/swarmkit/api"
+)
+
+func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) {
+ accepted := map[string]bool{
+ "name": true,
+ "id": true,
+ "label": true,
+ "role": true,
+ "membership": true,
+ }
+ if err := filter.Validate(accepted); err != nil {
+ return nil, err
+ }
+ f := &swarmapi.ListNodesRequest_Filters{
+ Names: filter.Get("name"),
+ IDPrefixes: filter.Get("id"),
+ Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
+ }
+
+ for _, r := range filter.Get("role") {
+ if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok {
+ f.Roles = append(f.Roles, swarmapi.NodeRole(role))
+ } else if r != "" {
+ return nil, fmt.Errorf("Invalid role filter: '%s'", r)
+ }
+ }
+
+ for _, a := range filter.Get("membership") {
+ if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok {
+ f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership))
+ } else if a != "" {
+ return nil, fmt.Errorf("Invalid membership filter: '%s'", a)
+ }
+ }
+
+ return f, nil
+}
+
+func newListServicesFilters(filter filters.Args) (*swarmapi.ListServicesRequest_Filters, error) {
+ accepted := map[string]bool{
+ "name": true,
+ "id": true,
+ "label": true,
+ }
+ if err := filter.Validate(accepted); err != nil {
+ return nil, err
+ }
+ return &swarmapi.ListServicesRequest_Filters{
+ Names: filter.Get("name"),
+ IDPrefixes: filter.Get("id"),
+ Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
+ }, nil
+}
+
+func newListTasksFilters(filter filters.Args) (*swarmapi.ListTasksRequest_Filters, error) {
+ accepted := map[string]bool{
+ "name": true,
+ "id": true,
+ "label": true,
+ "service": true,
+ "node": true,
+ "desired_state": true,
+ }
+ if err := filter.Validate(accepted); err != nil {
+ return nil, err
+ }
+ f := &swarmapi.ListTasksRequest_Filters{
+ Names: filter.Get("name"),
+ IDPrefixes: filter.Get("id"),
+ Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
+ ServiceIDs: filter.Get("service"),
+ NodeIDs: filter.Get("node"),
+ }
+
+ for _, s := range filter.Get("desired_state") {
+ if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok {
+ f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state))
+ } else if s != "" {
+ return nil, fmt.Errorf("Invalid desired_state filter: '%s'", s)
+ }
+ }
+
+ return f, nil
+}
diff --git a/daemon/cluster/helpers.go b/daemon/cluster/helpers.go
new file mode 100644
index 0000000000..bb9e10f1f5
--- /dev/null
+++ b/daemon/cluster/helpers.go
@@ -0,0 +1,108 @@
+package cluster
+
+import (
+ "fmt"
+
+ swarmapi "github.com/docker/swarmkit/api"
+ "golang.org/x/net/context"
+)
+
+func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) {
+ rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{})
+ if err != nil {
+ return nil, err
+ }
+
+ if len(rl.Clusters) == 0 {
+ return nil, fmt.Errorf("swarm not found")
+ }
+
+ // TODO: for now, assume a single cluster.
+ return rl.Clusters[0], nil
+}
+
+func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) {
+ // GetNode to match via full ID.
+ rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input})
+ if err != nil {
+ // If any error (including NotFound), ListNodes to match via full name.
+ rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{Names: []string{input}}})
+
+ if err != nil || len(rl.Nodes) == 0 {
+ // If any error or 0 result, ListNodes to match via ID prefix.
+ rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{IDPrefixes: []string{input}}})
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ if len(rl.Nodes) == 0 {
+ return nil, fmt.Errorf("node %s not found", input)
+ }
+
+ if l := len(rl.Nodes); l > 1 {
+ return nil, fmt.Errorf("node %s is ambigious (%d matches found)", input, l)
+ }
+
+ return rl.Nodes[0], nil
+ }
+ return rg.Node, nil
+}
+
+func getService(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Service, error) {
+ // GetService to match via full ID.
+ rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input})
+ if err != nil {
+ // If any error (including NotFound), ListServices to match via full name.
+ rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{Names: []string{input}}})
+ if err != nil || len(rl.Services) == 0 {
+ // If any error or 0 result, ListServices to match via ID prefix.
+ rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{IDPrefixes: []string{input}}})
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ if len(rl.Services) == 0 {
+ return nil, fmt.Errorf("service %s not found", input)
+ }
+
+ if l := len(rl.Services); l > 1 {
+ return nil, fmt.Errorf("service %s is ambigious (%d matches found)", input, l)
+ }
+
+ return rl.Services[0], nil
+ }
+ return rg.Service, nil
+}
+
+func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) {
+ // GetTask to match via full ID.
+ rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input})
+ if err != nil {
+ // If any error (including NotFound), ListTasks to match via full name.
+ rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{Names: []string{input}}})
+
+ if err != nil || len(rl.Tasks) == 0 {
+ // If any error or 0 result, ListTasks to match via ID prefix.
+ rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{IDPrefixes: []string{input}}})
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ if len(rl.Tasks) == 0 {
+ return nil, fmt.Errorf("task %s not found", input)
+ }
+
+ if l := len(rl.Tasks); l > 1 {
+ return nil, fmt.Errorf("task %s is ambigious (%d matches found)", input, l)
+ }
+
+ return rl.Tasks[0], nil
+ }
+ return rg.Task, nil
+}
diff --git a/daemon/cluster/provider/network.go b/daemon/cluster/provider/network.go
new file mode 100644
index 0000000000..d959c15ceb
--- /dev/null
+++ b/daemon/cluster/provider/network.go
@@ -0,0 +1,36 @@
+package provider
+
+import "github.com/docker/engine-api/types"
+
+// NetworkCreateRequest is a request when creating a network.
+type NetworkCreateRequest struct {
+ ID string
+ types.NetworkCreateRequest
+}
+
+// NetworkCreateResponse is a response when creating a network.
+type NetworkCreateResponse struct {
+ ID string `json:"Id"`
+}
+
+// VirtualAddress represents a virtual address.
+type VirtualAddress struct {
+ IPv4 string
+ IPv6 string
+}
+
+// PortConfig represents a port configuration.
+type PortConfig struct {
+ Name string
+ Protocol int32
+ TargetPort uint32
+ PublishedPort uint32
+}
+
+// ServiceConfig represents a service configuration.
+type ServiceConfig struct {
+ ID string
+ Name string
+ VirtualAddresses map[string]*VirtualAddress
+ ExposedPorts []*PortConfig
+}
diff --git a/daemon/container.go b/daemon/container.go
index 421b0c25d9..a2d1f47cda 100644
--- a/daemon/container.go
+++ b/daemon/container.go
@@ -101,7 +101,7 @@ func (daemon *Daemon) Register(c *container.Container) error {
return nil
}
-func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID) (*container.Container, error) {
+func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID, managed bool) (*container.Container, error) {
var (
id string
err error
@@ -117,6 +117,7 @@ func (daemon *Daemon) newContainer(name string, config *containertypes.Config, i
base := daemon.newBaseContainer(id)
base.Created = time.Now().UTC()
+ base.Managed = managed
base.Path = entrypoint
base.Args = args //FIXME: de-duplicate from config
base.Config = config
diff --git a/daemon/container_operations.go b/daemon/container_operations.go
index 658ca78649..c6fade219b 100644
--- a/daemon/container_operations.go
+++ b/daemon/container_operations.go
@@ -324,6 +324,10 @@ func (daemon *Daemon) updateNetwork(container *container.Container) error {
return nil
}
+func errClusterNetworkOnRun(n string) error {
+ return fmt.Errorf("swarm-scoped network (%s) is not compatible with `docker create` or `docker run`. This network can be only used docker service", n)
+}
+
// updateContainerNetworkSettings update the network settings
func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) error {
var (
@@ -345,6 +349,9 @@ func (daemon *Daemon) updateContainerNetworkSettings(container *container.Contai
if err != nil {
return err
}
+ if !container.Managed && n.Info().Dynamic() {
+ return errClusterNetworkOnRun(networkName)
+ }
networkName = n.Name()
}
if container.NetworkSettings == nil {
diff --git a/daemon/create.go b/daemon/create.go
index 2a3baa0f24..48e7245916 100644
--- a/daemon/create.go
+++ b/daemon/create.go
@@ -19,8 +19,17 @@ import (
"github.com/opencontainers/runc/libcontainer/label"
)
-// ContainerCreate creates a container.
+// CreateManagedContainer creates a container that is managed by a Service
+func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) {
+ return daemon.containerCreate(params, true)
+}
+
+// ContainerCreate creates a regular container
func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) {
+ return daemon.containerCreate(params, false)
+}
+
+func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (types.ContainerCreateResponse, error) {
if params.Config == nil {
return types.ContainerCreateResponse{}, fmt.Errorf("Config cannot be empty in order to create a container")
}
@@ -43,7 +52,7 @@ func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types
return types.ContainerCreateResponse{Warnings: warnings}, err
}
- container, err := daemon.create(params)
+ container, err := daemon.create(params, managed)
if err != nil {
return types.ContainerCreateResponse{Warnings: warnings}, daemon.imageNotExistToErrcode(err)
}
@@ -52,7 +61,7 @@ func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types
}
// Create creates a new container from the given configuration with a given name.
-func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *container.Container, retErr error) {
+func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) {
var (
container *container.Container
img *image.Image
@@ -76,7 +85,7 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *containe
return nil, err
}
- if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil {
+ if container, err = daemon.newContainer(params.Name, params.Config, imgID, managed); err != nil {
return nil, err
}
defer func() {
diff --git a/daemon/daemon.go b/daemon/daemon.go
index f00e801328..ed37e0b30e 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -28,6 +28,7 @@ import (
"github.com/docker/docker/daemon/exec"
"github.com/docker/engine-api/types"
containertypes "github.com/docker/engine-api/types/container"
+ "github.com/docker/libnetwork/cluster"
// register graph drivers
_ "github.com/docker/docker/daemon/graphdriver/register"
dmetadata "github.com/docker/docker/distribution/metadata"
@@ -94,6 +95,7 @@ type Daemon struct {
containerd libcontainerd.Client
containerdRemote libcontainerd.Remote
defaultIsolation containertypes.Isolation // Default isolation mode on Windows
+ clusterProvider cluster.Provider
}
func (daemon *Daemon) restore() error {
@@ -344,6 +346,12 @@ func (daemon *Daemon) registerLink(parent, child *container.Container, alias str
return nil
}
+// SetClusterProvider sets a component for querying the current cluster state.
+func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
+ daemon.clusterProvider = clusterProvider
+ daemon.netController.SetClusterProvider(clusterProvider)
+}
+
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
@@ -893,6 +901,10 @@ func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
return nil
}
+ if daemon.clusterProvider != nil {
+ return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode")
+ }
+
// enable discovery for the first time if it was not previously enabled
if daemon.discoveryWatcher == nil {
discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
diff --git a/daemon/inspect.go b/daemon/inspect.go
index e10402203f..ba9f6ecb2b 100644
--- a/daemon/inspect.go
+++ b/daemon/inspect.go
@@ -23,10 +23,12 @@ func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (
case versions.Equal(version, "1.20"):
return daemon.containerInspect120(name)
}
- return daemon.containerInspectCurrent(name, size)
+ return daemon.ContainerInspectCurrent(name, size)
}
-func (daemon *Daemon) containerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) {
+// ContainerInspectCurrent returns low-level information about a
+// container using the most recent API version.
+func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) {
container, err := daemon.GetContainer(name)
if err != nil {
return nil, err
diff --git a/daemon/inspect_windows.go b/daemon/inspect_windows.go
index 22496e5b07..a23f703e09 100644
--- a/daemon/inspect_windows.go
+++ b/daemon/inspect_windows.go
@@ -28,7 +28,7 @@ func addMountPoints(container *container.Container) []types.MountPoint {
// containerInspectPre120 get containers for pre 1.20 APIs.
func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) {
- return daemon.containerInspectCurrent(name, false)
+ return daemon.ContainerInspectCurrent(name, false)
}
func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig {
diff --git a/daemon/list.go b/daemon/list.go
index fd5b78dd60..48323d730c 100644
--- a/daemon/list.go
+++ b/daemon/list.go
@@ -91,6 +91,17 @@ func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.C
return daemon.reduceContainers(config, daemon.transformContainer)
}
+// ListContainersForNode returns all container IDs that match the specified nodeID
+func (daemon *Daemon) ListContainersForNode(nodeID string) []string {
+ var ids []string
+ for _, c := range daemon.List() {
+ if c.Config.Labels["com.docker.swarm.node.id"] == nodeID {
+ ids = append(ids, c.ID)
+ }
+ }
+ return ids
+}
+
func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Container {
idSearch := false
names := ctx.filters.Get("name")
diff --git a/daemon/network.go b/daemon/network.go
index 91a9c0bea7..f3203621d9 100644
--- a/daemon/network.go
+++ b/daemon/network.go
@@ -5,13 +5,14 @@ import (
"net"
"strings"
- netsettings "github.com/docker/docker/daemon/network"
+ "github.com/Sirupsen/logrus"
+ clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/errors"
"github.com/docker/docker/runconfig"
"github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/filters"
"github.com/docker/engine-api/types/network"
"github.com/docker/libnetwork"
+ networktypes "github.com/docker/libnetwork/types"
)
// NetworkControllerEnabled checks if the networking stack is enabled.
@@ -92,9 +93,106 @@ func (daemon *Daemon) getAllNetworks() []libnetwork.Network {
return list
}
+func isIngressNetwork(name string) bool {
+ return name == "ingress"
+}
+
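+// ingressChan is a 1-buffered channel used as a lock: ingressWait blocks
+// while another ingress network operation is in flight and returns the
+// function that releases the slot.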
+var ingressChan = make(chan struct{}, 1)
+
+func ingressWait() func() {
+ ingressChan <- struct{}{}
+ return func() { <-ingressChan }
+}
+
+// SetupIngress sets up ingress networking. It returns immediately; the
+// actual setup runs asynchronously once the network agent is ready.
+func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) error {
+ ip, _, err := net.ParseCIDR(nodeIP)
+ if err != nil {
+ return err
+ }
+
+ go func() {
+ controller := daemon.netController
+ controller.AgentInitWait()
+
+ if n, err := daemon.GetNetworkByName(create.Name); err == nil && n != nil && n.ID() != create.ID {
+ if err := controller.SandboxDestroy("ingress-sbox"); err != nil {
+ logrus.Errorf("Failed to delete stale ingress sandbox: %v", err)
+ return
+ }
+
+ if err := n.Delete(); err != nil {
+ logrus.Errorf("Failed to delete stale ingress network %s: %v", n.ID(), err)
+ return
+ }
+ }
+
+ if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil {
+ // If the error is anything other than an already-exists
+ // error, log it and return.
+ if _, ok := err.(libnetwork.NetworkNameError); !ok {
+ logrus.Errorf("Failed creating ingress network: %v", err)
+ return
+ }
+
+ // Otherwise continue down the call to create or recreate sandbox.
+ }
+
+ n, err := daemon.GetNetworkByID(create.ID)
+ if err != nil {
+ logrus.Errorf("Failed getting ingress network by id after creating: %v", err)
+ return
+ }
+
+ sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress())
+ if err != nil {
+ logrus.Errorf("Failed creating ingress sanbox: %v", err)
+ return
+ }
+
+ ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil))
+ if err != nil {
+ logrus.Errorf("Failed creating ingress endpoint: %v", err)
+ return
+ }
+
+ if err := ep.Join(sb, nil); err != nil {
+ logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err)
+ }
+ }()
+
+ return nil
+}
+
+// SetNetworkBootstrapKeys sets the bootstrap keys.
+func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error {
+ return daemon.netController.SetKeys(keys)
+}
+
+// CreateManagedNetwork creates an agent network.
+func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error {
+ _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true)
+ return err
+}
+
// CreateNetwork creates a network with the given name, driver and other optional parameters
func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) {
- if runconfig.IsPreDefinedNetwork(create.Name) {
+ resp, err := daemon.createNetwork(create, "", false)
+ if err != nil {
+ return nil, err
+ }
+ return resp, err
+}
+
+func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
+ // If there is a pending ingress network creation wait here
+ // since ingress network creation can happen via node download
+ // from manager or task download.
+ if isIngressNetwork(create.Name) {
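+ // ingressWait() acquires the slot and returns the release function,
+ // which the defer invokes when createNetwork returns.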
+ defer ingressWait()()
+ }
+
+ if runconfig.IsPreDefinedNetwork(create.Name) && !agent {
err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name)
return nil, errors.NewRequestForbiddenError(err)
}
@@ -134,7 +232,16 @@ func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.N
if create.Internal {
nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork())
}
- n, err := c.NewNetwork(driver, create.Name, "", nwOptions...)
+ if agent {
+ nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic())
+ nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false))
+ }
+
+ if isIngressNetwork(create.Name) {
+ nwOptions = append(nwOptions, libnetwork.NetworkOptionIngress())
+ }
+
+ n, err := c.NewNetwork(driver, create.Name, id, nwOptions...)
if err != nil {
return nil, err
}
@@ -168,6 +275,17 @@ func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnet
return ipamV4Cfg, ipamV6Cfg, nil
}
+// UpdateContainerServiceConfig updates a service configuration.
+func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error {
+ container, err := daemon.GetContainer(containerName)
+ if err != nil {
+ return err
+ }
+
+ container.NetworkSettings.Service = serviceConfig
+ return nil
+}
+
// ConnectContainerToNetwork connects the given container to the given
// network. If either cannot be found, an err is returned. If the
// network cannot be set up, an err is returned.
@@ -207,18 +325,29 @@ func (daemon *Daemon) GetNetworkDriverList() map[string]bool {
driver := network.Type()
pluginList[driver] = true
}
+ // TODO: replace this with a proper libnetwork API
+ pluginList["overlay"] = true
return pluginList
}
+// DeleteManagedNetwork deletes an agent network.
+func (daemon *Daemon) DeleteManagedNetwork(networkID string) error {
+ return daemon.deleteNetwork(networkID, true)
+}
+
// DeleteNetwork destroys a network unless it's one of docker's predefined networks.
func (daemon *Daemon) DeleteNetwork(networkID string) error {
+ return daemon.deleteNetwork(networkID, false)
+}
+
+func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error {
nw, err := daemon.FindNetwork(networkID)
if err != nil {
return err
}
- if runconfig.IsPreDefinedNetwork(nw.Name()) {
+ if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic {
err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name())
return errors.NewRequestForbiddenError(err)
}
@@ -230,14 +359,7 @@ func (daemon *Daemon) DeleteNetwork(networkID string) error {
return nil
}
-// FilterNetworks returns a list of networks filtered by the given arguments.
-// It returns an error if the filters are not included in the list of accepted filters.
-func (daemon *Daemon) FilterNetworks(netFilters filters.Args) ([]libnetwork.Network, error) {
- if netFilters.Len() != 0 {
- if err := netFilters.Validate(netsettings.AcceptedFilters); err != nil {
- return nil, err
- }
- }
- nwList := daemon.getAllNetworks()
- return netsettings.FilterNetworks(nwList, netFilters)
+// GetNetworks returns a list of all networks
+func (daemon *Daemon) GetNetworks() []libnetwork.Network {
+ return daemon.getAllNetworks()
}
diff --git a/daemon/network/settings.go b/daemon/network/settings.go
index 823bec2696..ff27cb0bbc 100644
--- a/daemon/network/settings.go
+++ b/daemon/network/settings.go
@@ -1,6 +1,7 @@
package network
import (
+ clustertypes "github.com/docker/docker/daemon/cluster/provider"
networktypes "github.com/docker/engine-api/types/network"
"github.com/docker/go-connections/nat"
)
@@ -14,6 +15,7 @@ type Settings struct {
LinkLocalIPv6Address string
LinkLocalIPv6PrefixLen int
Networks map[string]*networktypes.EndpointSettings
+ Service *clustertypes.ServiceConfig
Ports nat.PortMap
SandboxKey string
SecondaryIPAddresses []networktypes.Address
diff --git a/daemon/wait.go b/daemon/wait.go
index 52b335cdd7..bf7e2c7149 100644
--- a/daemon/wait.go
+++ b/daemon/wait.go
@@ -1,6 +1,10 @@
package daemon
-import "time"
+import (
+ "time"
+
+ "golang.org/x/net/context"
+)
// ContainerWait stops processing until the given container is
// stopped. If the container is not found, an error is returned. On a
@@ -15,3 +19,14 @@ func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, er
return container.WaitStop(timeout)
}
+
+// ContainerWaitWithContext returns a channel where the exit code is sent
+// when the container stops. The wait can be cancelled through the context.
+func (daemon *Daemon) ContainerWaitWithContext(ctx context.Context, name string) (<-chan int, error) {
+ container, err := daemon.GetContainer(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return container.WaitWithContext(ctx), nil
+}
diff --git a/docs/reference/api/docker_remote_api_v1.24.md b/docs/reference/api/docker_remote_api_v1.24.md
index 0ee368c028..0c93778f3b 100644
--- a/docs/reference/api/docker_remote_api_v1.24.md
+++ b/docs/reference/api/docker_remote_api_v1.24.md
@@ -492,7 +492,6 @@ Status Codes:
Return low-level information on the container `id`
-
**Example request**:
GET /containers/4fa6e0f0c678/json HTTP/1.1
@@ -3306,6 +3305,1119 @@ Status Codes
- **404** - no such network
- **500** - server error
+## 3.6 Nodes
+
+**Note:** Node operations require the engine to be part of a Swarm.
+
+### List nodes
+
+**Warning:** this endpoint is part of the Swarm management feature introduced in Docker 1.12, and
+might be subject to non backward-compatible changes.
+
+`GET /nodes`
+
+List nodes
+
+**Example request**:
+
+ GET /nodes HTTP/1.1
+
+**Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ [
+ {
+ "ID": "24ifsmvkjbyhk",
+ "Version": {
+ "Index": 8
+ },
+ "CreatedAt": "2016-06-07T20:31:11.853781916Z",
+ "UpdatedAt": "2016-06-07T20:31:11.999868824Z",
+ "Spec": {
+ "Role": "MANAGER",
+ "Membership": "ACCEPTED",
+ "Availability": "ACTIVE"
+ },
+ "Description": {
+ "Hostname": "bf3067039e47",
+ "Platform": {
+ "Architecture": "x86_64",
+ "OS": "linux"
+ },
+ "Resources": {
+ "NanoCPUs": 4000000000,
+ "MemoryBytes": 8272408576
+ },
+ "Engine": {
+ "EngineVersion": "1.12.0-dev",
+ "Plugins": [
+ {
+ "Type": "Volume",
+ "Name": "local"
+ },
+ {
+ "Type": "Network",
+ "Name": "overlay"
+ }
+ ]
+ }
+ },
+ "Status": {
+ "State": "READY"
+ },
+ "Manager": {
+ "Raft": {
+ "RaftID": 10070664527094528000,
+ "Addr": "172.17.0.2:4500",
+ "Status": {
+ "Leader": true,
+ "Reachability": "REACHABLE"
+ }
+ }
+ },
+ "Attachment": {
+ "Network": {
+ "ID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Version": {
+ "Index": 6
+ },
+ "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+ "UpdatedAt": "2016-06-07T20:31:11.921784144Z",
+ "Spec": {
+ "Name": "ingress",
+ "Labels": {
+ "com.docker.swarm.internal": "true"
+ },
+ "DriverConfiguration": {},
+ "IPAM": {
+ "Driver": {},
+ "Configs": [
+ {
+ "Family": "UNKNOWN",
+ "Subnet": "10.255.0.0/16"
+ }
+ ]
+ }
+ },
+ "DriverState": {
+ "Name": "overlay",
+ "Options": {
+ "com.docker.network.driver.overlay.vxlanid_list": "256"
+ }
+ },
+ "IPAM": {
+ "Driver": {
+ "Name": "default"
+ },
+ "Configs": [
+ {
+ "Family": "UNKNOWN",
+ "Subnet": "10.255.0.0/16"
+ }
+ ]
+ }
+ },
+ "Addresses": [
+ "10.255.0.2/16"
+ ]
+ }
+ }
+ ]
+
+Query Parameters:
+
+- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the
+ nodes list; see the encoding sketch below. Available filters:
+ - `id=<node id>`
+ - `name=<node name>`
+ - `membership=(pending|accepted|rejected)`
+ - `role=(worker|manager)`
+
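+A minimal Go sketch of how a client might encode this parameter. This is a
+standalone illustration, not part of this patch; the `role=manager` and
+`membership=accepted` values are just examples:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+)
+
+func main() {
+	// Filter names map to lists of accepted values.
+	f := map[string][]string{
+		"role":       {"manager"},
+		"membership": {"accepted"},
+	}
+	b, err := json.Marshal(f)
+	if err != nil {
+		panic(err)
+	}
+	q := url.Values{}
+	q.Set("filters", string(b))
+	// Produces the query string for GET /nodes.
+	fmt.Println("GET /nodes?" + q.Encode())
+}
+```
+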
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Inspect a node
+
+**Warning:** this endpoint is part of the Swarm management feature introduced in Docker 1.12, and
+might be subject to non backward-compatible changes.
+
+`GET /nodes/<id>`
+
+Return low-level information on the node `id`
+
+**Example request**:
+
+ GET /nodes/24ifsmvkjbyhk HTTP/1.1
+
+**Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {
+ "ID": "24ifsmvkjbyhk",
+ "Version": {
+ "Index": 8
+ },
+ "CreatedAt": "2016-06-07T20:31:11.853781916Z",
+ "UpdatedAt": "2016-06-07T20:31:11.999868824Z",
+ "Spec": {
+ "Role": "MANAGER",
+ "Membership": "ACCEPTED",
+ "Availability": "ACTIVE"
+ },
+ "Description": {
+ "Hostname": "bf3067039e47",
+ "Platform": {
+ "Architecture": "x86_64",
+ "OS": "linux"
+ },
+ "Resources": {
+ "NanoCPUs": 4000000000,
+ "MemoryBytes": 8272408576
+ },
+ "Engine": {
+ "EngineVersion": "1.12.0-dev",
+ "Plugins": [
+ {
+ "Type": "Volume",
+ "Name": "local"
+ },
+ {
+ "Type": "Network",
+ "Name": "overlay"
+ }
+ ]
+ }
+ },
+ "Status": {
+ "State": "READY"
+ },
+ "Manager": {
+ "Raft": {
+ "RaftID": 10070664527094528000,
+ "Addr": "172.17.0.2:4500",
+ "Status": {
+ "Leader": true,
+ "Reachability": "REACHABLE"
+ }
+ }
+ },
+ "Attachment": {
+ "Network": {
+ "ID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Version": {
+ "Index": 6
+ },
+ "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+ "UpdatedAt": "2016-06-07T20:31:11.921784144Z",
+ "Spec": {
+ "Name": "ingress",
+ "Labels": {
+ "com.docker.swarm.internal": "true"
+ },
+ "DriverConfiguration": {},
+ "IPAM": {
+ "Driver": {},
+ "Configs": [
+ {
+ "Family": "UNKNOWN",
+ "Subnet": "10.255.0.0/16"
+ }
+ ]
+ }
+ },
+ "DriverState": {
+ "Name": "overlay",
+ "Options": {
+ "com.docker.network.driver.overlay.vxlanid_list": "256"
+ }
+ },
+ "IPAM": {
+ "Driver": {
+ "Name": "default"
+ },
+ "Configs": [
+ {
+ "Family": "UNKNOWN",
+ "Subnet": "10.255.0.0/16"
+ }
+ ]
+ }
+ },
+ "Addresses": [
+ "10.255.0.2/16"
+ ]
+ }
+ }
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such node
+- **500** – server error
+
+## 3.7 Swarm
+
+### Initialize a new Swarm
+
+**Warning:** this endpoint is part of the Swarm management feature introduced in Docker 1.12, and
+might be subject to non backward-compatible changes.
+
+`POST /swarm/init`
+
+Initialize a new Swarm
+
+**Example request**:
+
+ POST /swarm/init HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "ListenAddr": "0.0.0.0:4500",
+ "ForceNewCluster": false,
+ "Spec": {
+ "AcceptancePolicy": {
+ "Policies": [
+ {
+ "Role": "MANAGER",
+ "Autoaccept": false
+ },
+ {
+ "Role": "WORKER",
+ "Autoaccept": true
+ }
+ ]
+ },
+ "Orchestration": {},
+ "Raft": {},
+ "Dispatcher": {},
+ "CAConfig": {}
+ }
+ }
+
+**Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Length: 0
+ Content-Type: text/plain; charset=utf-8
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error or node is already part of a Swarm
+
+JSON Parameters (a client sketch follows the list):
+
+- **ListenAddr** – Listen address used for inter-manager communication, as well as for determining
+ the networking interface used for the VXLAN Tunnel Endpoint (VTEP).
+- **ForceNewCluster** – Force creating a new Swarm even if already part of one.
+- **Spec** – Configuration settings of the new Swarm.
+ - **Policies** – An array of acceptance policies.
+ - **Role** – The role that policy applies to (`MANAGER` or `WORKER`)
+ - **Autoaccept** – A boolean indicating whether nodes joining for that role should be
+ automatically accepted in the Swarm.
+ - **Secret** – An optional secret to provide for nodes to join the Swarm.
+ - **Orchestration** – Configuration settings for the orchestration aspects of the Swarm.
+ - **TaskHistoryRetentionLimit** – Maximum number of tasks history stored.
+ - **RaftConfig** – Raft related configuration.
+ - **SnapshotInterval** – (TODO)
+ - **KeepOldSnapshots** – (TODO)
+ - **LogEntriesForSlowFollowers** – (TODO)
+ - **HeartbeatTick** – (TODO)
+ - **ElectionTick** – (TODO)
+ - **DispatcherConfig** – Configuration settings for the task dispatcher.
+ - **HeartbeatPeriod** – (TODO)
+ - **CAConfig** – CA configuration.
+ - **NodeCertExpiry** – Automatic expiry for nodes certificates.
+
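+As a rough sketch of driving this endpoint from Go (the socket path, the
+`v1.24` version prefix, and the payload values here are assumptions, not taken
+from this patch):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"strings"
+)
+
+func main() {
+	// Talk to the local daemon over its unix socket; the URL host is a placeholder.
+	client := &http.Client{
+		Transport: &http.Transport{
+			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
+				return net.Dial("unix", "/var/run/docker.sock")
+			},
+		},
+	}
+	body := `{"ListenAddr": "0.0.0.0:4500", "ForceNewCluster": false, "Spec": {}}`
+	resp, err := client.Post("http://localhost/v1.24/swarm/init", "application/json", strings.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	out, _ := ioutil.ReadAll(resp.Body)
+	fmt.Println(resp.Status, string(out))
+}
+```
+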
+### Join an existing Swarm
+
+**Warning:** this endpoint is part of the Swarm management feature introduced in Docker 1.12, and
+might be subject to non backward-compatible changes.
+
+`POST /swarm/join`
+
+Join an existing Swarm
+
+**Example request**:
+
+ POST /swarm/join HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "ListenAddr": "0.0.0.0:4500",
+ "RemoteAddr": "node1:4500",
+ "Secret": "",
+ "CAHash": "",
+ "Manager": false
+ }
+
+**Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Length: 0
+ Content-Type: text/plain; charset=utf-8
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error or node is already part of a Swarm
+
+JSON Parameters:
+
+- **ListenAddr** – Listen address used for inter-manager communication if the node gets promoted to
+ manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP).
+- **RemoteAddr** – Address of any manager node already participating in the Swarm to join.
+- **Secret** – Secret token for joining this Swarm.
+- **CAHash** – Optional hash of the root CA to avoid relying on trust on first use.
+- **Manager** – Directly join as a manager (only for a Swarm configured to autoaccept managers).
+
+### Leave a Swarm
+
+**Warning:** this endpoint is part of the Swarm management feature introduced in Docker 1.12, and
+might be subject to non backward-compatible changes.
+
+`POST /swarm/leave`
+
+Leave a Swarm
+
+**Example request**:
+
+ POST /swarm/leave HTTP/1.1
+
+**Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Length: 0
+ Content-Type: text/plain; charset=utf-8
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error or node is not part of a Swarm
+
+### Update a Swarm
+
+**Warning:** this endpoint is part of the Swarm management feature introduced in Docker 1.12, and
+might be subject to non backward-compatible changes.
+
+`POST /swarm/update`
+
+Update a Swarm
+
+**Example request**:
+
+ POST /swarm/update HTTP/1.1
+
+ (TODO)
+
+**Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Length: 0
+ Content-Type: text/plain; charset=utf-8
+
+ (TODO)
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error or node is not part of a Swarm
+
+## 3.8 Services
+
+**Note:** Service operations require the engine to be part of a Swarm.
+
+### List services
+
+**Warning:** this endpoint is part of the Swarm management feature introduced in Docker 1.12, and
+might be subject to non backward-compatible changes.
+
+`GET /services`
+
+List services
+
+**Example request**:
+
+ GET /services HTTP/1.1
+
+**Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ [
+ {
+ "ID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+ "Version": {
+ "Index": 19
+ },
+ "CreatedAt": "2016-06-07T21:05:51.880065305Z",
+ "UpdatedAt": "2016-06-07T21:07:29.962229872Z",
+ "Spec": {
+ "Name": "hopeful_cori",
+ "Task": {
+ "ContainerSpec": {
+ "Image": "redis"
+ },
+ "Resources": {
+ "Limits": {},
+ "Reservations": {}
+ },
+ "RestartPolicy": {
+ "Condition": "ANY"
+ },
+ "Placement": {}
+ },
+ "Mode": {
+ "Replicated": {
+ "Instances": 1
+ }
+ },
+ "UpdateConfig": {
+ "Parallelism": 1
+ },
+ "EndpointSpec": {
+ "Mode": "VIP",
+ "Ingress": "PUBLICPORT",
+ "ExposedPorts": [
+ {
+ "Protocol": "tcp",
+ "Port": 6379
+ }
+ ]
+ }
+ },
+ "Endpoint": {
+ "Spec": {},
+ "ExposedPorts": [
+ {
+ "Protocol": "tcp",
+ "Port": 6379,
+ "PublicPort": 30000
+ }
+ ],
+ "VirtualIPs": [
+ {
+ "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Addr": "10.255.0.2/16"
+ },
+ {
+ "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Addr": "10.255.0.3/16"
+ }
+ ]
+ }
+ }
+ ]
+
+Query Parameters:
+
+- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the
+ services list. Available filters:
+ - `id=<node id>`
+ - `name=<node name>`
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Create a service
+
+**Warning:** this endpoint is part of the Swarm management feature introduced in Docker 1.12, and
+might be subject to non backward-compatible changes.
+
+`POST /services/create`
+
+Create a service
+
+**Example request**:
+
+ POST /services/create HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "Name": "redis",
+ "Task": {
+ "ContainerSpec": {
+ "Image": "redis"
+ },
+ "Resources": {
+ "Limits": {},
+ "Reservations": {}
+ },
+ "RestartPolicy": {},
+ "Placement": {}
+ },
+ "Mode": {
+ "Replicated": {
+ "Instances": 1
+ }
+ },
+ "UpdateConfig": {
+ "Parallelism": 1
+ },
+ "EndpointSpec": {
+ "ExposedPorts": [
+ {
+ "Protocol": "tcp",
+ "Port": 6379
+ }
+ ]
+ }
+ }
+
+**Example response**:
+
+ HTTP/1.1 201 Created
+ Content-Type: application/json
+
+ {
+ "Id":"ak7w3gjqoa3kuz8xcpnyy0pvl"
+ }
+
+Status Codes:
+
+- **201** – no error
+- **500** – server error or node is not part of a Swarm
+
+JSON Parameters (a payload sketch follows the list):
+
+- **Annotations** – Optional metadata to associate with the service.
+ - **Name** – User-defined name for the service.
+ - **Labels** – A map of labels to associate with the service (e.g.,
+ `{"key":"value"[,"key2":"value2"]}`).
+- **Task** – Specification of the tasks to start as part of the new service.
+ - **ContainerSpec** - Container settings for containers started as part of this task.
+ - **Image** – A string specifying the image name to use for the container.
+ - **Command** – The command to be run in the image.
+ - **Args** – Arguments to the command.
+ - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`.
+ - **Dir** – A string specifying the working directory for commands to run in.
+ - **User** – A string value specifying the user inside the container.
+ - **Labels** – A map of labels to associate with the service (e.g.,
+ `{"key":"value"[,"key2":"value2"]}`).
+ - **Mounts** – Specification for mounts to be added to containers created as part of the new
+ service.
+ - **Target** – Container path.
+ - **Source** – Optional host path to be mounted in the target.
+ - **Type** – The mount type (`bind`, `ephemeral`, or `volume`).
+ - **VolumeName** – A name for the volume.
+ - **Populate** – A boolean indicating if the volume should be populated with the data from the
+ target (defaults to false).
+ - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or
+ `[r]slave` (`bind` type mounts only).
+ - **MCSAccessMode** – MCS label for sharing mode (`bind` type mounts only).
+ - **Writable** – A boolean indicating whether the mount should be writable.
+ - **VolumeTemplate** – Optional configuration for the volume.
+ - **Annotations** – User-defined name and labels for the volume.
+ - **Driver** – Name of the driver to be used and driver-specific options.
+ - **StopGracePeriod** – Amount of time to wait for the container to terminate before
+ forcefully killing it.
+ - **Resources** – Resource requirements which apply to each individual container created as part
+ of the service.
+ - **Limits** – Define resource limits.
+ - **CPU** – CPU limit
+ - **Memory** – Memory limit
+ - **Reservations** – Define resource reservations.
+ - **CPU** – CPU reservation
+ - **Memory** – Memory reservation
+ - **RestartPolicy** – Specification for the restart policy which applies to containers created
+ as part of this service.
+ - **Condition** – Condition for restart (`none`, `on_failure`, or `any`).
+ - **Delay** – Delay between restart attempts.
+ - **Attempts** – Maximum attempts to restart a given container before giving up (default value
+ is 0, which is ignored).
+ - **Window** – The time window used to evaluate the restart policy (default value is
+ 0, which is unbounded).
+ - **Placement** – An array of constraints.
+- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`).
+- **UpdateConfig** – Specification for the update strategy of the service.
+ - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited
+ parallelism).
+ - **Delay** – Amount of time between updates.
+- **Networks** – Array of network names or IDs to attach the service to.
+- **EndpointSpec** – (TODO)
+ - **EndpointSpecStrategy** – `network` or `disabled` (TODO)
+ - **ExposedPorts** – An object mapping ports to an empty object in the form of:
+ `"ExposedPorts": { "<port>/<tcp|udp>": {} }`
+
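+To make the payload shape concrete, here is a small Go sketch that assembles
+the minimal request body from the example above; the field names come from
+that example, not from the engine's type definitions:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+func main() {
+	// Minimal service spec mirroring the example request above.
+	spec := map[string]interface{}{
+		"Name": "redis",
+		"Task": map[string]interface{}{
+			"ContainerSpec": map[string]interface{}{"Image": "redis"},
+		},
+		"Mode": map[string]interface{}{
+			"Replicated": map[string]interface{}{"Instances": 1},
+		},
+	}
+	b, err := json.MarshalIndent(spec, "", "  ")
+	if err != nil {
+		panic(err)
+	}
+	// This is the body to POST to /services/create.
+	fmt.Println(string(b))
+}
+```
+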
+### Remove a service
+
+**Warning:** this endpoint is part of the Swarm management feature introduced in Docker 1.12, and
+might be subject to non backward-compatible changes.
+
+`DELETE /services/(id or name)`
+
+Stop and remove the service `id`
+
+**Example request**:
+
+ DELETE /services/16253994b7c4 HTTP/1.1
+
+**Example response**:
+
+ HTTP/1.1 204 No Content
+
+Status Codes:
+
+- **204** – no error
+- **404** – no such service
+- **500** – server error
+
+### Inspect a service
+
+**Warning:** this endpoint is part of the Swarm management feature introduced in Docker 1.12, and
+might be subject to non backward-compatible changes.
+
+`GET /services/(id or name)`
+
+Return information on the service `id`.
+
+**Example request**:
+
+ GET /services/1cb4dnqcyx6m66g2t538x3rxha HTTP/1.1
+
+**Example response**:
+
+ {
+ "ID": "ak7w3gjqoa3kuz8xcpnyy0pvl",
+ "Version": {
+ "Index": 95
+ },
+ "CreatedAt": "2016-06-07T21:10:20.269723157Z",
+ "UpdatedAt": "2016-06-07T21:10:20.276301259Z",
+ "Spec": {
+ "Name": "redis",
+ "Task": {
+ "ContainerSpec": {
+ "Image": "redis"
+ },
+ "Resources": {
+ "Limits": {},
+ "Reservations": {}
+ },
+ "RestartPolicy": {
+ "Condition": "ANY"
+ },
+ "Placement": {}
+ },
+ "Mode": {
+ "Replicated": {
+ "Instances": 1
+ }
+ },
+ "UpdateConfig": {
+ "Parallelism": 1
+ },
+ "EndpointSpec": {
+ "Mode": "VIP",
+ "Ingress": "PUBLICPORT",
+ "ExposedPorts": [
+ {
+ "Protocol": "tcp",
+ "Port": 6379
+ }
+ ]
+ }
+ },
+ "Endpoint": {
+ "Spec": {},
+ "ExposedPorts": [
+ {
+ "Protocol": "tcp",
+ "Port": 6379,
+ "PublicPort": 30001
+ }
+ ],
+ "VirtualIPs": [
+ {
+ "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Addr": "10.255.0.4/16"
+ }
+ ]
+ }
+ }
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such service
+- **500** – server error
+
+### Update a service
+
+(TODO)
+
+## 3.9 Tasks
+
+**Note:** Task operations require the engine to be part of a Swarm.
+
+### List tasks
+
+**Warning:** this endpoint is part of the Swarm management feature introduced in Docker 1.12, and
+might be subject to non backward-compatible changes.
+
+`GET /tasks`
+
+List tasks
+
+**Example request**:
+
+ GET /tasks HTTP/1.1
+
+**Example response**:
+
+ [
+ {
+ "ID": "0kzzo1i0y4jz6027t0k7aezc7",
+ "Version": {
+ "Index": 71
+ },
+ "CreatedAt": "2016-06-07T21:07:31.171892745Z",
+ "UpdatedAt": "2016-06-07T21:07:31.376370513Z",
+ "Name": "hopeful_cori",
+ "Spec": {
+ "ContainerSpec": {
+ "Image": "redis"
+ },
+ "Resources": {
+ "Limits": {},
+ "Reservations": {}
+ },
+ "RestartPolicy": {
+ "Condition": "ANY"
+ },
+ "Placement": {}
+ },
+ "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+ "Instance": 1,
+ "NodeID": "24ifsmvkjbyhk",
+ "ServiceAnnotations": {},
+ "Status": {
+ "Timestamp": "2016-06-07T21:07:31.290032978Z",
+ "State": "FAILED",
+ "Message": "execution failed",
+ "ContainerStatus": {}
+ },
+ "DesiredState": "SHUTDOWN",
+ "NetworksAttachments": [
+ {
+ "Network": {
+ "ID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Version": {
+ "Index": 18
+ },
+ "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+ "UpdatedAt": "2016-06-07T21:07:29.955277358Z",
+ "Spec": {
+ "Name": "ingress",
+ "Labels": {
+ "com.docker.swarm.internal": "true"
+ },
+ "DriverConfiguration": {},
+ "IPAM": {
+ "Driver": {},
+ "Configs": [
+ {
+ "Family": "UNKNOWN",
+ "Subnet": "10.255.0.0/16"
+ }
+ ]
+ }
+ },
+ "DriverState": {
+ "Name": "overlay",
+ "Options": {
+ "com.docker.network.driver.overlay.vxlanid_list": "256"
+ }
+ },
+ "IPAM": {
+ "Driver": {
+ "Name": "default"
+ },
+ "Configs": [
+ {
+ "Family": "UNKNOWN",
+ "Subnet": "10.255.0.0/16"
+ }
+ ]
+ }
+ },
+ "Addresses": [
+ "10.255.0.10/16"
+ ]
+ }
+ ],
+ "Endpoint": {
+ "Spec": {},
+ "ExposedPorts": [
+ {
+ "Protocol": "tcp",
+ "Port": 6379,
+ "PublicPort": 30000
+ }
+ ],
+ "VirtualIPs": [
+ {
+ "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Addr": "10.255.0.2/16"
+ },
+ {
+ "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Addr": "10.255.0.3/16"
+ }
+ ]
+ }
+ },
+ {
+ "ID": "1yljwbmlr8er2waf8orvqpwms",
+ "Version": {
+ "Index": 30
+ },
+ "CreatedAt": "2016-06-07T21:07:30.019104782Z",
+ "UpdatedAt": "2016-06-07T21:07:30.231958098Z",
+ "Name": "hopeful_cori",
+ "Spec": {
+ "ContainerSpec": {
+ "Image": "redis"
+ },
+ "Resources": {
+ "Limits": {},
+ "Reservations": {}
+ },
+ "RestartPolicy": {
+ "Condition": "ANY"
+ },
+ "Placement": {}
+ },
+ "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+ "Instance": 1,
+ "NodeID": "24ifsmvkjbyhk",
+ "ServiceAnnotations": {},
+ "Status": {
+ "Timestamp": "2016-06-07T21:07:30.202183143Z",
+ "State": "FAILED",
+ "Message": "execution failed",
+ "ContainerStatus": {}
+ },
+ "DesiredState": "SHUTDOWN",
+ "NetworksAttachments": [
+ {
+ "Network": {
+ "ID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Version": {
+ "Index": 18
+ },
+ "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+ "UpdatedAt": "2016-06-07T21:07:29.955277358Z",
+ "Spec": {
+ "Name": "ingress",
+ "Labels": {
+ "com.docker.swarm.internal": "true"
+ },
+ "DriverConfiguration": {},
+ "IPAM": {
+ "Driver": {},
+ "Configs": [
+ {
+ "Family": "UNKNOWN",
+ "Subnet": "10.255.0.0/16"
+ }
+ ]
+ }
+ },
+ "DriverState": {
+ "Name": "overlay",
+ "Options": {
+ "com.docker.network.driver.overlay.vxlanid_list": "256"
+ }
+ },
+ "IPAM": {
+ "Driver": {
+ "Name": "default"
+ },
+ "Configs": [
+ {
+ "Family": "UNKNOWN",
+ "Subnet": "10.255.0.0/16"
+ }
+ ]
+ }
+ },
+ "Addresses": [
+ "10.255.0.5/16"
+ ]
+ }
+ ],
+ "Endpoint": {
+ "Spec": {},
+ "ExposedPorts": [
+ {
+ "Protocol": "tcp",
+ "Port": 6379,
+ "PublicPort": 30000
+ }
+ ],
+ "VirtualIPs": [
+ {
+ "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Addr": "10.255.0.2/16"
+ },
+ {
+ "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Addr": "10.255.0.3/16"
+ }
+ ]
+ }
+ }
+ ]
+
+Query Parameters:
+
+- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the
+ tasks list. Available filters:
+ - `id=<task id>`
+ - `name=<task name>`
+ - `service=<service name>`
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Inspect a task
+
+**Warning:** this endpoint is part of the Swarm management feature introduced in Docker 1.12, and
+might be subject to non backward-compatible changes.
+
+`GET /tasks/(task id)`
+
+Get details on a task
+
+**Example request**:
+
+ GET /tasks/0kzzo1i0y4jz6027t0k7aezc7 HTTP/1.1
+
+**Example response**:
+
+ {
+ "ID": "0kzzo1i0y4jz6027t0k7aezc7",
+ "Version": {
+ "Index": 71
+ },
+ "CreatedAt": "2016-06-07T21:07:31.171892745Z",
+ "UpdatedAt": "2016-06-07T21:07:31.376370513Z",
+ "Name": "hopeful_cori",
+ "Spec": {
+ "ContainerSpec": {
+ "Image": "redis"
+ },
+ "Resources": {
+ "Limits": {},
+ "Reservations": {}
+ },
+ "RestartPolicy": {
+ "Condition": "ANY"
+ },
+ "Placement": {}
+ },
+ "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+ "Instance": 1,
+ "NodeID": "24ifsmvkjbyhk",
+ "ServiceAnnotations": {},
+ "Status": {
+ "Timestamp": "2016-06-07T21:07:31.290032978Z",
+ "State": "FAILED",
+ "Message": "execution failed",
+ "ContainerStatus": {}
+ },
+ "DesiredState": "SHUTDOWN",
+ "NetworksAttachments": [
+ {
+ "Network": {
+ "ID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Version": {
+ "Index": 18
+ },
+ "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+ "UpdatedAt": "2016-06-07T21:07:29.955277358Z",
+ "Spec": {
+ "Name": "ingress",
+ "Labels": {
+ "com.docker.swarm.internal": "true"
+ },
+ "DriverConfiguration": {},
+ "IPAM": {
+ "Driver": {},
+ "Configs": [
+ {
+ "Family": "UNKNOWN",
+ "Subnet": "10.255.0.0/16"
+ }
+ ]
+ }
+ },
+ "DriverState": {
+ "Name": "overlay",
+ "Options": {
+ "com.docker.network.driver.overlay.vxlanid_list": "256"
+ }
+ },
+ "IPAM": {
+ "Driver": {
+ "Name": "default"
+ },
+ "Configs": [
+ {
+ "Family": "UNKNOWN",
+ "Subnet": "10.255.0.0/16"
+ }
+ ]
+ }
+ },
+ "Addresses": [
+ "10.255.0.10/16"
+ ]
+ }
+ ],
+ "Endpoint": {
+ "Spec": {},
+ "ExposedPorts": [
+ {
+ "Protocol": "tcp",
+ "Port": 6379,
+ "PublicPort": 30000
+ }
+ ],
+ "VirtualIPs": [
+ {
+ "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Addr": "10.255.0.2/16"
+ },
+ {
+ "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+ "Addr": "10.255.0.3/16"
+ }
+ ]
+ }
+ }
+
+Status Codes:
+
+- **200** – no error
+- **404** – unknown task
+- **500** – server error
+
# 4. Going further
## 4.1 Inside `docker run`
diff --git a/docs/reference/commandline/index.md b/docs/reference/commandline/index.md
index db71e48c0c..fff0dd9d44 100644
--- a/docs/reference/commandline/index.md
+++ b/docs/reference/commandline/index.md
@@ -86,3 +86,23 @@ You start the Docker daemon with the command line. How you start the daemon affe
* [volume_inspect](volume_inspect.md)
* [volume_ls](volume_ls.md)
* [volume_rm](volume_rm.md)
+
+### Swarm node commands
+
+* [node_accept](node_accept.md)
+* [node_reject](node_reject.md)
+* [node_promote](node_promote.md)
+* [node_demote](node_demote.md)
+* [node_inspect](node_inspect.md)
+* [node_update](node_update.md)
+* [node_tasks](node_tasks.md)
+* [node_ls](node_ls.md)
+* [node_rm](node_rm.md)
+
+### Swarm management commands
+
+* [swarm init](swarm_init.md)
+* [swarm join](swarm_join.md)
+* [swarm leave](swarm_leave.md)
+* [swarm update](swarm_update.md)
+
diff --git a/docs/reference/commandline/info.md b/docs/reference/commandline/info.md
index 8e171fecdd..1303aa0435 100644
--- a/docs/reference/commandline/info.md
+++ b/docs/reference/commandline/info.md
@@ -37,7 +37,7 @@ available on the volume where `/var/lib/docker` is mounted.
## Display Docker system information
Here is a sample output for a daemon running on Ubuntu, using the overlay
-storage driver:
+storage driver, on a node that is part of a 2-node Swarm cluster:
$ docker -D info
Containers: 14
@@ -53,6 +53,11 @@ storage driver:
Plugins:
Volume: local
Network: bridge null host
+ Swarm:
+ NodeID: 0gac67oclbxq7
+ IsManager: YES
+ Managers: 2
+ Nodes: 2
Kernel Version: 4.4.0-21-generic
Operating System: Ubuntu 16.04 LTS
OSType: linux
diff --git a/docs/reference/commandline/inspect.md b/docs/reference/commandline/inspect.md
index 38d4098c0c..7220d69f5c 100644
--- a/docs/reference/commandline/inspect.md
+++ b/docs/reference/commandline/inspect.md
@@ -10,15 +10,15 @@ parent = "smn_cli"
# inspect
- Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...]
+ Usage: docker inspect [OPTIONS] CONTAINER|IMAGE|TASK [CONTAINER|IMAGE|TASK...]
- Return low-level information on a container or image
+ Return low-level information on a container, image or task
- -f, --format="" Format the output using the given go template
- --help Print usage
- --type=container|image Return JSON for specified type, permissible
- values are "image" or "container"
- -s, --size Display total file sizes if the type is container
+ -f, --format="" Format the output using the given go template
+ --help Print usage
+ --type=container|image|task Return JSON for specified type, permissible
+ values are "image" or "container" or "task"
+ -s, --size Display total file sizes if the type is container
By default, this will render all results in a JSON array. If the container and
image have the same name, this will return container JSON for unspecified type.
@@ -47,6 +47,10 @@ straightforward manner.
$ docker inspect --format='{{.LogPath}}' $INSTANCE_ID
+**Get a Task's image name:**
+
+ $ docker inspect --format='{{.Spec.ContainerSpec.Image}}' $INSTANCE_ID
+
**List All Port Bindings:**
One can loop over arrays and maps in the results to produce simple text
diff --git a/docs/reference/commandline/node_accept.md b/docs/reference/commandline/node_accept.md
new file mode 100644
index 0000000000..cc1100e84d
--- /dev/null
+++ b/docs/reference/commandline/node_accept.md
@@ -0,0 +1,28 @@
+<!--[metadata]>
++++
+title = "node accept"
+description = "The node accept command description and usage"
+keywords = ["node, accept"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+# node accept
+
+ Usage: docker node accept NODE [NODE...]
+
+ Accept a node in the swarm
+
+Accept a node into the swarm. This command targets a docker engine that is a manager in the swarm cluster.
+
+
+```bash
+$ docker node accept <node name>
+```
+
+## Related information
+
+* [node reject](node_reject.md)
+* [node promote](node_promote.md)
+* [node demote](node_demote.md)
diff --git a/docs/reference/commandline/node_demote.md b/docs/reference/commandline/node_demote.md
new file mode 100644
index 0000000000..9393e80da1
--- /dev/null
+++ b/docs/reference/commandline/node_demote.md
@@ -0,0 +1,28 @@
+<!--[metadata]>
++++
+title = "node demote"
+description = "The node demote command description and usage"
+keywords = ["node, demote"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+# node demote
+
+ Usage: docker node demote NODE [NODE...]
+
+ Demote a node as manager in the swarm
+
+Demotes an existing manager so that it is no longer a manager. This command targets a docker engine that is a manager in the swarm cluster.
+
+
+```bash
+$ docker node demote <node name>
+```
+
+## Related information
+
+* [node accept](node_accept.md)
+* [node reject](node_reject.md)
+* [node promote](node_promote.md)
diff --git a/docs/reference/commandline/node_inspect.md b/docs/reference/commandline/node_inspect.md
new file mode 100644
index 0000000000..48119c1693
--- /dev/null
+++ b/docs/reference/commandline/node_inspect.md
@@ -0,0 +1,108 @@
+<!--[metadata]>
++++
+title = "node inspect"
+description = "The node inspect command description and usage"
+keywords = ["node, inspect"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
+
+# node inspect
+
+ Usage: docker node inspect [OPTIONS] self|NODE [NODE...]
+
+ Return low-level information on a node
+
+ -f, --format= Format the output using the given go template.
+ --help Print usage
+ -p, --pretty Print the information in a human friendly format.
+
+Returns information about a node. By default, this command renders all results
+in a JSON array. You can specify an alternate format to execute a
+given template for each result. Go's
+[text/template](http://golang.org/pkg/text/template/) package describes all the
+details of the format.
+
+Example output:
+
+ $ docker node inspect swarm-manager
+ [
+ {
+ "ID": "0gac67oclbxq7",
+ "Version": {
+ "Index": 2028
+ },
+ "CreatedAt": "2016-06-06T20:49:32.720047494Z",
+ "UpdatedAt": "2016-06-07T00:23:31.207632893Z",
+ "Spec": {
+ "Role": "MANAGER",
+ "Membership": "ACCEPTED",
+ "Availability": "ACTIVE"
+ },
+ "Description": {
+ "Hostname": "swarm-manager",
+ "Platform": {
+ "Architecture": "x86_64",
+ "OS": "linux"
+ },
+ "Resources": {
+ "NanoCPUs": 1000000000,
+ "MemoryBytes": 1044250624
+ },
+ "Engine": {
+ "EngineVersion": "1.12.0",
+ "Labels": {
+ "provider": "virtualbox"
+ }
+ }
+ },
+ "Status": {
+ "State": "READY"
+ },
+ "Manager": {
+ "Raft": {
+ "RaftID": 2143745093569717375,
+ "Addr": "192.168.99.118:4500",
+ "Status": {
+ "Leader": true,
+ "Reachability": "REACHABLE"
+ }
+ }
+ },
+ "Attachment": {},
+ }
+ ]
+
+ $ docker node inspect --format '{{ .Manager.Raft.Status.Leader }}' self
+ false
+
+ $ docker node inspect --pretty self
+ ID: 2otfhz83efcc7
+ Hostname: ad960a848573
+ Status:
+ State: Ready
+ Availability: Active
+ Manager Status:
+ Address: 172.17.0.2:2377
+ Raft status: Reachable
+ Leader: Yes
+ Platform:
+ Operating System: linux
+ Architecture: x86_64
+ Resources:
+ CPUs: 4
+ Memory: 7.704 GiB
+ Plugins:
+ Network: overlay, bridge, null, host, overlay
+ Volume: local
+ Engine Version: 1.12.0
+
+## Related information
+
+* [node update](node_update.md)
+* [node tasks](node_tasks.md)
+* [node ls](node_ls.md)
+* [node rm](node_rm.md)
diff --git a/docs/reference/commandline/node_ls.md b/docs/reference/commandline/node_ls.md
new file mode 100644
index 0000000000..ce82f6b4b8
--- /dev/null
+++ b/docs/reference/commandline/node_ls.md
@@ -0,0 +1,89 @@
+<!--[metadata]>
++++
+title = "node ls"
+description = "The node ls command description and usage"
+keywords = ["node, list"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
+
+# node ls
+
+ Usage: docker node ls [OPTIONS]
+
+ List nodes in the swarm
+
+ Aliases:
+ ls, list
+
+ Options:
+ -f, --filter value Filter output based on conditions provided
+ --help Print usage
+ -q, --quiet Only display IDs
+
+Lists all the nodes that the Docker Swarm manager knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options.
+
+Example output:
+
+ $ docker node ls
+ ID NAME STATUS AVAILABILITY MANAGER STATUS LEADER
+ 0gac67oclbxq swarm-master Ready Active Reachable Yes
+ 0pwvm3ve66q7 swarm-node-02 Ready Active
+ 15xwihgw71aw * swarm-node-01 Ready Active Reachable
+
+
+## Filtering
+
+The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
+than one filter, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
+
+The currently supported filters are:
+
+* name
+* id
+* label
+
+### name
+
+The `name` filter matches on all or part of a node's name.
+
+The following filter matches the node with the name `swarm-master`.
+
+ $ docker node ls -f name=swarm-master
+ ID NAME STATUS AVAILABILITY MANAGER STATUS LEADER
+ 0gac67oclbxq * swarm-master Ready Active Reachable Yes
+
+### id
+
+The `id` filter matches all or part of a node's id.
+
+ $ docker node ls -f id=0
+ ID NAME STATUS AVAILABILITY MANAGER STATUS LEADER
+ 0gac67oclbxq * swarm-master Ready Active Reachable Yes
+ 0pwvm3ve66q7 swarm-node-02 Ready Active
+
+
+### label
+
+The `label` filter matches nodes based on the presence of a `label` alone or a `label` and a
+value.
+
+The following filter matches nodes with the `foo` label regardless of its value.
+
+```bash
+$ docker node ls -f "label=foo"
+ID NAME STATUS AVAILABILITY MANAGER STATUS LEADER
+15xwihgw71aw * swarm-node-01 Ready Active Reachable
+```
+
+
+## Related information
+
+* [node inspect](node_inspect.md)
+* [node update](node_update.md)
+* [node tasks](node_tasks.md)
+* [node rm](node_rm.md)
diff --git a/docs/reference/commandline/node_promote.md b/docs/reference/commandline/node_promote.md
new file mode 100644
index 0000000000..933f362081
--- /dev/null
+++ b/docs/reference/commandline/node_promote.md
@@ -0,0 +1,28 @@
+<!--[metadata]>
++++
+title = "node promote"
+description = "The node promote command description and usage"
+keywords = ["node, promote"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+# node promote
+
+ Usage: docker node promote NODE [NODE...]
+
+ Promote a node to manager in the swarm
+
+Promotes a node to the manager role. This command targets a docker engine that is a manager in the swarm cluster.
+
+
+```bash
+$ docker node promote <node name>
+```
+
+## Related information
+
+* [node accept](node_accept.md)
+* [node reject](node_reject.md)
+* [node demote](node_demote.md)
diff --git a/docs/reference/commandline/node_reject.md b/docs/reference/commandline/node_reject.md
new file mode 100644
index 0000000000..56dbe8bf45
--- /dev/null
+++ b/docs/reference/commandline/node_reject.md
@@ -0,0 +1,28 @@
+<!--[metadata]>
++++
+title = "node reject"
+description = "The node reject command description and usage"
+keywords = ["node, reject"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+# node reject
+
+ Usage: docker node reject NODE [NODE...]
+
+ Reject a node from the swarm
+
+Reject a node from joining the swarm. This command targets a docker engine that is a manager in the swarm cluster.
+
+
+```bash
+$ docker node reject <node name>
+```
+
+## Related information
+
+* [node accept](node_accept.md)
+* [node promote](node_promote.md)
+* [node demote](node_demote.md)
diff --git a/docs/reference/commandline/node_rm.md b/docs/reference/commandline/node_rm.md
new file mode 100644
index 0000000000..6eb9a873cf
--- /dev/null
+++ b/docs/reference/commandline/node_rm.md
@@ -0,0 +1,38 @@
+<!--[metadata]>
++++
+title = "node rm"
+description = "The node rm command description and usage"
+keywords = ["node, remove"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
+
+# node rm
+
+ Usage: docker node rm NODE [NODE...]
+
+ Remove a node from the swarm
+
+ Aliases:
+ rm, remove
+
+ Options:
+ --help Print usage
+
+Removes the specified nodes from the swarm.
+
+Example output:
+
+ $ docker node rm swarm-node-02
+ Node swarm-node-02 removed from Swarm
+
+
+## Related information
+
+* [node inspect](node_inspect.md)
+* [node update](node_update.md)
+* [node tasks](node_tasks.md)
+* [node ls](node_ls.md)
diff --git a/docs/reference/commandline/node_tasks.md b/docs/reference/commandline/node_tasks.md
new file mode 100644
index 0000000000..5bd6832a8a
--- /dev/null
+++ b/docs/reference/commandline/node_tasks.md
@@ -0,0 +1,94 @@
+<!--[metadata]>
++++
+title = "node tasks"
+description = "The node tasks command description and usage"
+keywords = ["node, tasks"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
+
+# node tasks
+
+ Usage: docker node tasks [OPTIONS] NODE
+
+ List tasks running on a node
+
+ Options:
+ -a, --all Display all instances
+ -f, --filter value Filter output based on conditions provided
+ --help Print usage
+ -n, --no-resolve Do not map IDs to Names
+
+Lists all the tasks that Docker knows about on a node. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options.
+
+Example output:
+
+ $ docker node tasks swarm-master
+ ID NAME SERVICE IMAGE DESIRED STATE LAST STATE NODE
+ dx2g0fe3zsdb6y6q453f8dqw2 redis.1 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+ f33pcf8lwhs4c1t4kq8szwzta redis.4 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+ 5v26yzixl3one3ptjyqqbd0ro redis.5 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+ adcaphlhsfr30d47lby6walg6 redis.8 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+ chancjvk9tex6768uzzacslq2 redis.9 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+
+
+## Filtering
+
+The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
+than one filter, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
+
+The currently supported filters are:
+
+* name
+* id
+* label
+* desired_state
+
+### name
+
+The `name` filter matches on all or part of a task's name.
+
+The following filter matches all tasks with a name containing the `redis` string.
+
+ $ docker node tasks -f name=redis swarm-master
+ ID NAME SERVICE IMAGE DESIRED STATE LAST STATE NODE
+ dx2g0fe3zsdb6y6q453f8dqw2 redis.1 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+ f33pcf8lwhs4c1t4kq8szwzta redis.4 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+ 5v26yzixl3one3ptjyqqbd0ro redis.5 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+ adcaphlhsfr30d47lby6walg6 redis.8 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+ chancjvk9tex6768uzzacslq2 redis.9 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+
+
+### id
+
+The `id` filter matches a task's id.
+
+ $ docker node tasks -f id=f33pcf8lwhs4c1t4kq8szwzta swarm-master
+ ID NAME SERVICE IMAGE DESIRED STATE LAST STATE NODE
+ f33pcf8lwhs4c1t4kq8szwzta redis.4 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+
+
+### label
+
+The `label` filter matches tasks based on the presence of a `label` alone or a `label` and a
+value.
+
+The following filter matches tasks with the `usage` label regardless of its value.
+
+```bash
+$ docker node tasks -f "label=usage" swarm-master
+ID NAME SERVICE IMAGE DESIRED STATE LAST STATE NODE
+dx2g0fe3zsdb6y6q453f8dqw2 redis.1 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+f33pcf8lwhs4c1t4kq8szwzta redis.4 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
+```
+
+
+## Related information
+
+* [node inspect](node_inspect.md)
+* [node update](node_update.md)
+* [node ls](node_ls.md)
+* [node rm](node_rm.md)
diff --git a/docs/reference/commandline/node_update.md b/docs/reference/commandline/node_update.md
new file mode 100644
index 0000000000..a48712d6c9
--- /dev/null
+++ b/docs/reference/commandline/node_update.md
@@ -0,0 +1,26 @@
+<!--[metadata]>
++++
+title = "node update"
+description = "The node update command description and usage"
+keywords = ["resources, update, dynamically"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
+
+# node update
+
+ Usage: docker node update [OPTIONS] NODE
+
+ Update a node
+
+
+
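+Updates a node's configuration. For example, you can change a node's availability, as
+shown in the Swarm tutorial (a minimal sketch; `worker1` is an illustrative node name):
+
+```bash
+$ docker node update --availability drain worker1
+worker1
+```
+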
+## Related information
+
+* [node inspect](node_inspect.md)
+* [node tasks](node_tasks.md)
+* [node ls](node_ls.md)
+* [node rm](node_rm.md)
diff --git a/docs/reference/commandline/swarm_init.md b/docs/reference/commandline/swarm_init.md
new file mode 100644
index 0000000000..d1e20b6175
--- /dev/null
+++ b/docs/reference/commandline/swarm_init.md
@@ -0,0 +1,69 @@
+<!--[metadata]>
++++
+title = "swarm init"
+description = "The swarm init command description and usage"
+keywords = ["swarm, init"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
+
+# swarm init
+
+ Usage: docker swarm init [OPTIONS]
+
+ Initialize a Swarm.
+
+ Options:
+ --auto-accept value Acceptance policy (default [worker,manager])
+ --force-new-cluster Force create a new cluster from current state.
+ --help Print usage
+ --listen-addr value Listen address (default 0.0.0.0:2377)
+ --secret string Set secret value needed to accept nodes into cluster
+
+Initialize a Swarm cluster. The docker engine targeted by this command becomes a manager
+in the newly created single-node Swarm cluster.
+
+
+```bash
+$ docker swarm init --listen-addr 192.168.99.121:2377
+Initializing a new swarm
+$ docker node ls
+ID NAME STATUS AVAILABILITY/MEMBERSHIP MANAGER STATUS LEADER
+3l1f6uzcuoa3 * swarm-master READY ACTIVE REACHABLE Yes
+```
+
+### `--auto-accept value`
+
+This flag controls node acceptance into the cluster. By default, both `worker` and `manager`
+nodes are auto-accepted by the cluster. You can change this by specifying what kinds of nodes
+can be auto-accepted into the cluster. If auto-accept is not turned on, then
+[node accept](node_accept.md) can be used to explicitly accept a node into the cluster.
+
+For example, the following initializes a cluster with auto-acceptance of workers, but not managers:
+
+
+```bash
+$ docker swarm init --listen-addr 192.168.99.121:2377 --auto-accept worker
+Initializing a new swarm
+```
+
+### `--force-new-cluster`
+
+This flag forces an existing node that was part of a lost quorum to restart as a single-node manager without losing its data.
+
+### `--listen-addr value`
+
+The node listens for inbound Swarm manager traffic on this IP:PORT.
+
+### `--secret string`
+
+Secret value needed to accept nodes into the Swarm.
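+
+For example, the following initialization requires joining nodes to present a secret (a
+sketch; the secret value is illustrative):
+
+```bash
+$ docker swarm init --listen-addr 192.168.99.121:2377 --secret my-cluster-secret
+Initializing a new swarm
+```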
+
+## Related information
+
+* [swarm join](swarm_join.md)
+* [swarm leave](swarm_leave.md)
+* [swarm update](swarm_update.md)
diff --git a/docs/reference/commandline/swarm_join.md b/docs/reference/commandline/swarm_join.md
new file mode 100644
index 0000000000..a08c50abca
--- /dev/null
+++ b/docs/reference/commandline/swarm_join.md
@@ -0,0 +1,68 @@
+<!--[metadata]>
++++
+title = "swarm join"
+description = "The swarm join command description and usage"
+keywords = ["swarm, join"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
+
+# swarm join
+
+ Usage: docker swarm join [OPTIONS] HOST:PORT
+
+ Join a Swarm as a node and/or manager.
+
+ Options:
+ --help Print usage
+ --listen-addr value Listen address (default 0.0.0.0:2377)
+ --manager Try joining as a manager.
+ --secret string Secret for node acceptance
+
+Join a node to a Swarm cluster. If the `--manager` flag is specified, the docker engine
+targeted by this command becomes a `manager`. If it is not specified, it becomes a `worker`.
+
+### Join a node to swarm as a manager
+
+```bash
+$ docker swarm join --manager --listen-addr 192.168.99.122:2377 192.168.99.121:2377
+This node is attempting to join a Swarm as a manager.
+$ docker node ls
+ID NAME STATUS AVAILABILITY/MEMBERSHIP MANAGER STATUS LEADER
+2fg70txcrde2 swarm-node-01 READY ACTIVE REACHABLE
+3l1f6uzcuoa3 * swarm-master READY ACTIVE REACHABLE Yes
+```
+
+### Join a node to swarm as a worker
+
+```bash
+$ docker swarm join --listen-addr 192.168.99.123:2377 192.168.99.121:2377
+This node is attempting to join a Swarm.
+$ docker node ls
+ID NAME STATUS AVAILABILITY/MEMBERSHIP MANAGER STATUS LEADER
+04zm7ue1fd1q swarm-node-02 READY ACTIVE
+2fg70txcrde2 swarm-node-01 READY ACTIVE REACHABLE
+3l1f6uzcuoa3 * swarm-master READY ACTIVE REACHABLE Yes
+```
+
+### `--manager`
+
+Joins the node as a manager.
+
+### `--listen-addr value`
+
+The node listens for inbound Swarm manager traffic on this IP:PORT.
+
+### `--secret string`
+
+Secret value required for nodes to join the swarm.
+
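+For example, a node joining a Swarm initialized with `--secret my-cluster-secret` must
+present the same secret (a sketch; the value is illustrative):
+
+```bash
+$ docker swarm join --secret my-cluster-secret 192.168.99.121:2377
+This node is attempting to join a Swarm.
+```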
+
+## Related information
+
+* [swarm init](swarm_init.md)
+* [swarm leave](swarm_leave.md)
+* [swarm update](swarm_update.md)
diff --git a/docs/reference/commandline/swarm_leave.md b/docs/reference/commandline/swarm_leave.md
new file mode 100644
index 0000000000..d6ce6de6f5
--- /dev/null
+++ b/docs/reference/commandline/swarm_leave.md
@@ -0,0 +1,52 @@
+<!--[metadata]>
++++
+title = "swarm leave"
+description = "The swarm leave command description and usage"
+keywords = ["swarm, leave"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
+
+# swarm leave
+
+ Usage: docker swarm leave
+
+ Leave the Swarm.
+
+ Options:
+ --help Print usage
+
+This command causes the node to leave the swarm.
+
+On a manager node:
+```bash
+$ docker node ls
+ID NAME STATUS AVAILABILITY/MEMBERSHIP MANAGER STATUS LEADER
+04zm7ue1fd1q swarm-node-02 READY ACTIVE
+2fg70txcrde2 swarm-node-01 READY ACTIVE REACHABLE
+3l1f6uzcuoa3 * swarm-master READY ACTIVE REACHABLE Yes
+```
+
+On a worker node:
+```bash
+$ docker swarm leave
+Node left the default swarm.
+```
+
+On a manager node:
+```bash
+$ docker node ls
+ID NAME STATUS AVAILABILITY/MEMBERSHIP MANAGER STATUS LEADER
+04zm7ue1fd1q swarm-node-02 DOWN ACTIVE
+2fg70txcrde2 swarm-node-01 READY ACTIVE REACHABLE
+3l1f6uzcuoa3 * swarm-master READY ACTIVE REACHABLE Yes
+```
+
+## Related information
+
+* [swarm init](swarm_init.md)
+* [swarm join](swarm_join.md)
+* [swarm update](swarm_update.md)
diff --git a/docs/reference/commandline/swarm_update.md b/docs/reference/commandline/swarm_update.md
new file mode 100644
index 0000000000..26a06a734c
--- /dev/null
+++ b/docs/reference/commandline/swarm_update.md
@@ -0,0 +1,37 @@
+<!--[metadata]>
++++
+title = "swarm update"
+description = "The swarm update command description and usage"
+keywords = ["swarm, update"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
+
+# swarm update
+
+ Usage: docker swarm update [OPTIONS]
+
+ Update the Swarm.
+
+ Options:
+ --auto-accept value Acceptance policy (default [worker,manager])
+ --help Print usage
+ --secret string Set secret value needed to accept nodes into cluster
+
+
+Updates a Swarm cluster with new parameter values. This command must target a manager node.
+
+
+```bash
+$ docker swarm update --auto-accept manager
+```
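+
+You can also rotate the secret that nodes must present when joining (a sketch using the
+`--secret` flag listed above; the secret value is illustrative):
+
+```bash
+$ docker swarm update --secret new-cluster-secret
+```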
+
+## Related information
+
+* [swarm init](swarm_init.md)
+* [swarm join](swarm_join.md)
+* [swarm leave](swarm_leave.md)
+
diff --git a/docs/swarm/index.md b/docs/swarm/index.md
new file mode 100644
index 0000000000..abfd033502
--- /dev/null
+++ b/docs/swarm/index.md
@@ -0,0 +1,79 @@
+<!--[metadata]>
++++
+title = "Swarm overview"
+description = "Docker Swarm overview"
+keywords = ["docker, container, cluster, swarm"]
+[menu.main]
+identifier="swarm_overview"
+parent="engine_swarm"
+weight="1"
+advisory = "rc"
++++
+<![end-metadata]-->
+# Docker Swarm overview
+
+To use this version of Swarm, install the Docker Engine `v1.12.0-rc1` or later
+from the [Docker releases GitHub
+repository](https://github.com/docker/docker/releases). Alternatively, install
+the latest Docker for Mac or Docker for Windows Beta.
+
+Docker Engine 1.12 includes Docker Swarm for natively managing a cluster of
+Docker Engines called a Swarm. Use the Docker CLI to create a Swarm, deploy
+application services to the Swarm, and manage the Swarm behavior.
+
+
+If you’re using a Docker version prior to `v1.12.0-rc1`, see [Docker
+Swarm](https://docs.docker.com/swarm).
+
+## Feature highlights
+
+* **Cluster management integrated with Docker Engine:** Use the Docker Engine
+CLI to create a Swarm of Docker Engines where you can deploy application
+services. You don't need additional orchestration software to create or manage
+a Swarm.
+
+* **Decentralized design:** Instead of handling differentiation between node
+roles at deployment time, Swarm handles any specialization at runtime. You can
+deploy both kinds of nodes, managers and workers, using the Docker Engine.
+This means you can build an entire Swarm from a single disk image.
+
+* **Declarative service model:** Swarm uses a declarative syntax to let you
+define the desired state of the various services in your application stack.
+For example, you might describe an application comprised of a web front end
+service with message queueing services and a database backend.
+
+* **Desired state reconciliation:** Swarm constantly monitors the cluster state
+and reconciles any differences between the actual state and your expressed desired
+state.
+
+* **Multi-host networking:** You can specify an overlay network for your
+application. Swarm automatically assigns addresses to the containers on the
+overlay network when it initializes or updates the application.
+
+* **Service discovery:** Swarm assigns each service a unique DNS name and load
+balances running containers. Each Swarm has an internal DNS server, through which
+you can query every container in the cluster using DNS.
+
+* **Load balancing:** Using Swarm, you can expose the ports for services to an
+external load balancer. Internally, Swarm lets you specify how to distribute
+service containers between nodes.
+
+* **Secure by default:** Each node in the Swarm enforces TLS mutual
+authentication and encryption to secure communications between itself and all
+other nodes. You have the option to use self-signed root certificates or
+certificates from a custom root CA.
+
+* **Scaling:** For each service, you can declare the number of instances you
+want to run. When you scale up or down, Swarm automatically adapts by adding
+or removing instances of the service to maintain the desired state.
+
+* **Rolling updates:** At rollout time you can apply service updates to nodes
+incrementally. Swarm lets you control the delay between service deployment to
+different sets of nodes. If anything goes wrong, you can roll back an instance
+of a service.
+
+## What's next?
+* Learn Swarm [key concepts](key-concepts.md).
+* Get started with the [Swarm tutorial](swarm-tutorial/index.md).
+
+<p style="margin-bottom:300px">&nbsp;</p>
diff --git a/docs/swarm/key-concepts.md b/docs/swarm/key-concepts.md
new file mode 100644
index 0000000000..e63c13eba0
--- /dev/null
+++ b/docs/swarm/key-concepts.md
@@ -0,0 +1,85 @@
+<!--[metadata]>
++++
+title = "Swarm key concepts"
+description = "Introducing key concepts for Docker Swarm"
+keywords = ["docker, container, cluster, swarm"]
+[menu.main]
+identifier="swarm-concepts"
+parent="engine_swarm"
+weight="2"
+advisory = "rc"
++++
+<![end-metadata]-->
+# Docker Swarm key concepts
+
+Building upon the core features of Docker Engine, Docker Swarm enables you to
+create a Swarm of Docker Engines and orchestrate services to run in the Swarm.
+This topic describes key concepts to help you begin using Docker Swarm.
+
+## Swarm
+
+**Docker Swarm** is the name for the cluster management and orchestration features
+embedded in the Docker Engine.
+
+A **Swarm** is a cluster of Docker Engines where you deploy a set of application
+services. When you deploy an application to a Swarm, you specify the desired
+state of the services, such as which services to run and how many instances of
+those services. The Swarm takes care of all orchestration duties required to
+keep the services running in the desired state.
+
+## Node
+
+A **node** is an active instance of the Docker Engine in the Swarm.
+
+When you deploy your application to a Swarm, **manager nodes** accept the
+service definition that describes the Swarm's desired state. Manager nodes also
+perform the orchestration and cluster management functions required to maintain
+the desired state of the Swarm. For example, when a manager node receives notice
+to deploy a web server, it dispatches the service tasks to worker nodes.
+
+By default the Docker Engine starts one manager node for a Swarm, but as you
+scale you can add more managers to make the cluster more fault-tolerant. If you
+require high availability Swarm management, Docker recommends three or five
+managers in your cluster.
+
+Because Swarm manager nodes share data using Raft, there should be an odd number
+of managers. The Swarm cluster can continue functioning in the face of up to
+`(N-1)/2` failures, where `N` is the number of manager nodes. For example, a
+Swarm with five managers tolerates the failure of two managers. More than five
+managers is likely to degrade cluster performance and is not recommended.
+
+**Worker nodes** receive and execute tasks dispatched from manager nodes. By
+default manager nodes are also worker nodes, but you can configure managers to
+be manager-only nodes.
+
+## Services and tasks
+
+A **service** is the definition of how to run the various tasks that make up
+your application. For example, you may create a service that deploys a Redis
+image in your Swarm.
+
+A **task** is the atomic scheduling unit of Swarm. For example a task may be to
+schedule a Redis container to run on a worker node.
+
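+Concretely, a command like the following (drawn from the Swarm tutorial) creates a
+replicated service that Swarm schedules as three Redis tasks:
+
+```bash
+$ docker service create --scale 3 --name redis redis:3.0.6
+```
+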
+
+## Service types
+
+For **replicated services**, Swarm deploys a specific number of replica tasks
+based upon the scale you set in the desired state.
+
+For **global services**, Swarm runs one task for the service on every available
+node in the cluster.
+
+## Load balancing
+
+Swarm uses **ingress load balancing** to expose the services you want to make
+available externally to the Swarm. Swarm can automatically assign the service a
+**PublishedPort** or you can configure a PublishedPort for the service in the
+30000-32767 range. External components, such as cloud load balancers, can access
+the service on the PublishedPort of any node in the cluster, even if the node is
+not currently running the service.
+
+Swarm has an internal DNS component that automatically assigns each service in
+the Swarm a DNS entry. Swarm uses **internal load balancing** to distribute requests
+among services within the cluster based upon the services' DNS name.
+
+<p style="margin-bottom:300px">&nbsp;</p>
diff --git a/docs/swarm/menu.md b/docs/swarm/menu.md
new file mode 100644
index 0000000000..df43027f2f
--- /dev/null
+++ b/docs/swarm/menu.md
@@ -0,0 +1,21 @@
+<!--[metadata]>
++++
+title = "Manage a Swarm (1.12 RC)"
+description = "How to use Docker Swarm to create and manage Docker Engine clusters"
+keywords = [" docker, documentation, developer, "]
+[menu.main]
+identifier = "engine_swarm"
+parent = "engine_use"
+weight = 0
+advisory = "rc"
++++
+<![end-metadata]-->
+
+
+## Use Docker Swarm to create and manage clusters of Docker Engines, called Swarms
+
+This section contains the following topics:
+
+* [Docker Swarm overview](index.md)
+* [Docker Swarm key concepts](key-concepts.md)
+* [Getting Started with Docker Swarm](swarm-tutorial/index.md)
diff --git a/docs/swarm/swarm-tutorial/add-nodes.md b/docs/swarm/swarm-tutorial/add-nodes.md
new file mode 100644
index 0000000000..9adb57f626
--- /dev/null
+++ b/docs/swarm/swarm-tutorial/add-nodes.md
@@ -0,0 +1,64 @@
+<!--[metadata]>
++++
+title = "Add nodes to the Swarm"
+description = "Add nodes to the Swarm"
+keywords = ["tutorial, cluster management, swarm"]
+[menu.main]
+identifier="add-nodes"
+parent="swarm-tutorial"
+weight=13
+advisory = "rc"
++++
+<![end-metadata]-->
+
+# Add nodes to the Swarm
+
+Once you've [created a Swarm](create-swarm.md) with a manager node, you're ready
+to add worker nodes.
+
+1. Open a terminal and ssh into the machine where you want to run a worker node.
+This tutorial uses the name `worker1`.
+
+2. Run `docker swarm join MANAGER-IP:PORT` to create a worker node joined to the
+existing Swarm. Replace MANAGER-IP with the IP address of the manager node and
+PORT with the port where the manager listens.
+
+ In the tutorial, the following command joins `worker1` to the Swarm on `manager1`:
+
+ ```
+ $ docker swarm join 192.168.99.100:2377
+
+ This node joined a Swarm as a worker.
+ ```
+
+3. Open a terminal and ssh into the machine where you want to run a second
+worker node. This tutorial uses the name `worker2`.
+
+4. Run `docker swarm join MANAGER-IP:PORT` to create a worker node joined to
+the existing Swarm. Replace MANAGER-IP with the IP address of the manager node and
+PORT with the port where the manager listens.
+
+5. Open a terminal and ssh into the machine where the manager node runs and run
+the `docker node ls` command to see the worker nodes:
+
+ ```bash
+ $ docker node ls
+
+ ID NAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER
+ 09fm6su6c24q * manager1 Accepted Ready Active Reachable Yes
+ 32ljq6xijzb9 worker1 Accepted Ready Active
+ 38fsncz6fal9 worker2 Accepted Ready Active
+ ```
+
+ The `MANAGER STATUS` column identifies the manager nodes in the Swarm. The empty
+ status in this column for `worker1` and `worker2` identifies them as worker nodes.
+
+ Swarm management commands like `docker node ls` only work on manager nodes.
+
+
+## What's next?
+
+Now your Swarm consists of a manager and two worker nodes. In the next step of
+the tutorial, you [deploy a service](deploy-service.md) to the Swarm.
+
+<p style="margin-bottom:300px">&nbsp;</p>
diff --git a/docs/swarm/swarm-tutorial/create-swarm.md b/docs/swarm/swarm-tutorial/create-swarm.md
new file mode 100644
index 0000000000..1e0a9fc220
--- /dev/null
+++ b/docs/swarm/swarm-tutorial/create-swarm.md
@@ -0,0 +1,77 @@
+<!--[metadata]>
++++
+title = "Create a Swarm"
+description = "Initialize the Swarm"
+keywords = ["tutorial, cluster management, swarm"]
+[menu.main]
+identifier="initialize-swarm"
+parent="swarm-tutorial"
+weight=12
+advisory = "rc"
++++
+<![end-metadata]-->
+
+# Create a Swarm
+
+After you complete the [tutorial setup](index.md) steps, you're ready
+to create a Swarm. Make sure the Docker Engine daemon is started on the host
+machines.
+
+1. Open a terminal and ssh into the machine where you want to run your manager
+node. For example, the tutorial uses a machine named `manager1`.
+
+2. Run `docker swarm init --listen-addr MANAGER-IP:PORT` to create a new Swarm.
+
+ In the tutorial, the following command creates a Swarm on the `manager1` machine:
+
+ ```
+ $ docker swarm init --listen-addr 192.168.99.100:2377
+
+ Swarm initialized: current node (09fm6su6c24qn) is now a manager.
+ ```
+
+ The `--listen-addr` flag configures the manager node to listen on port
+ `2377`. The other nodes in the Swarm must be able to access the manager at
+ that IP address.
+
+3. Run `docker info` to view the current state of the Swarm:
+
+ ```
+ $ docker info
+
+ Containers: 2
+ Running: 0
+ Paused: 0
+ Stopped: 2
+ ...snip...
+ Swarm:
+ NodeID: 09fm6su6c24qn
+ IsManager: YES
+ Managers: 1
+ Nodes: 1
+ ...snip...
+ ```
+
+4. Run the `docker node ls` command to view information about nodes:
+
+ ```
+ $ docker node ls
+
+ ID NAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER
+ 09fm6su6c24q * manager1 Accepted Ready Active Reachable Yes
+
+ ```
+
+ The `*` next to the node ID indicates that you're currently connected to
+ this node.
+
+ Docker Swarm automatically names the node after the machine's host name. The
+ tutorial covers other columns in later steps.
+
+## What's next?
+
+In the next section of the tutorial, we'll [add two more nodes](add-nodes.md) to
+the cluster.
+
+
+<p style="margin-bottom:300px">&nbsp;</p>
diff --git a/docs/swarm/swarm-tutorial/delete-service.md b/docs/swarm/swarm-tutorial/delete-service.md
new file mode 100644
index 0000000000..63c679b410
--- /dev/null
+++ b/docs/swarm/swarm-tutorial/delete-service.md
@@ -0,0 +1,44 @@
+<!--[metadata]>
++++
+title = "Delete the service"
+description = "Remove the service on the Swarm"
+keywords = ["tutorial, cluster management, swarm, service"]
+[menu.main]
+identifier="swarm-tutorial-delete-service"
+parent="swarm-tutorial"
+weight=19
+advisory = "rc"
++++
+<![end-metadata]-->
+
+# Delete the service running on the Swarm
+
+The remaining steps in the tutorial don't use the `helloworld` service, so now
+you can delete the service from the Swarm.
+
+1. If you haven't already, open a terminal and ssh into the machine where you
+run your manager node. For example, the tutorial uses a machine named
+`manager1`.
+
+2. Run `docker service rm helloworld` to remove the `helloworld` service.
+
+ ```
+ $ docker service rm helloworld
+ helloworld
+ ```
+
+3. Run `docker service inspect SERVICE-ID` to verify that Swarm removed the
+service. The CLI returns a message that the service is not found:
+
+ ```
+ $ docker service inspect helloworld
+ []
+ Error: no such service or task: helloworld
+ ```
+
+## What's next?
+
+In the next step of the tutorial, you set up a new service and apply a
+[rolling update](rolling-update.md).
+
+<p style="margin-bottom:300px">&nbsp;</p>
diff --git a/docs/swarm/swarm-tutorial/deploy-service.md b/docs/swarm/swarm-tutorial/deploy-service.md
new file mode 100644
index 0000000000..0b24e0057b
--- /dev/null
+++ b/docs/swarm/swarm-tutorial/deploy-service.md
@@ -0,0 +1,50 @@
+<!--[metadata]>
++++
+title = "Deploy a service"
+description = "Deploy the application"
+keywords = ["tutorial, cluster management, swarm"]
+[menu.main]
+identifier="deploy-application"
+parent="swarm-tutorial"
+weight=16
+advisory = "rc"
++++
+<![end-metadata]-->
+
+# Deploy a service to the Swarm
+
+After you [create a Swarm](create-swarm.md), you can deploy a service to the
+Swarm. For this tutorial, you also [added worker nodes](add-nodes.md), but that
+is not a requirement to deploy a service.
+
+1. Open a terminal and ssh into the machine where you run your manager node. For
+example, the tutorial uses a machine named `manager1`.
+
+2. Run the following command:
+
+ ```bash
+ $ docker service create --scale 1 --name helloworld alpine ping docker.com
+
+ 2zs4helqu64f3k3iuwywbk49w
+ ```
+
+ * The `docker service create` command creates the service.
+ * The `--name` flag names the service `helloworld`.
+ * The `--scale` flag specifies the desired state of 1 running instance.
+ * The arguments `alpine ping docker.com` define the service as an Alpine
+ Linux container that executes the command `ping docker.com`.
+
+3. Run `docker service ls` to see the list of running services:
+
+ ```
+ $ docker service ls
+
+ ID NAME SCALE IMAGE COMMAND
+ 2zs4helqu64f helloworld 1 alpine ping docker.com
+ ```
+
+## What's next?
+
+Now that you've deployed a service to the Swarm, you're ready to [inspect the service](inspect-service.md).
+
+<p style="margin-bottom:300px">&nbsp;</p>
diff --git a/docs/swarm/swarm-tutorial/drain-node.md b/docs/swarm/swarm-tutorial/drain-node.md
new file mode 100644
index 0000000000..49bb81165e
--- /dev/null
+++ b/docs/swarm/swarm-tutorial/drain-node.md
@@ -0,0 +1,129 @@
+<!--[metadata]>
++++
+title = "Drain a node"
+description = "Drain nodes on the Swarm"
+keywords = ["tutorial, cluster management, swarm, service, drain"]
+[menu.main]
+identifier="swarm-tutorial-drain-node"
+parent="swarm-tutorial"
+weight=21
++++
+<![end-metadata]-->
+
+# Drain a node on the Swarm
+
+In earlier steps of the tutorial, all the nodes have been running with `ACTIVE`
+availability. The Swarm manager can assign tasks to any `ACTIVE` node, so all
+nodes have been available to receive tasks.
+
+Sometimes, such as planned maintenance times, you need to set a node to `DRAIN`
+availability. `DRAIN` availability prevents a node from receiving new tasks
+from the Swarm manager. It also means the manager stops tasks running on the
+node and launches replica tasks on a node with `ACTIVE` availability.
+
+1. If you haven't already, open a terminal and ssh into the machine where you
+run your manager node. For example, the tutorial uses a machine named
+`manager1`.
+
+2. Verify that all your nodes are actively available.
+
+ ```
+ $ docker node ls
+
+ ID NAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER
+ 1x2bldyhie1cj worker1 Accepted Ready Active
+ 1y3zuia1z224i worker2 Accepted Ready Active
+ 2p5bfd34mx4op * manager1 Accepted Ready Active Reachable Yes
+ ```
+
+3. If you aren't still running the `redis` service from the [rolling
+update](rolling-update.md) tutorial, start it now:
+
+ ```bash
+ $ docker service create --scale 3 --name redis --update-delay 10s --update-parallelism 1 redis:3.0.6
+
+ 69uh57k8o03jtqj9uvmteodbb
+ ```
+
+4. Run `docker service tasks redis` to see how the Swarm manager assigned the
+tasks to different nodes:
+
+ ```
+ $ docker service tasks redis
+ ID NAME SERVICE IMAGE LAST STATE DESIRED STATE NODE
+ 3wfqsgxecktpwoyj2zjcrcn4r redis.1 redis redis:3.0.6 RUNNING 13 minutes RUNNING worker2
+ 8lcm041z3v80w0gdkczbot0gg redis.2 redis redis:3.0.6 RUNNING 13 minutes RUNNING worker1
+ d48skceeph9lkz4nbttig1z4a redis.3 redis redis:3.0.6 RUNNING 12 minutes RUNNING manager1
+ ```
+
+ In this case the Swarm manager distributed one task to each node. You may
+ see the tasks distributed differently among the nodes in your environment.
+
+5. Run `docker node update --availability drain NODE-ID` to drain a node that
+had a task assigned to it:
+
+ ```bash
+ $ docker node update --availability drain worker1
+ worker1
+ ```
+
+6. Inspect the node to check its availability:
+
+ ```
+ $ docker node inspect --pretty worker1
+ ID: 1x2bldyhie1cj
+ Hostname: worker1
+ Status:
+ State: READY
+ Availability: DRAIN
+ ...snip...
+ ```
+
+ The drained node shows `DRAIN` as its `Availability`.
+
+7. Run `docker service tasks redis` to see how the Swarm manager updated the
+task assignments for the `redis` service:
+
+ ```
+ ID NAME SERVICE IMAGE LAST STATE DESIRED STATE NODE
+ 3wfqsgxecktpwoyj2zjcrcn4r redis.1 redis redis:3.0.6 RUNNING 26 minutes RUNNING worker2
+ ah7o4u5upostw3up1ns9vbqtc redis.2 redis redis:3.0.6 RUNNING 9 minutes RUNNING manager1
+ d48skceeph9lkz4nbttig1z4a redis.3 redis redis:3.0.6 RUNNING 26 minutes RUNNING manager1
+ ```
+
+ The Swarm manager maintains the desired state by ending the task on a node
+ with `Drain` availability and creating a new task on a node with `Active`
+ availability.
+
+8. Run `docker node update --availability active NODE-ID` to return the drained
+node to an active state:
+
+ ```bash
+ $ docker node update --availability active worker1
+ worker1
+ ```
+
+9. Inspect the node to see the updated state:
+
+ ```
+ $ docker node inspect --pretty worker1
+ ID: 1x2bldyhie1cj
+ Hostname: worker1
+ Status:
+ State: READY
+ Availability: ACTIVE
+ ...snip...
+ ```
+
+ When you set the node back to `Active` availability, it can receive new tasks:
+
+ * during a service update to scale up
+ * during a rolling update
+ * when you set another node to `Drain` availability
+ * when a task fails on another active node
+
+## What's next?
+
+The next topic in the tutorial introduces volumes.
+
+<p style="margin-bottom:300px">&nbsp;</p>
diff --git a/docs/swarm/swarm-tutorial/index.md b/docs/swarm/swarm-tutorial/index.md
new file mode 100644
index 0000000000..4d4fdb07b9
--- /dev/null
+++ b/docs/swarm/swarm-tutorial/index.md
@@ -0,0 +1,87 @@
+<!--[metadata]>
++++
+title = "Set up for the tutorial"
+description = "Getting Started tutorial for Docker Swarm"
+keywords = ["tutorial, cluster management, swarm"]
+[menu.main]
+identifier="tutorial-setup"
+parent="swarm-tutorial"
+weight=11
+advisory = "rc"
++++
+<![end-metadata]-->
+
+# Getting Started with Docker Swarm
+
+This tutorial introduces you to the key features of Docker Swarm. It guides you
+through the following activities:
+
+* initializing a cluster of Docker Engines called a Swarm
+* adding nodes to the Swarm
+* deploying application services to the Swarm
+* managing the Swarm once you have everything running
+
+This tutorial uses Docker Engine CLI commands entered on the command line of a
+terminal window. You should be able to install Docker on networked machines and
+be comfortable running commands in the shell of your choice.
+
+If you’re brand new to Docker, see [About Docker Engine](../../index.md).
+
+## Set up
+To run this tutorial, you need the following:
+
+* [three networked host machines](#three-networked-host-machines)
+* [Docker Engine 1.12 or later installed](#docker-engine-1-12-or-later)
+* [the IP address of the manager machine](#the-ip-address-of-the-manager-machine)
+* [open ports between the hosts](#open-ports-between-the-hosts)
+
+### Three networked host machines
+
+The tutorial uses three networked host machines as nodes in the Swarm. These can
+be virtual machines on your PC, in a data center, or on a cloud service
+provider. This tutorial uses the following machine names:
+
+* manager1
+* worker1
+* worker2
+
+### Docker Engine 1.12 or later
+
+You must install Docker Engine on each one of the host machines. To use this
+version of Swarm, install the Docker Engine `v1.12.0-rc1` or later from the
+[Docker releases GitHub repository](https://github.com/docker/docker/releases).
+Alternatively, install the latest Docker for Mac or Docker for Windows Beta.
+
+Verify that the Docker Engine daemon is running on each of the machines.
+
+<!-- See the following options to install:
+
+* [Install Docker Engine](../../installation/index.md).
+
+* [Example: Manual install on cloud provider](../../installation/cloud/cloud-ex-aws.md).
+-->
+
+### The IP address of the manager machine
+
+The IP address must be assigned to a network interface available to the host
+operating system. All nodes in the Swarm must be able to access the manager at the IP address.
+
+>**Tip**: You can run `ifconfig` on Linux or Mac OS X to see a list of the
+available network interfaces.
+
+The tutorial uses `manager1` with IP address `192.168.99.100`.
+
+### Open ports between the hosts
+
+* **TCP port 2377** for cluster management communications
+* **TCP** and **UDP port 7946** for communication among nodes
+* **TCP** and **UDP port 4789** for overlay network traffic
+
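+On a Linux host managed with `ufw`, for example, you might open these ports as follows
+(a sketch; adapt it to your firewall tooling):
+
+```bash
+$ sudo ufw allow 2377/tcp   # cluster management
+$ sudo ufw allow 7946       # node-to-node communication (TCP and UDP)
+$ sudo ufw allow 4789/udp   # overlay network traffic
+```
+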
+>**Tip**: Docker recommends that every node in the cluster be on the same layer
+3 (IP) subnet with all traffic permitted between nodes.
+
+## What's next?
+
+After you have set up your environment, you're ready to [create a Swarm](create-swarm.md).
+
+<p style="margin-bottom:300px">&nbsp;</p>
diff --git a/docs/swarm/swarm-tutorial/inspect-service.md b/docs/swarm/swarm-tutorial/inspect-service.md
new file mode 100644
index 0000000000..8e4e3af9f9
--- /dev/null
+++ b/docs/swarm/swarm-tutorial/inspect-service.md
@@ -0,0 +1,124 @@
+<!--[metadata]>
++++
+title = "Inspect the service"
+description = "Inspect the application"
+keywords = ["tutorial, cluster management, swarm"]
+[menu.main]
+identifier="inspect-application"
+parent="swarm-tutorial"
+weight=17
+advisory = "rc"
++++
+<![end-metadata]-->
+
+# Inspect a service on the Swarm
+
+When you have [deployed a service](deploy-service.md) to your Swarm, you can use
+the Docker CLI to see details about the service running in the Swarm.
+
+1. If you haven't already, open a terminal and ssh into the machine where you
+run your manager node. For example, the tutorial uses a machine named
+`manager1`.
+
+2. Run `docker service inspect --pretty SERVICE-ID` to display the details about
+a service in an easily readable format.
+
+ To see the details on the `helloworld` service:
+ ```
+ $ docker service inspect --pretty helloworld
+
+ ID: 2zs4helqu64f3k3iuwywbk49w
+ Name: helloworld
+ Mode: REPLICATED
+ Scale: 1
+ Placement:
+ Strategy: SPREAD
+ UpdateConfig:
+ Parallelism: 1
+ ContainerSpec:
+ Image: alpine
+ Command: ping docker.com
+ ```
+
+ >**Tip**: To return the service details in JSON format, run the same command
+ without the `--pretty` flag.
+
+ ```
+ $ docker service inspect helloworld
+ [
+ {
+ "ID": "2zs4helqu64f3k3iuwywbk49w",
+ "Version": {
+ "Index": 16264
+ },
+ "CreatedAt": "2016-06-06T17:41:11.509146705Z",
+ "UpdatedAt": "2016-06-06T17:41:11.510426385Z",
+ "Spec": {
+ "Name": "helloworld",
+ "ContainerSpec": {
+ "Image": "alpine",
+ "Command": [
+ "ping",
+ "docker.com"
+ ],
+ "Resources": {
+ "Limits": {},
+ "Reservations": {}
+ }
+ },
+ "Mode": {
+ "Replicated": {
+ "Instances": 1
+ }
+ },
+ "RestartPolicy": {},
+ "Placement": {},
+ "UpdateConfig": {
+ "Parallelism": 1
+ },
+ "EndpointSpec": {}
+ },
+ "Endpoint": {
+ "Spec": {}
+ }
+ }
+ ]
+ ```
+
+3. Run `docker service tasks SERVICE-ID` to see which nodes are running the
+service:
+
+ ```
+ $ docker service tasks helloworld
+
+ ID NAME SERVICE IMAGE DESIRED STATE LAST STATE NODE
+ 1n6wif51j0w840udalgw6hphg helloworld.1 helloworld alpine RUNNING RUNNING 19 minutes manager1
+ ```
+
+ In this case, the one instance of the `helloworld` service is running on the
+ `manager1` node. Manager nodes in a Swarm can execute tasks just like worker
+ nodes.
+
+ Swarm also shows you the `DESIRED STATE` and `LAST STATE` of the service
+ task so you can see if tasks are running according to the service
+ definition.
+
+4. Run `docker ps` on the node where the instance of the service is running to
+see the service container.
+
+ >**Tip**: If `helloworld` is running on a node other than your manager node,
+ you must ssh to that node.
+
+ ```bash
+ $ docker ps
+
+ CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+ a0b6c02868ca alpine:latest "ping docker.com" 12 minutes ago Up 12 minutes helloworld.1.1n6wif51j0w840udalgw6hphg
+ ```
+
+## What's next?
+
+Next, you can [change the scale](scale-service.md) for the service running in
+the Swarm.
+
+<p style="margin-bottom:300px">&nbsp;</p>
diff --git a/docs/swarm/swarm-tutorial/menu.md b/docs/swarm/swarm-tutorial/menu.md
new file mode 100644
index 0000000000..aec86ae87d
--- /dev/null
+++ b/docs/swarm/swarm-tutorial/menu.md
@@ -0,0 +1,21 @@
+<!--[metadata]>
++++
+title = "Get started with Swarm"
+description = "Getting started tutorial for Docker Swarm"
+keywords = ["cluster, swarm, tutorial"]
+[menu.main]
+identifier="swarm-tutorial"
+parent="engine_swarm"
+weight=10
+advisory = "rc"
++++
+<![end-metadata]-->
+
+# Docker Swarm getting started tutorial
+
+## TOC
+
+- [Begin the tutorial](index.md) - Set up your environment to prepare
+ to build a Swarm.
+
+<p style="margin-bottom:300px">&nbsp;</p>
diff --git a/docs/swarm/swarm-tutorial/rolling-update.md b/docs/swarm/swarm-tutorial/rolling-update.md
new file mode 100644
index 0000000000..20cb13488b
--- /dev/null
+++ b/docs/swarm/swarm-tutorial/rolling-update.md
@@ -0,0 +1,105 @@
+<!--[metadata]>
++++
+title = "Apply rolling updates"
+description = "Apply rolling updates to a service on the Swarm"
+keywords = ["tutorial, cluster management, swarm, service, rolling-update"]
+[menu.main]
+identifier="swarm-tutorial-rolling-update"
+parent="swarm-tutorial"
+weight=20
+advisory = "rc"
++++
+<![end-metadata]-->
+
+# Apply rolling updates to a service
+
+In a previous step of the tutorial, you [scaled](scale-service.md) the number of
+instances of a service. In this part of the tutorial, you deploy a new Redis
+service and upgrade the service using rolling updates.
+
+1. If you haven't already, open a terminal and ssh into the machine where you
+run your manager node. For example, the tutorial uses a machine named
+`manager1`.
+
+2. Deploy Redis 3.0.6 to the Swarm and configure the service to update one
+task every 10 seconds:
+
+ ```bash
+ $ docker service create --scale 3 --name redis --update-delay 10s --update-parallelism 1 redis:3.0.6
+
+ 8m228injfrhdym2zvzhl9k3l0
+ ```
+
+ You configure the rolling update policy at service deployment time.
+
+ The `--update-parallelism` flag configures the number of service tasks
+ to update simultaneously.
+
+ The `--update-delay` flag configures the time delay between updates to
+ a service task or sets of tasks. You can express the time `T` as a number of
+ seconds `Ts`, minutes `Tm`, or hours `Th`. So `10m` indicates a 10-minute
+ delay.
+
+3. Inspect the `redis` service:
+ ```
+ $ docker service inspect redis --pretty
+
+ ID: 75kcmhuf8mif4a07738wttmgl
+ Name: redis
+ Mode: REPLICATED
+ Scale: 3
+ Placement:
+ Strategy: SPREAD
+ UpdateConfig:
+ Parallelism: 1
+ Delay: 10s
+ ContainerSpec:
+ Image: redis:3.0.6
+ ```
+
+4. Now you can update the container image for `redis`. Swarm applies the update
+to nodes according to the `UpdateConfig` policy:
+
+ ```bash
+ $ docker service update --image redis:3.0.7 redis
+ redis
+ ```
+
+5. Run `docker service inspect --pretty redis` to see the new image in the
+desired state:
+
+ ```
+ $ docker service inspect --pretty redis
+
+ ID: 1yrcci9v8zj6cokua2eishlob
+ Name: redis
+ Mode: REPLICATED
+ Scale: 3
+ Placement:
+ Strategy: SPREAD
+ UpdateConfig:
+ Parallelism: 1
+ Delay: 10s
+ ContainerSpec:
+ Image: redis:3.0.7
+ ```
+
+6. Run `docker service tasks SERVICE-ID` to watch the rolling update:
+
+ ```
+ $ docker service tasks redis
+
+ ID NAME SERVICE IMAGE DESIRED STATE LAST STATE NODE
+ 5409nu4crb0smamziqwuug67u redis.1 redis redis:3.0.7 RUNNING RUNNING 21 seconds worker2
+ b8ezq58zugcg1trk8k7jrq9ym redis.2 redis redis:3.0.7 RUNNING RUNNING 1 seconds worker1
+ cgdcbipxnzx0y841vysiafb64 redis.3 redis redis:3.0.7 RUNNING RUNNING 11 seconds worker1
+ ```
+
+ Before Swarm updates all of the tasks, you can see that some are running
+ `redis:3.0.6` while others are running `redis:3.0.7`. The output above shows
+ the state once the rolling updates are done. You can see that each instance
+ entered the `RUNNING` state in 10-second increments.
+
+Next, learn about how to [drain a node](drain-node.md) in the Swarm.
+
+<p style="margin-bottom:300px">&nbsp;</p>
diff --git a/docs/swarm/swarm-tutorial/scale-service.md b/docs/swarm/swarm-tutorial/scale-service.md
new file mode 100644
index 0000000000..5be6ddc48b
--- /dev/null
+++ b/docs/swarm/swarm-tutorial/scale-service.md
@@ -0,0 +1,75 @@
+<!--[metadata]>
++++
+title = "Scale the service"
+description = "Scale the service running in the Swarm"
+keywords = ["tutorial, cluster management, swarm, scale"]
+[menu.main]
+identifier="swarm-tutorial-scale-service"
+parent="swarm-tutorial"
+weight=18
+advisory = "rc"
++++
+<![end-metadata]-->
+
+# Scale the service in the Swarm
+
+Once you have [deployed a service](deploy-service.md) to a Swarm, you are ready
+to use the Docker CLI to scale the number of service tasks in
+the Swarm.
+
+1. If you haven't already, open a terminal and ssh into the machine where you
+run your manager node. For example, the tutorial uses a machine named
+`manager1`.
+
+2. Run the following command to change the desired state of the
+service running in the Swarm:
+
+ ```
+ $ docker service update --scale NUMBER-OF-TASKS SERVICE-ID
+ ```
+
+ The `--scale` flag indicates the number of tasks you want in the new desired
+ state. For example:
+
+ ```
+ $ docker service update --scale 5 helloworld
+ helloworld
+ ```
+
+3. Run `docker service tasks SERVICE-ID` to see the updated task list:
+
+ ```
+ $ docker service tasks helloworld
+
+ ID NAME SERVICE IMAGE DESIRED STATE LAST STATE NODE
+ 1n6wif51j0w840udalgw6hphg helloworld.1 helloworld alpine RUNNING RUNNING 2 minutes manager1
+ dfhsosk00wxfb7j0cazp3fmhy helloworld.2 helloworld alpine RUNNING RUNNING 15 seconds worker2
+ 6cbedbeywo076zn54fnwc667a helloworld.3 helloworld alpine RUNNING RUNNING 15 seconds worker1
+ 7w80cafrry7asls96lm2tmwkz helloworld.4 helloworld alpine RUNNING RUNNING 10 seconds worker1
+ bn67kh76crn6du22ve2enqg5j helloworld.5 helloworld alpine RUNNING RUNNING 10 seconds manager1
+ ```
+
+ You can see that Swarm has created 4 new tasks to scale to a total of 5
+ running instances of Alpine Linux. The tasks are distributed among the
+ three nodes of the Swarm. Two are running on `manager1`.
+
+4. Run `docker ps` to see the containers running on the node where you're
+connected. The following example shows the tasks running on `manager1`:
+
+ ```
+ $ docker ps
+
+ CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+ 910669d5e188 alpine:latest "ping docker.com" 10 seconds ago Up 10 seconds helloworld.5.bn67kh76crn6du22ve2enqg5j
+ a0b6c02868ca alpine:latest "ping docker.com" 2 minutes ago Up 2 minutes helloworld.1.1n6wif51j0w840udalgw6hphg
+ ```
+
+ If you want to see the containers running on other nodes, you can ssh into
+ those nodes and run the `docker ps` command.
+
+## What's next?
+
+At this point in the tutorial, you're finished with the `helloworld` service.
+The next step shows how to [delete the service](delete-service.md).
+
+<p style="margin-bottom:300px">&nbsp;</p>
diff --git a/hack/vendor.sh b/hack/vendor.sh
index a619a476ee..5edc70a793 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -59,14 +59,14 @@ clone git github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
clone git golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git
clone git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git
clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
-clone git github.com/docker/go-connections v0.2.0
-clone git github.com/docker/engine-api 8c2141e14bb9e7540938d155976b3ef0661e4814
+clone git github.com/docker/go-connections fa2850ff103453a9ad190da0df0af134f0314b3d
+clone git github.com/docker/engine-api 6b2f24f16a7f1598635b6a99dbe38ec8a5eccaf8
clone git github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837
clone git github.com/imdario/mergo 0.2.1
#get libnetwork packages
-clone git github.com/docker/libnetwork b66c0385f30c6aa27b2957ed1072682c19a0b0b4
-clone git github.com/docker/go-events 2e7d352816128aa84f4d29b2a21d400133701a0d
+clone git github.com/docker/libnetwork e8da32ce5693f0ed6823d59c8415baf76c0809ea
+clone git github.com/docker/go-events 39718a26497694185f8fb58a7d6f31947f3dc42d
clone git github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
@@ -75,7 +75,7 @@ clone git github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa07
clone git github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
clone git github.com/docker/libkv 7283ef27ed32fe267388510a91709b307bb9942c
clone git github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
-clone git github.com/vishvananda/netlink 631962935bff4f3d20ff32a72e8944f6d2836a26
+clone git github.com/vishvananda/netlink 7995ff5647a22cbf0dc41bf5c0e977bdb0d5c6b7
clone git github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
clone git github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
clone git github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
@@ -138,6 +138,27 @@ clone git github.com/docker/docker-credential-helpers v0.3.0
# containerd
clone git github.com/docker/containerd cf554d59dd96e459544748290eb9167f4bcde509
+# cluster
+clone git github.com/docker/swarmkit 45094b473cbdb2d45e4d8f703fb615989399ae29
+clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
+clone git github.com/gogo/protobuf 43a2e0b1c32252bfbbdf81f7faa7a88fb3fa4028
+clone git github.com/cloudflare/cfssl 92f037e39eb103fb30f9151be40d9ed267fc4ae2
+clone git github.com/google/certificate-transparency 025a5cab06f6a819c455d9fdc9e2a1b6d0982284
+clone git golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 https://github.com/golang/crypto.git
+clone git github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47
+clone git github.com/hashicorp/go-memdb 98f52f52d7a476958fa9da671354d270c50661a7
+clone git github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
+clone git github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
+clone git github.com/coreos/pkg 2c77715c4df99b5420ffcae14ead08f52104065d
+clone git github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
+clone git github.com/prometheus/client_golang e51041b3fa41cece0dca035740ba6411905be473
+clone git github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
+clone git github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+clone git github.com/prometheus/common ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650
+clone git github.com/prometheus/procfs 454a56f35412459b5e684fd5ec0f9211b94f002a
+clone hg bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675
+clone git github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
+
# cli
clone git github.com/spf13/cobra 75205f23b3ea70dc7ae5e900d074e010c23c37e9 https://github.com/dnephin/cobra.git
clone git github.com/spf13/pflag cb88ea77998c3f024757528e3305022ab50b43be
diff --git a/integration-cli/check_test.go b/integration-cli/check_test.go
index aab8e4a786..c9f57b7450 100644
--- a/integration-cli/check_test.go
+++ b/integration-cli/check_test.go
@@ -184,6 +184,61 @@ func (s *DockerDaemonSuite) TearDownTest(c *check.C) {
s.ds.TearDownTest(c)
}
+const defaultSwarmPort = 2477
+
+func init() {
+ check.Suite(&DockerSwarmSuite{
+ ds: &DockerSuite{},
+ })
+}
+
+type DockerSwarmSuite struct {
+ ds *DockerSuite
+ daemons []*SwarmDaemon
+ portIndex int
+}
+
+func (s *DockerSwarmSuite) SetUpTest(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+}
+
+func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *SwarmDaemon {
+ d := &SwarmDaemon{
+ Daemon: NewDaemon(c),
+ port: defaultSwarmPort + s.portIndex,
+ }
+ d.listenAddr = fmt.Sprintf("0.0.0.0:%d", d.port)
+ err := d.StartWithBusybox()
+ c.Assert(err, check.IsNil)
+
+ if joinSwarm {
+ if len(s.daemons) > 0 {
+ c.Assert(d.Join(s.daemons[0].listenAddr, "", "", manager), check.IsNil)
+ } else {
+ aa := make(map[string]bool)
+ aa["worker"] = true
+ aa["manager"] = true
+ c.Assert(d.Init(aa, ""), check.IsNil)
+ }
+ }
+
+ s.portIndex++
+ s.daemons = append(s.daemons, d)
+
+ return d
+}
+
+func (s *DockerSwarmSuite) TearDownTest(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ for _, d := range s.daemons {
+ d.Stop()
+ }
+ s.daemons = nil
+ s.portIndex = 0
+
+ s.ds.TearDownTest(c)
+}
+
func init() {
check.Suite(&DockerTrustSuite{
ds: &DockerSuite{},
diff --git a/integration-cli/daemon.go b/integration-cli/daemon.go
index a46d14d052..9a7f07d564 100644
--- a/integration-cli/daemon.go
+++ b/integration-cli/daemon.go
@@ -1,6 +1,7 @@
package main
import (
+ "bytes"
"encoding/json"
"errors"
"fmt"
@@ -292,9 +293,9 @@ out1:
select {
case err := <-d.wait:
return err
- case <-time.After(15 * time.Second):
+ case <-time.After(20 * time.Second):
// time for stopping jobs and run onShutdown hooks
- d.c.Log("timeout")
+ d.c.Logf("timeout: %v", d.id)
break out1
}
}
@@ -306,7 +307,7 @@ out2:
return err
case <-tick:
i++
- if i > 4 {
+ if i > 5 {
d.c.Logf("tried to interrupt daemon for %d times, now try to kill it", i)
break out2
}
@@ -452,6 +453,27 @@ func (d *Daemon) CmdWithArgs(daemonArgs []string, name string, arg ...string) (s
return string(b), err
}
+// SockRequest executes a socket request on a daemon and returns the status code and output.
+func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, []byte, error) {
+ jsonData := bytes.NewBuffer(nil)
+ if err := json.NewEncoder(jsonData).Encode(data); err != nil {
+ return -1, nil, err
+ }
+
+ res, body, err := d.SockRequestRaw(method, endpoint, jsonData, "application/json")
+ if err != nil {
+ return -1, nil, err
+ }
+ b, err := readBody(body)
+ return res.StatusCode, b, err
+}
+
+// SockRequestRaw executes a socket request on a daemon and returns an HTTP
+// response and a reader for the output data.
+func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) {
+ return sockRequestRawToDaemon(method, endpoint, data, ct, d.sock())
+}
+
// LogFileName returns the path to the daemon's log file
func (d *Daemon) LogFileName() string {
return d.logFile.Name()
@@ -461,6 +483,16 @@ func (d *Daemon) getIDByName(name string) (string, error) {
return d.inspectFieldWithError(name, "Id")
}
+func (d *Daemon) activeContainers() (ids []string) {
+ out, _ := d.Cmd("ps", "-q")
+ for _, id := range strings.Split(out, "\n") {
+ if id = strings.TrimSpace(id); id != "" {
+ ids = append(ids, id)
+ }
+ }
+ return
+}
+
func (d *Daemon) inspectFilter(name, filter string) (string, error) {
format := fmt.Sprintf("{{%s}}", filter)
out, err := d.Cmd("inspect", "-f", format, name)
@@ -486,3 +518,12 @@ func (d *Daemon) buildImageWithOut(name, dockerfile string, useCache bool, build
buildCmd := buildImageCmdWithHost(name, dockerfile, d.sock(), useCache, buildFlags...)
return runCommandWithOutput(buildCmd)
}
+
+func (d *Daemon) checkActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) {
+ out, err := d.Cmd("ps", "-q")
+ c.Assert(err, checker.IsNil)
+ if len(strings.TrimSpace(out)) == 0 {
+ return 0, nil
+ }
+ return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out))
+}
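The new helpers make it possible to exercise a specific daemon's API socket directly. A minimal sketch of how a test might use SockRequest (the test name, endpoint, and decode target below are illustrative, not part of this change):

    func (s *DockerSwarmSuite) TestSockRequestSketch(c *check.C) {
        d := s.AddDaemon(c, true, true)

        // a status code of -1 means the request itself failed
        status, body, err := d.SockRequest("GET", "/containers/json", nil)
        c.Assert(err, check.IsNil)
        c.Assert(status, check.Equals, http.StatusOK)

        var containers []types.Container
        c.Assert(json.Unmarshal(body, &containers), check.IsNil)
    }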
diff --git a/integration-cli/daemon_swarm.go b/integration-cli/daemon_swarm.go
new file mode 100644
index 0000000000..0b553d34f8
--- /dev/null
+++ b/integration-cli/daemon_swarm.go
@@ -0,0 +1,178 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/docker/docker/pkg/integration/checker"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/go-check/check"
+)
+
+// SwarmDaemon is a test daemon with helpers for participating in a swarm.
+type SwarmDaemon struct {
+ *Daemon
+ swarm.Info
+ port int
+ listenAddr string
+}
+
+// Init initializes a new swarm cluster.
+func (d *SwarmDaemon) Init(autoAccept map[string]bool, secret string) error {
+ req := swarm.InitRequest{
+ ListenAddr: d.listenAddr,
+ }
+ for _, role := range []swarm.NodeRole{swarm.NodeRoleManager, swarm.NodeRoleWorker} {
+ req.Spec.AcceptancePolicy.Policies = append(req.Spec.AcceptancePolicy.Policies, swarm.Policy{
+ Role: role,
+ Autoaccept: autoAccept[strings.ToLower(string(role))],
+ Secret: secret,
+ })
+ }
+ status, out, err := d.SockRequest("POST", "/swarm/init", req)
+ if err != nil {
+ return fmt.Errorf("initializing swarm: %v", err)
+ }
+ if status != http.StatusOK {
+ return fmt.Errorf("initializing swarm: invalid status code %v, %q", status, out)
+ }
+ info, err := d.info()
+ if err != nil {
+ return err
+ }
+ d.Info = info
+ return nil
+}
+
+// Join joins the daemon to an existing cluster.
+func (d *SwarmDaemon) Join(remoteAddr, secret, cahash string, manager bool) error {
+ status, out, err := d.SockRequest("POST", "/swarm/join", swarm.JoinRequest{
+ ListenAddr: d.listenAddr,
+ RemoteAddrs: []string{remoteAddr},
+ Manager: manager,
+ Secret: secret,
+ CACertHash: cahash,
+ })
+ if err != nil {
+ return fmt.Errorf("joining swarm: %v", err)
+ }
+ if status != http.StatusOK {
+ return fmt.Errorf("joining swarm: invalid status code %v, %q", status, out)
+ }
+ info, err := d.info()
+ if err != nil {
+ return err
+ }
+ d.Info = info
+ return nil
+}
+
+// Leave makes the daemon leave the current cluster; force bypasses the checks that would otherwise block it.
+func (d *SwarmDaemon) Leave(force bool) error {
+ url := "/swarm/leave"
+ if force {
+ url += "?force=1"
+ }
+ status, out, err := d.SockRequest("POST", url, nil)
+ if err != nil {
+ return fmt.Errorf("leaving swarm: %v", err)
+ }
+ if status != http.StatusOK {
+ return fmt.Errorf("leaving swarm: invalid status code %v, %q", status, out)
+ }
+ return nil
+}
+
+func (d *SwarmDaemon) info() (swarm.Info, error) {
+ var info struct {
+ Swarm swarm.Info
+ }
+ status, dt, err := d.SockRequest("GET", "/info", nil)
+ if err != nil {
+ return info.Swarm, fmt.Errorf("get swarm info: %v", err)
+ }
+ if status != http.StatusOK {
+ return info.Swarm, fmt.Errorf("get swarm info: invalid status code %v", status)
+ }
+ if err := json.Unmarshal(dt, &info); err != nil {
+ return info.Swarm, err
+ }
+ return info.Swarm, nil
+}
+
+type serviceConstructor func(*swarm.Service)
+type nodeConstructor func(*swarm.Node)
+
+func (d *SwarmDaemon) createService(c *check.C, f ...serviceConstructor) string {
+ var service swarm.Service
+ for _, fn := range f {
+ fn(&service)
+ }
+ status, out, err := d.SockRequest("POST", "/services/create", service.Spec)
+
+ c.Assert(err, checker.IsNil)
+ c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out)))
+
+ var scr types.ServiceCreateResponse
+ c.Assert(json.Unmarshal(out, &scr), checker.IsNil)
+ return scr.ID
+}
+
+func (d *SwarmDaemon) getService(c *check.C, id string) *swarm.Service {
+ var service swarm.Service
+ status, out, err := d.SockRequest("GET", "/services/"+id, nil)
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+ c.Assert(err, checker.IsNil)
+ c.Assert(json.Unmarshal(out, &service), checker.IsNil)
+ c.Assert(service.ID, checker.Equals, id)
+ return &service
+}
+
+func (d *SwarmDaemon) updateService(c *check.C, service *swarm.Service, f ...serviceConstructor) {
+ for _, fn := range f {
+ fn(service)
+ }
+ url := fmt.Sprintf("/services/%s/update?version=%d", service.ID, service.Version.Index)
+ status, out, err := d.SockRequest("POST", url, service.Spec)
+ c.Assert(err, checker.IsNil)
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+}
+
+func (d *SwarmDaemon) removeService(c *check.C, id string) {
+ status, out, err := d.SockRequest("DELETE", "/services/"+id, nil)
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+ c.Assert(err, checker.IsNil)
+}
+
+func (d *SwarmDaemon) getNode(c *check.C, id string) *swarm.Node {
+ var node swarm.Node
+ status, out, err := d.SockRequest("GET", "/nodes/"+id, nil)
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+ c.Assert(err, checker.IsNil)
+ c.Assert(json.Unmarshal(out, &node), checker.IsNil)
+ c.Assert(node.ID, checker.Equals, id)
+ return &node
+}
+
+func (d *SwarmDaemon) updateNode(c *check.C, node *swarm.Node, f ...nodeConstructor) {
+ for _, fn := range f {
+ fn(node)
+ }
+ url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
+ status, out, err := d.SockRequest("POST", url, node.Spec)
+ c.Assert(err, checker.IsNil)
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+}
+
+func (d *SwarmDaemon) listNodes(c *check.C) []swarm.Node {
+ status, out, err := d.SockRequest("GET", "/nodes", nil)
+ c.Assert(err, checker.IsNil)
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+
+ nodes := []swarm.Node{}
+ c.Assert(json.Unmarshal(out, &nodes), checker.IsNil)
+ return nodes
+}
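A rough sketch of the Init/Join/Leave lifecycle these helpers expose (illustrative only; the helper function below is not part of this change):

    // exampleLifecycle forms a two-node cluster and tears it down again.
    // Error handling is abbreviated; real tests assert via check/checker.
    func exampleLifecycle(m, w *SwarmDaemon) error {
        // auto-accept both roles, no secret
        if err := m.Init(map[string]bool{"worker": true, "manager": true}, ""); err != nil {
            return err
        }
        if err := w.Join(m.listenAddr, "", "", false); err != nil {
            return err
        }
        // a worker can leave without force; the last manager needs force=true
        if err := w.Leave(false); err != nil {
            return err
        }
        return m.Leave(true)
    }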
diff --git a/integration-cli/docker_api_attach_test.go b/integration-cli/docker_api_attach_test.go
index a9fcc962b6..740ce6ecdd 100644
--- a/integration-cli/docker_api_attach_test.go
+++ b/integration-cli/docker_api_attach_test.go
@@ -17,7 +17,7 @@ func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat")
- rwc, err := sockConn(time.Duration(10 * time.Second))
+ rwc, err := sockConn(time.Duration(10*time.Second), "")
c.Assert(err, checker.IsNil)
cleanedContainerID := strings.TrimSpace(out)
@@ -67,7 +67,7 @@ func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) {
// regression gh14320
func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) {
- req, client, err := newRequestClient("POST", "/containers/doesnotexist/attach", nil, "")
+ req, client, err := newRequestClient("POST", "/containers/doesnotexist/attach", nil, "", "")
c.Assert(err, checker.IsNil)
resp, err := client.Do(req)
diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go
index 58f0b40fed..1e2c4a891c 100644
--- a/integration-cli/docker_api_containers_test.go
+++ b/integration-cli/docker_api_containers_test.go
@@ -1076,7 +1076,7 @@ func (s *DockerSuite) TestContainerApiChunkedEncoding(c *check.C) {
// TODO Windows CI: This can be ported
testRequires(c, DaemonIsLinux)
- conn, err := sockConn(time.Duration(10 * time.Second))
+ conn, err := sockConn(time.Duration(10*time.Second), "")
c.Assert(err, checker.IsNil)
client := httputil.NewClientConn(conn, nil)
defer client.Close()
diff --git a/integration-cli/docker_api_swarm_test.go b/integration-cli/docker_api_swarm_test.go
new file mode 100644
index 0000000000..4d85803f84
--- /dev/null
+++ b/integration-cli/docker_api_swarm_test.go
@@ -0,0 +1,573 @@
+// +build !windows
+
+package main
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/integration/checker"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/go-check/check"
+)
+
+var defaultReconciliationTimeout = 30 * time.Second
+
+func (s *DockerSwarmSuite) TestApiSwarmInit(c *check.C) {
+ // todo: find a better way than /info to verify that components are running
+ d1 := s.AddDaemon(c, true, true)
+ info, err := d1.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.ControlAvailable, checker.Equals, true)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+
+ d2 := s.AddDaemon(c, true, false)
+ info, err = d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.ControlAvailable, checker.Equals, false)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+
+ // Leaving cluster
+ c.Assert(d2.Leave(false), checker.IsNil)
+
+ info, err = d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.ControlAvailable, checker.Equals, false)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+
+ c.Assert(d2.Join(d1.listenAddr, "", "", false), checker.IsNil)
+
+ info, err = d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.ControlAvailable, checker.Equals, false)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+
+ // Current state restoring after restarts
+ err = d1.Stop()
+ c.Assert(err, checker.IsNil)
+ err = d2.Stop()
+ c.Assert(err, checker.IsNil)
+
+ err = d1.Start()
+ c.Assert(err, checker.IsNil)
+ err = d2.Start()
+ c.Assert(err, checker.IsNil)
+
+ info, err = d1.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.ControlAvailable, checker.Equals, true)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+
+ info, err = d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.ControlAvailable, checker.Equals, false)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmManualAcceptance(c *check.C) {
+ s.testAPISwarmManualAcceptance(c, "")
+}
+func (s *DockerSwarmSuite) TestApiSwarmManualAcceptanceSecret(c *check.C) {
+ s.testAPISwarmManualAcceptance(c, "foobaz")
+}
+
+func (s *DockerSwarmSuite) testAPISwarmManualAcceptance(c *check.C, secret string) {
+ d1 := s.AddDaemon(c, false, false)
+ c.Assert(d1.Init(map[string]bool{}, secret), checker.IsNil)
+
+ d2 := s.AddDaemon(c, false, false)
+ err := d2.Join(d1.listenAddr, "", "", false)
+ c.Assert(err, checker.NotNil)
+ if secret == "" {
+ c.Assert(err.Error(), checker.Contains, "Timeout reached")
+ info, err := d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
+ c.Assert(d2.Leave(false), checker.IsNil)
+ info, err = d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+ } else {
+ c.Assert(err.Error(), checker.Contains, "valid secret token is necessary")
+ info, err := d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+ }
+ d3 := s.AddDaemon(c, false, false)
+ go func() {
+ for i := 0; ; i++ {
+ info, err := d3.info()
+ c.Assert(err, checker.IsNil)
+ if info.NodeID != "" {
+ d1.updateNode(c, d1.getNode(c, info.NodeID), func(n *swarm.Node) {
+ n.Spec.Membership = swarm.NodeMembershipAccepted
+ })
+ return
+ }
+ if i >= 10 {
+ c.Errorf("could not find nodeID")
+ }
+ time.Sleep(300 * time.Millisecond)
+ }
+ }()
+ c.Assert(d3.Join(d1.listenAddr, secret, "", false), checker.IsNil)
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
+ d1 := s.AddDaemon(c, false, false)
+ aa := make(map[string]bool)
+ aa["worker"] = true
+ c.Assert(d1.Init(aa, "foobar"), checker.IsNil)
+
+ d2 := s.AddDaemon(c, false, false)
+ err := d2.Join(d1.listenAddr, "", "", false)
+ c.Assert(err, checker.NotNil)
+ c.Assert(err.Error(), checker.Contains, "secret token is necessary")
+ info, err := d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+
+ err = d2.Join(d1.listenAddr, "foobaz", "", false)
+ c.Assert(err, checker.NotNil)
+ c.Assert(err.Error(), checker.Contains, "secret token is necessary")
+ info, err = d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+
+ c.Assert(d2.Join(d1.listenAddr, "foobar", "", false), checker.IsNil)
+ info, err = d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+ c.Assert(d2.Leave(false), checker.IsNil)
+ info, err = d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmCAHash(c *check.C) {
+ d1 := s.AddDaemon(c, true, true)
+ d2 := s.AddDaemon(c, false, false)
+ err := d2.Join(d1.listenAddr, "", "foobar", false)
+ c.Assert(err, checker.NotNil)
+ c.Assert(err.Error(), checker.Contains, "invalid checksum digest format")
+
+ c.Assert(len(d1.CACertHash), checker.GreaterThan, 0)
+ c.Assert(d2.Join(d1.listenAddr, "", d1.CACertHash, false), checker.IsNil)
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmPromoteDemote(c *check.C) {
+ d1 := s.AddDaemon(c, false, false)
+ c.Assert(d1.Init(map[string]bool{"worker": true}, ""), checker.IsNil)
+ d2 := s.AddDaemon(c, true, false)
+
+ info, err := d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.ControlAvailable, checker.Equals, false)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+
+ d1.updateNode(c, d1.getNode(c, d2.NodeID), func(n *swarm.Node) {
+ n.Spec.Role = swarm.NodeRoleManager
+ })
+
+ for i := 0; ; i++ {
+ info, err := d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+ if info.ControlAvailable {
+ break
+ }
+ if i > 10 {
+ c.Errorf("node did not turn into manager")
+ } else {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ d1.updateNode(c, d1.getNode(c, d2.NodeID), func(n *swarm.Node) {
+ n.Spec.Role = swarm.NodeRoleWorker
+ })
+
+ for i := 0; ; i++ {
+ info, err := d2.info()
+ c.Assert(err, checker.IsNil)
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+ if !info.ControlAvailable {
+ break
+ }
+ if i > 10 {
+ c.Errorf("node did not turn into manager")
+ } else {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ // todo: test raft quorum stability
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmServicesCreate(c *check.C) {
+ d := s.AddDaemon(c, true, true)
+
+ instances := 2
+ id := d.createService(c, simpleTestService, setInstances(instances))
+ waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances)
+
+ service := d.getService(c, id)
+ instances = 5
+ d.updateService(c, service, setInstances(instances))
+ waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances)
+
+ d.removeService(c, service.ID)
+ waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0)
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmServicesMultipleAgents(c *check.C) {
+ d1 := s.AddDaemon(c, true, true)
+ d2 := s.AddDaemon(c, true, false)
+ d3 := s.AddDaemon(c, true, false)
+
+ time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks
+
+ instances := 9
+ id := d1.createService(c, simpleTestService, setInstances(instances))
+
+ waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0)
+ waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0)
+ waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.GreaterThan, 0)
+
+ waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+
+ // reconciliation on d2 node down
+ c.Assert(d2.Stop(), checker.IsNil)
+
+ waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+
+ // test downscaling
+ instances = 5
+ d1.updateService(c, d1.getService(c, id), setInstances(instances))
+ waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmServicesCreateGlobal(c *check.C) {
+ d1 := s.AddDaemon(c, true, true)
+ d2 := s.AddDaemon(c, true, false)
+ d3 := s.AddDaemon(c, true, false)
+
+ d1.createService(c, simpleTestService, setGlobalMode)
+
+ waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, 1)
+ waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1)
+ waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.Equals, 1)
+
+ d4 := s.AddDaemon(c, true, false)
+ d5 := s.AddDaemon(c, true, false)
+
+ waitAndAssert(c, defaultReconciliationTimeout, d4.checkActiveContainerCount, checker.Equals, 1)
+ waitAndAssert(c, defaultReconciliationTimeout, d5.checkActiveContainerCount, checker.Equals, 1)
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmServicesStateReporting(c *check.C) {
+ testRequires(c, SameHostDaemon)
+ testRequires(c, DaemonIsLinux)
+
+ d1 := s.AddDaemon(c, true, true)
+ d2 := s.AddDaemon(c, true, true)
+ d3 := s.AddDaemon(c, true, false)
+
+ time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks
+
+ instances := 9
+ d1.createService(c, simpleTestService, setInstances(instances))
+
+ waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+
+ getContainers := func() map[string]*SwarmDaemon {
+ m := make(map[string]*SwarmDaemon)
+ for _, d := range []*SwarmDaemon{d1, d2, d3} {
+ for _, id := range d.activeContainers() {
+ m[id] = d
+ }
+ }
+ return m
+ }
+
+ containers := getContainers()
+ c.Assert(containers, checker.HasLen, instances)
+ var toRemove string
+ for i := range containers {
+ toRemove = i
+ }
+
+ _, err := containers[toRemove].Cmd("stop", toRemove)
+ c.Assert(err, checker.IsNil)
+
+ waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+
+ containers2 := getContainers()
+ c.Assert(containers2, checker.HasLen, instances)
+ for i := range containers {
+ if i == toRemove {
+ c.Assert(containers2[i], checker.IsNil)
+ } else {
+ c.Assert(containers2[i], checker.NotNil)
+ }
+ }
+
+ containers = containers2
+ for i := range containers {
+ toRemove = i
+ }
+
+ // try with killing process outside of docker
+ pidStr, err := containers[toRemove].Cmd("inspect", "-f", "{{.State.Pid}}", toRemove)
+ c.Assert(err, checker.IsNil)
+ pid, err := strconv.Atoi(strings.TrimSpace(pidStr))
+ c.Assert(err, checker.IsNil)
+ c.Assert(syscall.Kill(pid, syscall.SIGKILL), checker.IsNil)
+
+ time.Sleep(time.Second) // give some time to handle the signal
+
+ waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+
+ containers2 = getContainers()
+ c.Assert(containers2, checker.HasLen, instances)
+ for i := range containers {
+ if i == toRemove {
+ c.Assert(containers2[i], checker.IsNil)
+ } else {
+ c.Assert(containers2[i], checker.NotNil)
+ }
+ }
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmRaftQuorum(c *check.C) {
+ d1 := s.AddDaemon(c, true, true)
+ d2 := s.AddDaemon(c, true, true)
+ d3 := s.AddDaemon(c, true, true)
+
+ d1.createService(c, simpleTestService)
+
+ c.Assert(d2.Stop(), checker.IsNil)
+
+ d1.createService(c, simpleTestService, func(s *swarm.Service) {
+ s.Spec.Name = "top1"
+ })
+
+ c.Assert(d3.Stop(), checker.IsNil)
+
+ var service swarm.Service
+ simpleTestService(&service)
+ service.Spec.Name = "top2"
+ status, out, err := d1.SockRequest("POST", "/services/create", service.Spec)
+ c.Assert(err, checker.IsNil)
+ c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("deadline exceeded: %s", string(out)))
+
+ c.Assert(d2.Start(), checker.IsNil)
+
+ d1.createService(c, simpleTestService, func(s *swarm.Service) {
+ s.Spec.Name = "top3"
+ })
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmListNodes(c *check.C) {
+ d1 := s.AddDaemon(c, true, true)
+ d2 := s.AddDaemon(c, true, false)
+ d3 := s.AddDaemon(c, true, false)
+
+ nodes := d1.listNodes(c)
+ c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes))
+
+loop0:
+ for _, n := range nodes {
+ for _, d := range []*SwarmDaemon{d1, d2, d3} {
+ if n.ID == d.NodeID {
+ continue loop0
+ }
+ }
+ c.Errorf("unknown nodeID %v", n.ID)
+ }
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmNodeUpdate(c *check.C) {
+ d := s.AddDaemon(c, true, true)
+
+ nodes := d.listNodes(c)
+
+ d.updateNode(c, d.getNode(c, nodes[0].ID), func(n *swarm.Node) {
+ n.Spec.Availability = swarm.NodeAvailabilityPause
+ })
+
+ n := d.getNode(c, nodes[0].ID)
+ c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause)
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmNodeDrainPause(c *check.C) {
+ d1 := s.AddDaemon(c, true, true)
+ d2 := s.AddDaemon(c, true, false)
+
+ time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks
+
+ // start a service, expect balanced distribution
+ instances := 8
+ id := d1.createService(c, simpleTestService, setInstances(instances))
+
+ waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0)
+ waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0)
+ waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances)
+
+ // drain d2, all containers should move to d1
+ d1.updateNode(c, d1.getNode(c, d2.NodeID), func(n *swarm.Node) {
+ n.Spec.Availability = swarm.NodeAvailabilityDrain
+ })
+ waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances)
+ waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0)
+
+ // set d2 back to active
+ d1.updateNode(c, d1.getNode(c, d2.NodeID), func(n *swarm.Node) {
+ n.Spec.Availability = swarm.NodeAvailabilityActive
+ })
+
+ // change an environment variable, resulting in balanced rescheduling
+ d1.updateService(c, d1.getService(c, id), func(s *swarm.Service) {
+ s.Spec.TaskTemplate.ContainerSpec.Env = []string{"FOO=BAR"}
+ s.Spec.UpdateConfig = &swarm.UpdateConfig{
+ Parallelism: 2,
+ Delay: 250 * time.Millisecond,
+ }
+ })
+
+ // wait for the drained node first so we don't count any old containers
+ waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0)
+ waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0)
+ waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances)
+
+ d2ContainerCount := len(d2.activeContainers())
+
+ // set d2 to paused, scale service up, only d1 gets new tasks
+ d1.updateNode(c, d1.getNode(c, d2.NodeID), func(n *swarm.Node) {
+ n.Spec.Availability = swarm.NodeAvailabilityPause
+ })
+
+ instances = 14
+ d1.updateService(c, d1.getService(c, id), setInstances(instances))
+
+ waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances-d2ContainerCount)
+ waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, d2ContainerCount)
+
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmLeaveRemovesContainer(c *check.C) {
+ d := s.AddDaemon(c, true, true)
+
+ instances := 2
+ d.createService(c, simpleTestService, setInstances(instances))
+
+ id, err := d.Cmd("run", "-d", "busybox", "top")
+ c.Assert(err, checker.IsNil)
+ id = strings.TrimSpace(id)
+
+ waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances+1)
+
+ c.Assert(d.Leave(false), checker.NotNil)
+ c.Assert(d.Leave(true), checker.IsNil)
+
+ waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1)
+
+ id2, err := d.Cmd("ps", "-q")
+ c.Assert(err, checker.IsNil)
+ c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmManagerRestore(c *check.C) {
+ d1 := s.AddDaemon(c, true, true)
+
+ instances := 2
+ id := d1.createService(c, simpleTestService, setInstances(instances))
+
+ d1.getService(c, id)
+ d1.Stop()
+ d1.Start()
+ d1.getService(c, id)
+
+ d2 := s.AddDaemon(c, true, true)
+ d2.getService(c, id)
+ d2.Stop()
+ d2.Start()
+ d2.getService(c, id)
+
+ d3 := s.AddDaemon(c, true, true)
+ d3.getService(c, id)
+ d3.Stop()
+ d3.Start()
+ d3.getService(c, id)
+
+ d3.Kill()
+ time.Sleep(1 * time.Second) // time to handle signal
+ d3.Start()
+ d3.getService(c, id)
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmScaleNoRollingUpdate(c *check.C) {
+ d := s.AddDaemon(c, true, true)
+
+ instances := 2
+ id := d.createService(c, simpleTestService, setInstances(instances))
+
+ waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances)
+ containers := d.activeContainers()
+ instances = 4
+ d.updateService(c, d.getService(c, id), setInstances(instances))
+ waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances)
+ containers2 := d.activeContainers()
+
+loop0:
+ for _, c1 := range containers {
+ for _, c2 := range containers2 {
+ if c1 == c2 {
+ continue loop0
+ }
+ }
+ c.Errorf("container %v not found in new set %#v", c1, containers2)
+ }
+}
+
+func simpleTestService(s *swarm.Service) {
+ ureplicas := uint64(1)
+ s.Spec = swarm.ServiceSpec{
+ TaskTemplate: swarm.TaskSpec{
+ ContainerSpec: swarm.ContainerSpec{
+ Image: "busybox:latest",
+ Command: []string{"/bin/top"},
+ },
+ },
+ Mode: swarm.ServiceMode{
+ Replicated: &swarm.ReplicatedService{
+ Replicas: &ureplicas,
+ },
+ },
+ }
+ s.Spec.Name = "top"
+}
+
+func setInstances(replicas int) serviceConstructor {
+ ureplicas := uint64(replicas)
+ return func(s *swarm.Service) {
+ s.Spec.Mode = swarm.ServiceMode{
+ Replicated: &swarm.ReplicatedService{
+ Replicas: &ureplicas,
+ },
+ }
+ }
+}
+
+func setGlobalMode(s *swarm.Service) {
+ s.Spec.Mode = swarm.ServiceMode{
+ Global: &swarm.GlobalService{},
+ }
+}
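The serviceConstructor pattern composes: each constructor mutates the swarm.Service spec in turn. A hypothetical extra constructor, for illustration (setEnv is not part of this change; the Env field path is the one used in TestApiSwarmNodeDrainPause above):

    // setEnv returns a constructor that sets the task template's environment;
    // it composes with simpleTestService and setInstances.
    func setEnv(env ...string) serviceConstructor {
        return func(s *swarm.Service) {
            s.Spec.TaskTemplate.ContainerSpec.Env = env
        }
    }

    // usage: id := d.createService(c, simpleTestService, setInstances(3), setEnv("FOO=BAR"))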
diff --git a/integration-cli/docker_api_test.go b/integration-cli/docker_api_test.go
index 35ab1ee459..d09b8f193b 100644
--- a/integration-cli/docker_api_test.go
+++ b/integration-cli/docker_api_test.go
@@ -34,7 +34,7 @@ func (s *DockerSuite) TestApiGetEnabledCors(c *check.C) {
}
func (s *DockerSuite) TestApiVersionStatusCode(c *check.C) {
- conn, err := sockConn(time.Duration(10 * time.Second))
+ conn, err := sockConn(time.Duration(10*time.Second), "")
c.Assert(err, checker.IsNil)
client := httputil.NewClientConn(conn, nil)
diff --git a/integration-cli/docker_cli_rename_test.go b/integration-cli/docker_cli_rename_test.go
index 74389a2ac7..76bbcfea9f 100644
--- a/integration-cli/docker_cli_rename_test.go
+++ b/integration-cli/docker_cli_rename_test.go
@@ -63,7 +63,7 @@ func (s *DockerSuite) TestRenameCheckNames(c *check.C) {
name, err := inspectFieldWithError("first_name", "Name")
c.Assert(err, checker.NotNil, check.Commentf(name))
- c.Assert(err.Error(), checker.Contains, "No such image or container: first_name")
+ c.Assert(err.Error(), checker.Contains, "No such image, container or task: first_name")
}
func (s *DockerSuite) TestRenameInvalidName(c *check.C) {
diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go
index 67a14c6abe..73a7a85efa 100644
--- a/integration-cli/docker_utils.go
+++ b/integration-cli/docker_utils.go
@@ -124,8 +124,10 @@ func getTLSConfig() (*tls.Config, error) {
return tlsConfig, nil
}
-func sockConn(timeout time.Duration) (net.Conn, error) {
- daemon := daemonHost()
+func sockConn(timeout time.Duration, daemon string) (net.Conn, error) {
+ if daemon == "" {
+ daemon = daemonHost()
+ }
daemonURL, err := url.Parse(daemon)
if err != nil {
return nil, fmt.Errorf("could not parse url %q: %v", daemon, err)
@@ -168,7 +170,11 @@ func sockRequest(method, endpoint string, data interface{}) (int, []byte, error)
}
func sockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) {
- req, client, err := newRequestClient(method, endpoint, data, ct)
+ return sockRequestRawToDaemon(method, endpoint, data, ct, "")
+}
+
+func sockRequestRawToDaemon(method, endpoint string, data io.Reader, ct, daemon string) (*http.Response, io.ReadCloser, error) {
+ req, client, err := newRequestClient(method, endpoint, data, ct, daemon)
if err != nil {
return nil, nil, err
}
@@ -187,7 +193,7 @@ func sockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.R
}
func sockRequestHijack(method, endpoint string, data io.Reader, ct string) (net.Conn, *bufio.Reader, error) {
- req, client, err := newRequestClient(method, endpoint, data, ct)
+ req, client, err := newRequestClient(method, endpoint, data, ct, "")
if err != nil {
return nil, nil, err
}
@@ -197,8 +203,8 @@ func sockRequestHijack(method, endpoint string, data io.Reader, ct string) (net.
return conn, br, nil
}
-func newRequestClient(method, endpoint string, data io.Reader, ct string) (*http.Request, *httputil.ClientConn, error) {
- c, err := sockConn(time.Duration(10 * time.Second))
+func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string) (*http.Request, *httputil.ClientConn, error) {
+ c, err := sockConn(time.Duration(10*time.Second), daemon)
if err != nil {
return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err)
}
@@ -1514,3 +1520,50 @@ func getErrorMessage(c *check.C, body []byte) string {
c.Assert(json.Unmarshal(body, &resp), check.IsNil)
return strings.TrimSpace(resp.Message)
}
+
+func waitAndAssert(c *check.C, timeout time.Duration, f checkF, checker check.Checker, args ...interface{}) {
+ after := time.After(timeout)
+ for {
+ v, comment := f(c)
+ assert, _ := checker.Check(append([]interface{}{v}, args...), checker.Info().Params)
+ select {
+ case <-after:
+ // timeout reached: run the final assertion below with the
+ // last observed value so the failure is reported
+ assert = true
+ default:
+ }
+ if assert {
+ if comment != nil {
+ args = append(args, comment)
+ }
+ c.Assert(v, checker, args...)
+ return
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+}
+
+type checkF func(*check.C) (interface{}, check.CommentInterface)
+type reducer func(...interface{}) interface{}
+
+func reducedCheck(r reducer, funcs ...checkF) checkF {
+ return func(c *check.C) (interface{}, check.CommentInterface) {
+ var values []interface{}
+ var comments []string
+ for _, f := range funcs {
+ v, comment := f(c)
+ values = append(values, v)
+ if comment != nil {
+ comments = append(comments, comment.CheckCommentString())
+ }
+ }
+ return r(values...), check.Commentf("%v", strings.Join(comments, ", "))
+ }
+}
+
+func sumAsIntegers(vals ...interface{}) interface{} {
+ var s int
+ for _, v := range vals {
+ s += v.(int)
+ }
+ return s
+}
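waitAndAssert polls a checkF until the checker passes or the timeout forces a final assertion. A minimal custom checkF, for illustration (assumes the dockerCmd helper defined elsewhere in this file; checkRunningCount itself is not part of this change):

    // checkRunningCount is a hypothetical checkF: it reports how many
    // containers the default daemon currently runs.
    func checkRunningCount(c *check.C) (interface{}, check.CommentInterface) {
        out, _ := dockerCmd(c, "ps", "-q")
        out = strings.TrimSpace(out)
        if out == "" {
            return 0, nil
        }
        return len(strings.Split(out, "\n")), check.Commentf("output: %q", out)
    }

    // usage: waitAndAssert(c, 30*time.Second, checkRunningCount, checker.Equals, 3)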
diff --git a/opts/opts.go b/opts/opts.go
index 9bd8040d25..1b9d6b294a 100644
--- a/opts/opts.go
+++ b/opts/opts.go
@@ -5,6 +5,8 @@ import (
"net"
"regexp"
"strings"
+
+ "github.com/docker/engine-api/types/filters"
)
var (
@@ -282,3 +284,38 @@ func ValidateSysctl(val string) (string, error) {
}
return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
+
+// FilterOpt is a flag type for validating filters
+type FilterOpt struct {
+ filter filters.Args
+}
+
+// NewFilterOpt returns a new FilterOpt
+func NewFilterOpt() FilterOpt {
+ return FilterOpt{filter: filters.NewArgs()}
+}
+
+func (o *FilterOpt) String() string {
+ repr, err := filters.ToParam(o.filter)
+ if err != nil {
+ return "invalid filters"
+ }
+ return repr
+}
+
+// Set sets the value of the opt by parsing the command line value
+func (o *FilterOpt) Set(value string) error {
+ var err error
+ o.filter, err = filters.ParseFlag(value, o.filter)
+ return err
+}
+
+// Type returns the option type
+func (o *FilterOpt) Type() string {
+ return "filter"
+}
+
+// Value returns the value of this option
+func (o *FilterOpt) Value() filters.Args {
+ return o.filter
+}
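Because FilterOpt implements Set, String, and Type, it satisfies the pflag.Value interface and can be registered directly on a flag set. A sketch, assuming spf13/pflag (the flag name and helper are illustrative):

    import (
        "github.com/docker/docker/opts"
        flag "github.com/spf13/pflag"
    )

    // addFilterFlag wires a FilterOpt into a flag set.
    func addFilterFlag(flags *flag.FlagSet) *opts.FilterOpt {
        f := opts.NewFilterOpt()
        flags.VarP(&f, "filter", "f", "Filter output based on conditions provided")
        return &f
    }

    // later, e.g. after --filter name=web on the command line:
    //   args := f.Value() // filters.Args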
diff --git a/runconfig/hostconfig_unix.go b/runconfig/hostconfig_unix.go
index 0568791ddd..c06b6ebfa1 100644
--- a/runconfig/hostconfig_unix.go
+++ b/runconfig/hostconfig_unix.go
@@ -19,7 +19,7 @@ func DefaultDaemonNetworkMode() container.NetworkMode {
// IsPreDefinedNetwork indicates if a network is predefined by the daemon
func IsPreDefinedNetwork(network string) bool {
n := container.NetworkMode(network)
- return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault()
+ return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() || network == "ingress"
}
// ValidateNetMode ensures that the various combinations of requested
diff --git a/vendor/src/bitbucket.org/ww/goautoneg/Makefile b/vendor/src/bitbucket.org/ww/goautoneg/Makefile
new file mode 100644
index 0000000000..e33ee17303
--- /dev/null
+++ b/vendor/src/bitbucket.org/ww/goautoneg/Makefile
@@ -0,0 +1,13 @@
+include $(GOROOT)/src/Make.inc
+
+TARG=bitbucket.org/ww/goautoneg
+GOFILES=autoneg.go
+
+include $(GOROOT)/src/Make.pkg
+
+format:
+ gofmt -w *.go
+
+docs:
+ gomake clean
+ godoc ${TARG} > README.txt
diff --git a/vendor/src/bitbucket.org/ww/goautoneg/README.txt b/vendor/src/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 0000000000..7723656d58
--- /dev/null
+++ b/vendor/src/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float32
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/vendor/src/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/src/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 0000000000..648b38cb65
--- /dev/null
+++ b/vendor/src/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
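For reference, a short sketch of how this vendored package is used (values are illustrative; the package arrives as a transitive dependency of the Prometheus client):

    package main

    import (
        "fmt"

        "bitbucket.org/ww/goautoneg"
    )

    func main() {
        header := "text/html;q=0.8, application/json"
        alternatives := []string{"application/json", "text/html"}
        // prints application/json: its implicit q of 1.0 beats text/html's 0.8
        fmt.Println(goautoneg.Negotiate(header, alternatives))
    }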
diff --git a/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 0000000000..1602287d7c
--- /dev/null
+++ b/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/src/github.com/beorn7/perks/quantile/stream.go b/vendor/src/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 0000000000..587b1fc5ba
--- /dev/null
+++ b/vendor/src/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,292 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targets map[float64]float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for quantile, epsilon := range targets {
+ if quantile*s.n <= r {
+ f = (2 * epsilon * r) / quantile
+ } else {
+ f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed qth percentiles value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(float64(l) * q)
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying streams samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi--
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
diff --git a/vendor/src/github.com/cloudflare/cfssl/LICENSE b/vendor/src/github.com/cloudflare/cfssl/LICENSE
new file mode 100644
index 0000000000..bc5841fa55
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2014 CloudFlare Inc.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/github.com/cloudflare/cfssl/auth/auth.go b/vendor/src/github.com/cloudflare/cfssl/auth/auth.go
new file mode 100644
index 0000000000..ecd5e5fefd
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/auth/auth.go
@@ -0,0 +1,94 @@
+// Package auth implements an interface for providing CFSSL
+// authentication. This is meant to authenticate a client CFSSL to a
+// remote CFSSL in order to prevent unauthorised use of the signature
+// capabilities. This package provides both the interface and a
+// standard HMAC-based implementation.
+package auth
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+// An AuthenticatedRequest contains a request and authentication
+// token. The Provider may determine whether to validate the timestamp
+// and remote address.
+type AuthenticatedRequest struct {
+ // An Authenticator decides whether to use this field.
+ Timestamp int64 `json:"timestamp,omitempty"`
+ RemoteAddress []byte `json:"remote_address,omitempty"`
+ Token []byte `json:"token"`
+ Request []byte `json:"request"`
+}
+
+// A Provider can generate tokens from a request and verify a
+// request. The handling of additional authentication data (such as
+// the IP address) is handled by the concrete type, as is any
+// serialisation and state-keeping.
+type Provider interface {
+ Token(req []byte) (token []byte, err error)
+ Verify(aReq *AuthenticatedRequest) bool
+}
+
+// Standard implements an HMAC-SHA-256 authentication provider. It may
+// be supplied additional data at creation time; the HMAC is then computed
+// over request || additional-data.
+type Standard struct {
+ key []byte
+ ad []byte
+}
+
+// New generates a new standard authentication provider from the key
+// and additional data. The additional data will be used when
+// generating a new token.
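+// The key may be prefixed with "env:" to read the hex-encoded key from
+// the named environment variable, or with "file:" to read it from the
+// named file.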
+func New(key string, ad []byte) (*Standard, error) {
+ if splitKey := strings.SplitN(key, ":", 2); len(splitKey) == 2 {
+ switch splitKey[0] {
+ case "env":
+ key = os.Getenv(splitKey[1])
+ case "file":
+ data, err := ioutil.ReadFile(splitKey[1])
+ if err != nil {
+ return nil, err
+ }
+ key = string(data)
+ default:
+ return nil, fmt.Errorf("unknown key prefix: %s", splitKey[0])
+ }
+ }
+
+ keyBytes, err := hex.DecodeString(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Standard{keyBytes, ad}, nil
+}
+
+// Token generates a new authentication token from the request.
+func (p Standard) Token(req []byte) (token []byte, err error) {
+ h := hmac.New(sha256.New, p.key)
+ h.Write(req)
+ h.Write(p.ad)
+ return h.Sum(nil), nil
+}
+
+// Verify determines whether an authenticated request is valid.
+func (p Standard) Verify(ad *AuthenticatedRequest) bool {
+ if ad == nil {
+ return false
+ }
+
+ // Standard token generation returns no error.
+ token, _ := p.Token(ad.Request)
+ if len(ad.Token) != len(token) {
+ return false
+ }
+
+ return hmac.Equal(token, ad.Token)
+}
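+
+// Example usage (an illustrative sketch, not part of the upstream file; the
+// hex key is a placeholder, and an "env:"- or "file:"-prefixed key would
+// work the same way):
+//
+//	provider, err := New("000102030405060708090a0b0c0d0e0f", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	token, _ := provider.Token(reqBytes)
+//	ok := provider.Verify(&AuthenticatedRequest{Request: reqBytes, Token: token})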
diff --git a/vendor/src/github.com/cloudflare/cfssl/certdb/README.md b/vendor/src/github.com/cloudflare/cfssl/certdb/README.md
new file mode 100644
index 0000000000..18c6a28030
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/certdb/README.md
@@ -0,0 +1,58 @@
+# certdb usage
+
+Using a database enables additional functionality for existing commands when a
+db config is provided:
+
+ - `sign` and `gencert` add a certificate to the certdb after signing it
+ - `serve` enables database functionality for the sign and revoke endpoints
+
+A database is required for the following:
+
+ - `revoke` marks certificates revoked in the database with an optional reason
+ - `ocsprefresh` refreshes the table of cached OCSP responses
+ - `ocspdump` outputs cached OCSP responses in a concatenated base64-encoded format
+
+## Setup/Migration
+
+This directory stores [goose](https://bitbucket.org/liamstask/goose/) db migration scripts for various DB backends.
+Currently supported:
+ - SQLite in sqlite
+ - PostgreSQL in pg
+
+### Get goose
+
+ go get bitbucket.org/liamstask/goose/cmd/goose
+
+### Use goose to start and terminate a SQLite DB
+To start a SQLite DB using goose:
+
+ goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up
+
+To tear down a SQLite DB using goose:
+
+ goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down
+
+### Use goose to start and terminate a PostgreSQL DB
+To start a PostgreSQL DB using goose:
+
+ goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg up
+
+To tear down a PostgreSQL DB using goose:
+
+ goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg down
+
+Note: administration of the PostgreSQL DB is not included. We assume
+the databases being connected to already exist and access control
+is properly handled.
+
+## CFSSL Configuration
+
+Several cfssl commands take a -db-config flag. Create a file with a
+JSON dictionary:
+
+ {"driver":"sqlite3","data_source":"certs.db"}
+
+or
+
+ {"driver":"postgres","data_source":"postgres://user:password@host/db"}
+
diff --git a/vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go b/vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go
new file mode 100644
index 0000000000..96694f7685
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go
@@ -0,0 +1,40 @@
+package certdb
+
+import (
+ "time"
+)
+
+// CertificateRecord encodes a certificate and its metadata
+// that will be recorded in a database.
+type CertificateRecord struct {
+ Serial string `db:"serial_number"`
+ AKI string `db:"authority_key_identifier"`
+ CALabel string `db:"ca_label"`
+ Status string `db:"status"`
+ Reason int `db:"reason"`
+ Expiry time.Time `db:"expiry"`
+ RevokedAt time.Time `db:"revoked_at"`
+ PEM string `db:"pem"`
+}
+
+// OCSPRecord encodes an OCSP response body and its metadata
+// that will be recorded in a database.
+type OCSPRecord struct {
+ Serial string `db:"serial_number"`
+ AKI string `db:"authority_key_identifier"`
+ Body string `db:"body"`
+ Expiry time.Time `db:"expiry"`
+}
+
+// Accessor abstracts the CRUD of certdb objects from a DB.
+type Accessor interface {
+ InsertCertificate(cr CertificateRecord) error
+ GetCertificate(serial, aki string) ([]CertificateRecord, error)
+ GetUnexpiredCertificates() ([]CertificateRecord, error)
+ RevokeCertificate(serial, aki string, reasonCode int) error
+ InsertOCSP(rr OCSPRecord) error
+ GetOCSP(serial, aki string) ([]OCSPRecord, error)
+ GetUnexpiredOCSPs() ([]OCSPRecord, error)
+ UpdateOCSP(serial, aki, body string, expiry time.Time) error
+ UpsertOCSP(serial, aki, body string, expiry time.Time) error
+}
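+
+// Example usage (an illustrative sketch, not part of the upstream file; acc
+// stands for any concrete Accessor implementation, which this file does not
+// provide):
+//
+//	rec := CertificateRecord{
+//		Serial: "1234",
+//		AKI:    "0a1b2c",
+//		Status: "good",
+//		Expiry: time.Now().Add(8760 * time.Hour),
+//		PEM:    certPEM,
+//	}
+//	if err := acc.InsertCertificate(rec); err != nil {
+//		// handle error
+//	}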
diff --git a/vendor/src/github.com/cloudflare/cfssl/config/config.go b/vendor/src/github.com/cloudflare/cfssl/config/config.go
new file mode 100644
index 0000000000..a6837eb0db
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/config/config.go
@@ -0,0 +1,563 @@
+// Package config contains the configuration logic for CFSSL.
+package config
+
+import (
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/cloudflare/cfssl/auth"
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/log"
+ ocspConfig "github.com/cloudflare/cfssl/ocsp/config"
+)
+
+// A CSRWhitelist stores booleans for fields in the CSR. If a CSRWhitelist is
+// not present in a SigningProfile, all of these fields may be copied from the
+// CSR into the signed certificate. If a CSRWhitelist *is* present in a
+// SigningProfile, only those fields with a `true` value in the CSRWhitelist may
+// be copied from the CSR to the signed certificate. Note that some of these
+// fields, like Subject, can be provided or partially provided through the API.
+// Since API clients are expected to be trusted, but CSRs are not, fields
+// provided through the API are not subject to whitelisting through this
+// mechanism.
+type CSRWhitelist struct {
+ Subject, PublicKeyAlgorithm, PublicKey, SignatureAlgorithm bool
+ DNSNames, IPAddresses, EmailAddresses bool
+}
+
+// OID is our own version of asn1's ObjectIdentifier, so we can define a custom
+// JSON marshal / unmarshal.
+type OID asn1.ObjectIdentifier
+
+// CertificatePolicy represents the ASN.1 PolicyInformation structure from
+// https://tools.ietf.org/html/rfc3280.html#page-106.
+// Valid values of Type are "id-qt-unotice" and "id-qt-cps".
+type CertificatePolicy struct {
+ ID OID
+ Qualifiers []CertificatePolicyQualifier
+}
+
+// CertificatePolicyQualifier represents a single qualifier from an ASN.1
+// PolicyInformation structure.
+type CertificatePolicyQualifier struct {
+ Type string
+ Value string
+}
+
+// AuthRemote is an authenticated remote signer.
+type AuthRemote struct {
+ RemoteName string `json:"remote"`
+ AuthKeyName string `json:"auth_key"`
+}
+
+// A SigningProfile stores the information a CA needs in order to enforce
+// its signing policy.
+type SigningProfile struct {
+ Usage []string `json:"usages"`
+ IssuerURL []string `json:"issuer_urls"`
+ OCSP string `json:"ocsp_url"`
+ CRL string `json:"crl_url"`
+ CA bool `json:"is_ca"`
+ OCSPNoCheck bool `json:"ocsp_no_check"`
+ ExpiryString string `json:"expiry"`
+ BackdateString string `json:"backdate"`
+ AuthKeyName string `json:"auth_key"`
+ RemoteName string `json:"remote"`
+ NotBefore time.Time `json:"not_before"`
+ NotAfter time.Time `json:"not_after"`
+ NameWhitelistString string `json:"name_whitelist"`
+ AuthRemote AuthRemote `json:"auth_remote"`
+ CTLogServers []string `json:"ct_log_servers"`
+ AllowedExtensions []OID `json:"allowed_extensions"`
+ CertStore string `json:"cert_store"`
+
+ Policies []CertificatePolicy
+ Expiry time.Duration
+ Backdate time.Duration
+ Provider auth.Provider
+ RemoteProvider auth.Provider
+ RemoteServer string
+ CSRWhitelist *CSRWhitelist
+ NameWhitelist *regexp.Regexp
+ ExtensionWhitelist map[string]bool
+ ClientProvidesSerialNumbers bool
+}
+
+// UnmarshalJSON unmarshals a JSON string into an OID.
+func (oid *OID) UnmarshalJSON(data []byte) (err error) {
+ if data[0] != '"' || data[len(data)-1] != '"' {
+ return errors.New("OID JSON string not wrapped in quotes." + string(data))
+ }
+ data = data[1 : len(data)-1]
+ parsedOid, err := parseObjectIdentifier(string(data))
+ if err != nil {
+ return err
+ }
+ *oid = OID(parsedOid)
+ return
+}
+
+// MarshalJSON marshals an oid into a JSON string.
+func (oid OID) MarshalJSON() ([]byte, error) {
+ return []byte(fmt.Sprintf(`"%v"`, asn1.ObjectIdentifier(oid))), nil
+}
+
+func parseObjectIdentifier(oidString string) (oid asn1.ObjectIdentifier, err error) {
+ // Anchor the pattern so that strings merely containing an OID are rejected.
+ validOID, err := regexp.MatchString("^\\d+(\\.\\d+)*$", oidString)
+ if err != nil {
+ return
+ }
+ if !validOID {
+ err = errors.New("Invalid OID")
+ return
+ }
+
+ segments := strings.Split(oidString, ".")
+ oid = make(asn1.ObjectIdentifier, len(segments))
+ for i, intString := range segments {
+ oid[i], err = strconv.Atoi(intString)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+const timeFormat = "2006-01-02T15:04:05"
+
+// populate is used to fill in the fields that are not in JSON
+//
+// First, the ExpiryString parameter is needed to parse
+// expiration timestamps from JSON. The JSON decoder is not able to
+// decode a string time duration to a time.Duration, so this is called
+// when loading the configuration to properly parse and fill out the
+// Expiry parameter.
+// This function is also used to create references to the auth key
+// and default remote for the profile.
+// It returns nil if ExpiryString is a valid representation of a
+// time.Duration and the AuthKeyName and RemoteName point to
+// valid objects, and an error otherwise.
+func (p *SigningProfile) populate(cfg *Config) error {
+ if p == nil {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("can't parse nil profile"))
+ }
+
+ var err error
+ if p.RemoteName == "" && p.AuthRemote.RemoteName == "" {
+ log.Debugf("parse expiry in profile")
+ if p.ExpiryString == "" {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("empty expiry string"))
+ }
+
+ dur, err := time.ParseDuration(p.ExpiryString)
+ if err != nil {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+ }
+
+ log.Debugf("expiry is valid")
+ p.Expiry = dur
+
+ if p.BackdateString != "" {
+ dur, err = time.ParseDuration(p.BackdateString)
+ if err != nil {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+ }
+
+ p.Backdate = dur
+ }
+
+ if !p.NotBefore.IsZero() && !p.NotAfter.IsZero() && p.NotAfter.Before(p.NotBefore) {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+ }
+
+ if len(p.Policies) > 0 {
+ for _, policy := range p.Policies {
+ for _, qualifier := range policy.Qualifiers {
+ if qualifier.Type != "" && qualifier.Type != "id-qt-unotice" && qualifier.Type != "id-qt-cps" {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("invalid policy qualifier type"))
+ }
+ }
+ }
+ }
+ } else if p.RemoteName != "" {
+ log.Debug("match remote in profile to remotes section")
+ if p.AuthRemote.RemoteName != "" {
+ log.Error("profile has both a remote and an auth remote specified")
+ return cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+ }
+ if remote := cfg.Remotes[p.RemoteName]; remote != "" {
+ if err := p.updateRemote(remote); err != nil {
+ return err
+ }
+ } else {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to find remote in remotes section"))
+ }
+ } else {
+ log.Debug("match auth remote in profile to remotes section")
+ if remote := cfg.Remotes[p.AuthRemote.RemoteName]; remote != "" {
+ if err := p.updateRemote(remote); err != nil {
+ return err
+ }
+ } else {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to find remote in remotes section"))
+ }
+ }
+
+ if p.AuthKeyName != "" {
+ log.Debug("match auth key in profile to auth_keys section")
+ if key, ok := cfg.AuthKeys[p.AuthKeyName]; ok {
+ if key.Type == "standard" {
+ p.Provider, err = auth.New(key.Key, nil)
+ if err != nil {
+ log.Debugf("failed to create new standard auth provider: %v", err)
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to create new standard auth provider"))
+ }
+ } else {
+ log.Debugf("unknown authentication type %v", key.Type)
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("unknown authentication type"))
+ }
+ } else {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to find auth_key in auth_keys section"))
+ }
+ }
+
+ if p.AuthRemote.AuthKeyName != "" {
+ log.Debug("match auth remote key in profile to auth_keys section")
+ if key, ok := cfg.AuthKeys[p.AuthRemote.AuthKeyName]; ok {
+ if key.Type == "standard" {
+ p.RemoteProvider, err = auth.New(key.Key, nil)
+ if err != nil {
+ log.Debugf("failed to create new standard auth provider: %v", err)
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to create new standard auth provider"))
+ }
+ } else {
+ log.Debugf("unknown authentication type %v", key.Type)
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("unknown authentication type"))
+ }
+ } else {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to find auth_remote's auth_key in auth_keys section"))
+ }
+ }
+
+ if p.NameWhitelistString != "" {
+ log.Debug("compiling whitelist regular expression")
+ rule, err := regexp.Compile(p.NameWhitelistString)
+ if err != nil {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to compile name whitelist section"))
+ }
+ p.NameWhitelist = rule
+ }
+
+ p.ExtensionWhitelist = map[string]bool{}
+ for _, oid := range p.AllowedExtensions {
+ p.ExtensionWhitelist[asn1.ObjectIdentifier(oid).String()] = true
+ }
+
+ return nil
+}
+
+// updateRemote takes a signing profile and initializes the remote server object
+// to the hostname:port combination sent by remote.
+func (p *SigningProfile) updateRemote(remote string) error {
+ if remote != "" {
+ p.RemoteServer = remote
+ }
+ return nil
+}
+
+// OverrideRemotes takes a signing configuration and updates the remote server object
+// to the hostname:port combination sent by remote
+func (p *Signing) OverrideRemotes(remote string) error {
+ if remote != "" {
+ var err error
+ for _, profile := range p.Profiles {
+ err = profile.updateRemote(remote)
+ if err != nil {
+ return err
+ }
+ }
+ err = p.Default.updateRemote(remote)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NeedsRemoteSigner returns true if one of the profiles has a remote set
+func (p *Signing) NeedsRemoteSigner() bool {
+ for _, profile := range p.Profiles {
+ if profile.RemoteServer != "" {
+ return true
+ }
+ }
+
+ if p.Default.RemoteServer != "" {
+ return true
+ }
+
+ return false
+}
+
+// NeedsLocalSigner returns true if one of the profiles does not have a remote set
+func (p *Signing) NeedsLocalSigner() bool {
+ for _, profile := range p.Profiles {
+ if profile.RemoteServer == "" {
+ return true
+ }
+ }
+
+ if p.Default.RemoteServer == "" {
+ return true
+ }
+
+ return false
+}
+
+// Usages parses the list of key uses in the profile, translating them
+// to a list of X.509 key usages and extended key usages. The unknown
+// uses are collected into a slice that is also returned.
+func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
+ for _, keyUse := range p.Usage {
+ if kuse, ok := KeyUsage[keyUse]; ok {
+ ku |= kuse
+ } else if ekuse, ok := ExtKeyUsage[keyUse]; ok {
+ eku = append(eku, ekuse)
+ } else {
+ unk = append(unk, keyUse)
+ }
+ }
+ return
+}
+
+// A valid profile must be a valid local profile or a valid remote profile.
+// A valid local profile has defined at least key usages to be used, and a
+// valid local default profile has defined at least a default expiration.
+// A valid remote profile (default or not) has a remote signer initialized.
+// In addition, a remote profile must have a valid auth provider if an auth
+// key is defined.
+func (p *SigningProfile) validProfile(isDefault bool) bool {
+ if p == nil {
+ return false
+ }
+
+ if p.RemoteName != "" {
+ log.Debugf("validate remote profile")
+
+ if p.RemoteServer == "" {
+ log.Debugf("invalid remote profile: no remote signer specified")
+ return false
+ }
+
+ if p.AuthKeyName != "" && p.Provider == nil {
+ log.Debugf("invalid remote profile: auth key name is defined but no auth provider is set")
+ return false
+ }
+
+ if p.AuthRemote.RemoteName != "" {
+ log.Debugf("invalid remote profile: auth remote is also specified")
+ }
+ } else if p.AuthRemote.RemoteName != "" {
+ log.Debugf("validate auth remote profile")
+ if p.RemoteServer == "" {
+ log.Debugf("invalid auth remote profile: no remote signer specified")
+ return false
+ }
+
+ if p.AuthRemote.AuthKeyName == "" || p.RemoteProvider == nil {
+ log.Debugf("invalid auth remote profile: no auth key is defined")
+ return false
+ }
+ } else {
+ log.Debugf("validate local profile")
+ if !isDefault {
+ if len(p.Usage) == 0 {
+ log.Debugf("invalid local profile: no usages specified")
+ return false
+ } else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) {
+ log.Debugf("invalid local profile: no valid usages")
+ return false
+ }
+ } else {
+ if p.Expiry == 0 {
+ log.Debugf("invalid local profile: no expiry set")
+ return false
+ }
+ }
+ }
+
+ log.Debugf("profile is valid")
+ return true
+}
+
+// Signing codifies the signature configuration policy for a CA.
+type Signing struct {
+ Profiles map[string]*SigningProfile `json:"profiles"`
+ Default *SigningProfile `json:"default"`
+}
+
+// Config stores configuration information for the CA.
+type Config struct {
+ Signing *Signing `json:"signing"`
+ OCSP *ocspConfig.Config `json:"ocsp"`
+ AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"`
+ Remotes map[string]string `json:"remotes,omitempty"`
+}
+
+// Valid ensures that Config is a valid configuration. It should be
+// called immediately after parsing a configuration file.
+func (c *Config) Valid() bool {
+ return c.Signing.Valid()
+}
+
+// Valid checks the signature policies, ensuring they are valid
+// policies. A policy is valid if it has defined at least key usages
+// to be used, and a valid default profile has defined at least a
+// default expiration.
+func (p *Signing) Valid() bool {
+ if p == nil {
+ return false
+ }
+
+ log.Debugf("validating configuration")
+ if !p.Default.validProfile(true) {
+ log.Debugf("default profile is invalid")
+ return false
+ }
+
+ for _, sp := range p.Profiles {
+ if !sp.validProfile(false) {
+ log.Debugf("invalid profile")
+ return false
+ }
+ }
+ return true
+}
+
+// KeyUsage contains a mapping of string names to key usages.
+var KeyUsage = map[string]x509.KeyUsage{
+ "signing": x509.KeyUsageDigitalSignature,
+ "digital signature": x509.KeyUsageDigitalSignature,
+ "content committment": x509.KeyUsageContentCommitment,
+ "key encipherment": x509.KeyUsageKeyEncipherment,
+ "key agreement": x509.KeyUsageKeyAgreement,
+ "data encipherment": x509.KeyUsageDataEncipherment,
+ "cert sign": x509.KeyUsageCertSign,
+ "crl sign": x509.KeyUsageCRLSign,
+ "encipher only": x509.KeyUsageEncipherOnly,
+ "decipher only": x509.KeyUsageDecipherOnly,
+}
+
+// ExtKeyUsage contains a mapping of string names to extended key
+// usages.
+var ExtKeyUsage = map[string]x509.ExtKeyUsage{
+ "any": x509.ExtKeyUsageAny,
+ "server auth": x509.ExtKeyUsageServerAuth,
+ "client auth": x509.ExtKeyUsageClientAuth,
+ "code signing": x509.ExtKeyUsageCodeSigning,
+ "email protection": x509.ExtKeyUsageEmailProtection,
+ "s/mime": x509.ExtKeyUsageEmailProtection,
+ "ipsec end system": x509.ExtKeyUsageIPSECEndSystem,
+ "ipsec tunnel": x509.ExtKeyUsageIPSECTunnel,
+ "ipsec user": x509.ExtKeyUsageIPSECUser,
+ "timestamping": x509.ExtKeyUsageTimeStamping,
+ "ocsp signing": x509.ExtKeyUsageOCSPSigning,
+ "microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto,
+ "netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto,
+}
+
+// An AuthKey contains an entry for a key used for authentication.
+type AuthKey struct {
+ // Type contains information needed to select the appropriate
+ // constructor. For example, "standard" for HMAC-SHA-256,
+ // "standard-ip" for HMAC-SHA-256 incorporating the client's
+ // IP.
+ Type string `json:"type"`
+ // Key contains the key information, such as a hex-encoded
+ // HMAC key.
+ Key string `json:"key"`
+}
+
+// DefaultConfig returns a default configuration specifying basic key
+// usage and a 1 year expiration time. The key usages chosen are
+// signing, key encipherment, client auth and server auth.
+func DefaultConfig() *SigningProfile {
+ d := helpers.OneYear
+ return &SigningProfile{
+ Usage: []string{"signing", "key encipherment", "server auth", "client auth"},
+ Expiry: d,
+ ExpiryString: "8760h",
+ }
+}
+
+// LoadFile attempts to load the configuration file stored at the path
+// and returns the configuration. On error, it returns nil and the error.
+func LoadFile(path string) (*Config, error) {
+ log.Debugf("loading configuration file from %s", path)
+ if path == "" {
+ return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid path"))
+ }
+
+ body, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("could not read configuration file"))
+ }
+
+ return LoadConfig(body)
+}
+
+// LoadConfig attempts to load the configuration from a byte slice.
+// On error, it returns nil and the error.
+func LoadConfig(config []byte) (*Config, error) {
+ var cfg = &Config{}
+ err := json.Unmarshal(config, &cfg)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to unmarshal configuration: "+err.Error()))
+ }
+
+ if cfg.Signing == nil {
+ return nil, errors.New("No \"signing\" field present")
+ }
+
+ if cfg.Signing.Default == nil {
+ log.Debugf("no default given: using default config")
+ cfg.Signing.Default = DefaultConfig()
+ } else {
+ if err := cfg.Signing.Default.populate(cfg); err != nil {
+ return nil, err
+ }
+ }
+
+ for k := range cfg.Signing.Profiles {
+ if err := cfg.Signing.Profiles[k].populate(cfg); err != nil {
+ return nil, err
+ }
+ }
+
+ if !cfg.Valid() {
+ return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid configuration"))
+ }
+
+ log.Debugf("configuration ok")
+ return cfg, nil
+}
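+
+// Example usage (an illustrative sketch, not part of the upstream file; the
+// JSON shown is a minimal made-up configuration that passes validation,
+// since the default profile defines both usages and an expiry):
+//
+//	raw := []byte(`{
+//		"signing": {
+//			"default": {
+//				"usages": ["signing", "key encipherment", "server auth"],
+//				"expiry": "8760h"
+//			}
+//		}
+//	}`)
+//	cfg, err := LoadConfig(raw)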
diff --git a/vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go b/vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go
new file mode 100644
index 0000000000..8db547fce5
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go
@@ -0,0 +1,188 @@
+// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically
+// used to package certificates and CRLs. Every certificate converted with openssl
+// to PKCS #7 format from another encoding such as PEM conforms to this implementation.
+// reference: https://www.openssl.org/docs/apps/crl2pkcs7.html
+//
+// PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315
+//
+// The full PKCS #7 cryptographic message syntax allows for cryptographic enhancements:
+// for example, data can be encrypted and signed and then packaged through PKCS #7 to be
+// sent over a network and then verified and decrypted. It is ASN.1, and the type of the
+// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is:
+//
+// ContentInfo ::= SEQUENCE {
+// contentType ContentType,
+// content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL
+// }
+//
+// There are 6 possible ContentTypes: data, signedData, envelopedData,
+// signedAndEnvelopedData, digestedData, and encryptedData. Here signedData, data, and
+// encryptedData are implemented, as the degenerate case of signedData without a signature is the
+// typical format for transferring certificates and CRLs, and data and encryptedData are used in
+// PKCS #12 formats.
+// The ContentType signedData has the form:
+//
+//
+// signedData ::= SEQUENCE {
+// version Version,
+// digestAlgorithms DigestAlgorithmIdentifiers,
+// contentInfo ContentInfo,
+// certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL
+// crls [1] IMPLICIT CertificateRevocationLists OPTIONAL,
+// signerInfos SignerInfos
+// }
+//
+// As of yet, signerInfos and digestAlgorithms are not parsed, as they are not relevant to
+// this system's use of PKCS #7 data. Version is an integer type; note that PKCS #7 is
+// recursive, and this second layer of ContentInfo is similarly ignored for our degenerate
+// usage. The ExtendedCertificatesAndCertificates type consists of a sequence of choices
+// between PKCS #6 extended certificates and x509 certificates. Any sequence containing
+// extended certificates is not yet supported in this implementation.
+//
+// The ContentType Data is simply a raw octet string and is parsed directly into a Go []byte slice.
+//
+// The ContentType encryptedData is the most complicated and its form can be gathered by
+// the go type below. It essentially contains a raw octet string of encrypted data and an
+// algorithm identifier for use in decrypting this data.
+package pkcs7
+
+import (
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+
+ cferr "github.com/cloudflare/cfssl/errors"
+)
+
+// Types used for asn1 Unmarshaling.
+
+type signedData struct {
+ Version int
+ DigestAlgorithms asn1.RawValue
+ ContentInfo asn1.RawValue
+ Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"`
+ Crls asn1.RawValue `asn1:"optional"`
+ SignerInfos asn1.RawValue
+}
+
+type initPKCS7 struct {
+ Raw asn1.RawContent
+ ContentType asn1.ObjectIdentifier
+ Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
+}
+
+// Object identifier strings of the three implemented PKCS7 types.
+const (
+ ObjIDData = "1.2.840.113549.1.7.1"
+ ObjIDSignedData = "1.2.840.113549.1.7.2"
+ ObjIDEncryptedData = "1.2.840.113549.1.7.6"
+)
+
+// PKCS7 represents the ASN1 PKCS #7 Content type. It contains one of three
+// possible types of Content objects, as denoted by the object identifier in
+// the ContentInfo field, the other two being nil. SignedData
+// is the degenerate SignedData Content info without signature used
+// to hold certificates and crls. Data is raw bytes, and EncryptedData
+// is as defined in PKCS #7 standard.
+type PKCS7 struct {
+ Raw asn1.RawContent
+ ContentInfo string
+ Content Content
+}
+
+// Content implements three of the six possible PKCS7 data types. Only one is non-nil.
+type Content struct {
+ Data []byte
+ SignedData SignedData
+ EncryptedData EncryptedData
+}
+
+// SignedData defines the typical carrier of certificates and crls.
+type SignedData struct {
+ Raw asn1.RawContent
+ Version int
+ Certificates []*x509.Certificate
+ Crl *pkix.CertificateList
+}
+
+// Data contains raw bytes. Used as a subtype in PKCS12.
+type Data struct {
+ Bytes []byte
+}
+
+// EncryptedData contains encrypted data. Used as a subtype in PKCS12.
+type EncryptedData struct {
+ Raw asn1.RawContent
+ Version int
+ EncryptedContentInfo EncryptedContentInfo
+}
+
+// EncryptedContentInfo is a subtype of PKCS7EncryptedData.
+type EncryptedContentInfo struct {
+ Raw asn1.RawContent
+ ContentType asn1.ObjectIdentifier
+ ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
+ EncryptedContent []byte `asn1:"tag:0,optional"`
+}
+
+// ParsePKCS7 attempts to parse the DER encoded bytes of a
+// PKCS7 structure.
+func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
+
+ var pkcs7 initPKCS7
+ _, err = asn1.Unmarshal(raw, &pkcs7)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+
+ msg = new(PKCS7)
+ msg.Raw = pkcs7.Raw
+ msg.ContentInfo = pkcs7.ContentType.String()
+ switch {
+ case msg.ContentInfo == ObjIDData:
+ msg.ContentInfo = "Data"
+ _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+ case msg.ContentInfo == ObjIDSignedData:
+ msg.ContentInfo = "SignedData"
+ var signedData signedData
+ _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+ if len(signedData.Certificates.Bytes) != 0 {
+ msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+ }
+ if len(signedData.Crls.Bytes) != 0 {
+ msg.Content.SignedData.Crl, err = x509.ParseDERCRL(signedData.Crls.Bytes)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+ }
+ msg.Content.SignedData.Version = signedData.Version
+ msg.Content.SignedData.Raw = pkcs7.Content.Bytes
+ case msg.ContentInfo == ObjIDEncryptedData:
+ msg.ContentInfo = "EncryptedData"
+ var encryptedData EncryptedData
+ _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+ if encryptedData.Version != 0 {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Only support for PKCS #7 encryptedData version 0"))
+ }
+ msg.Content.EncryptedData = encryptedData
+
+ default:
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Attempt to parse PKCS# 7 Content not of type data, signed data or encrypted data"))
+ }
+
+ return msg, nil
+
+}
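+
+// Example usage (an illustrative sketch, not part of the upstream file;
+// derBytes is assumed to hold a DER-encoded degenerate signedData blob):
+//
+//	msg, err := ParsePKCS7(derBytes)
+//	if err != nil {
+//		// handle error
+//	}
+//	if msg.ContentInfo == "SignedData" {
+//		certs := msg.Content.SignedData.Certificates
+//		_ = certs
+//	}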
diff --git a/vendor/src/github.com/cloudflare/cfssl/csr/csr.go b/vendor/src/github.com/cloudflare/cfssl/csr/csr.go
new file mode 100644
index 0000000000..6e3653fba6
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/csr/csr.go
@@ -0,0 +1,414 @@
+// Package csr implements certificate requests for CFSSL.
+package csr
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "errors"
+ "net"
+ "net/mail"
+ "strings"
+
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/log"
+)
+
+const (
+ curveP256 = 256
+ curveP384 = 384
+ curveP521 = 521
+)
+
+// A Name contains the SubjectInfo fields.
+type Name struct {
+ C string // Country
+ ST string // State
+ L string // Locality
+ O string // OrganisationName
+ OU string // OrganisationalUnitName
+ SerialNumber string
+}
+
+// A KeyRequest is a generic request for a new key.
+type KeyRequest interface {
+ Algo() string
+ Size() int
+ Generate() (crypto.PrivateKey, error)
+ SigAlgo() x509.SignatureAlgorithm
+}
+
+// A BasicKeyRequest contains the algorithm and key size for a new private key.
+type BasicKeyRequest struct {
+ A string `json:"algo"`
+ S int `json:"size"`
+}
+
+// NewBasicKeyRequest returns a default BasicKeyRequest.
+func NewBasicKeyRequest() *BasicKeyRequest {
+ return &BasicKeyRequest{"ecdsa", curveP256}
+}
+
+// Algo returns the requested key algorithm represented as a string.
+func (kr *BasicKeyRequest) Algo() string {
+ return kr.A
+}
+
+// Size returns the requested key size.
+func (kr *BasicKeyRequest) Size() int {
+ return kr.S
+}
+
+// Generate generates a key as specified in the request. Currently,
+// only ECDSA and RSA are supported.
+func (kr *BasicKeyRequest) Generate() (crypto.PrivateKey, error) {
+ log.Debugf("generate key from request: algo=%s, size=%d", kr.Algo(), kr.Size())
+ switch kr.Algo() {
+ case "rsa":
+ if kr.Size() < 2048 {
+ return nil, errors.New("RSA key is too weak")
+ }
+ if kr.Size() > 8192 {
+ return nil, errors.New("RSA key size too large")
+ }
+ return rsa.GenerateKey(rand.Reader, kr.Size())
+ case "ecdsa":
+ var curve elliptic.Curve
+ switch kr.Size() {
+ case curveP256:
+ curve = elliptic.P256()
+ case curveP384:
+ curve = elliptic.P384()
+ case curveP521:
+ curve = elliptic.P521()
+ default:
+ return nil, errors.New("invalid curve")
+ }
+ return ecdsa.GenerateKey(curve, rand.Reader)
+ default:
+ return nil, errors.New("invalid algorithm")
+ }
+}
+
+// SigAlgo returns an appropriate X.509 signature algorithm given the
+// key request's type and size.
+func (kr *BasicKeyRequest) SigAlgo() x509.SignatureAlgorithm {
+ switch kr.Algo() {
+ case "rsa":
+ switch {
+ case kr.Size() >= 4096:
+ return x509.SHA512WithRSA
+ case kr.Size() >= 3072:
+ return x509.SHA384WithRSA
+ case kr.Size() >= 2048:
+ return x509.SHA256WithRSA
+ default:
+ return x509.SHA1WithRSA
+ }
+ case "ecdsa":
+ switch kr.Size() {
+ case curveP521:
+ return x509.ECDSAWithSHA512
+ case curveP384:
+ return x509.ECDSAWithSHA384
+ case curveP256:
+ return x509.ECDSAWithSHA256
+ default:
+ return x509.ECDSAWithSHA1
+ }
+ default:
+ return x509.UnknownSignatureAlgorithm
+ }
+}
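+
+// Example usage (an illustrative sketch, not part of the upstream file):
+//
+//	kr := NewBasicKeyRequest() // defaults to ECDSA over P-256
+//	priv, err := kr.Generate()
+//	if err != nil {
+//		// handle error
+//	}
+//	sigAlgo := kr.SigAlgo() // x509.ECDSAWithSHA256 for the default request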
+
+// CAConfig is a section used in requests to initialise a new CA.
+type CAConfig struct {
+ PathLength int `json:"pathlen"`
+ Expiry string `json:"expiry"`
+}
+
+// A CertificateRequest encapsulates the API interface to the
+// certificate request functionality.
+type CertificateRequest struct {
+ CN string
+ Names []Name `json:"names"`
+ Hosts []string `json:"hosts"`
+ KeyRequest KeyRequest `json:"key,omitempty"`
+ CA *CAConfig `json:"ca,omitempty"`
+ SerialNumber string `json:"serialnumber,omitempty"`
+}
+
+// New returns a new, empty CertificateRequest with a
+// BasicKeyRequest.
+func New() *CertificateRequest {
+ return &CertificateRequest{
+ KeyRequest: NewBasicKeyRequest(),
+ }
+}
+
+// appendIf appends s to a if s is not an empty string.
+func appendIf(s string, a *[]string) {
+ if s != "" {
+ *a = append(*a, s)
+ }
+}
+
+// Name returns the PKIX name for the request.
+func (cr *CertificateRequest) Name() pkix.Name {
+ var name pkix.Name
+ name.CommonName = cr.CN
+
+ for _, n := range cr.Names {
+ appendIf(n.C, &name.Country)
+ appendIf(n.ST, &name.Province)
+ appendIf(n.L, &name.Locality)
+ appendIf(n.O, &name.Organization)
+ appendIf(n.OU, &name.OrganizationalUnit)
+ }
+ name.SerialNumber = cr.SerialNumber
+ return name
+}
+
+// ParseRequest takes a certificate request and generates a key and
+// CSR from it. It does no validation -- caveat emptor. It will,
+// however, fail if the key request is not valid (i.e., an unsupported
+// curve or RSA key size). The lack of validation was specifically
+// chosen to allow the end user to define a policy and validate the
+// request appropriately before calling this function.
+func ParseRequest(req *CertificateRequest) (csr, key []byte, err error) {
+ log.Info("received CSR")
+ if req.KeyRequest == nil {
+ req.KeyRequest = NewBasicKeyRequest()
+ }
+
+ log.Infof("generating key: %s-%d", req.KeyRequest.Algo(), req.KeyRequest.Size())
+ priv, err := req.KeyRequest.Generate()
+ if err != nil {
+ err = cferr.Wrap(cferr.PrivateKeyError, cferr.GenerationFailed, err)
+ return
+ }
+
+ switch priv := priv.(type) {
+ case *rsa.PrivateKey:
+ key = x509.MarshalPKCS1PrivateKey(priv)
+ block := pem.Block{
+ Type: "RSA PRIVATE KEY",
+ Bytes: key,
+ }
+ key = pem.EncodeToMemory(&block)
+ case *ecdsa.PrivateKey:
+ key, err = x509.MarshalECPrivateKey(priv)
+ if err != nil {
+ err = cferr.Wrap(cferr.PrivateKeyError, cferr.Unknown, err)
+ return
+ }
+ block := pem.Block{
+ Type: "EC PRIVATE KEY",
+ Bytes: key,
+ }
+ key = pem.EncodeToMemory(&block)
+ default:
+ panic("Generate should have failed to produce a valid key.")
+ }
+
+ var tpl = x509.CertificateRequest{
+ Subject: req.Name(),
+ SignatureAlgorithm: req.KeyRequest.SigAlgo(),
+ }
+
+ for i := range req.Hosts {
+ if ip := net.ParseIP(req.Hosts[i]); ip != nil {
+ tpl.IPAddresses = append(tpl.IPAddresses, ip)
+ } else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
+ tpl.EmailAddresses = append(tpl.EmailAddresses, req.Hosts[i])
+ } else {
+ tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
+ }
+ }
+
+ csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
+ if err != nil {
+ log.Errorf("failed to generate a CSR: %v", err)
+ err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
+ return
+ }
+ block := pem.Block{
+ Type: "CERTIFICATE REQUEST",
+ Bytes: csr,
+ }
+
+ log.Info("encoded CSR")
+ csr = pem.EncodeToMemory(&block)
+ return
+}
+
+// ExtractCertificateRequest extracts a CertificateRequest from
+// an x509.Certificate. It is intended for generating a new certificate
+// from an existing certificate. For a root certificate, the CA expiry
+// length is calculated as the duration between cert.NotAfter and cert.NotBefore.
+func ExtractCertificateRequest(cert *x509.Certificate) *CertificateRequest {
+ req := New()
+ req.CN = cert.Subject.CommonName
+ req.Names = getNames(cert.Subject)
+ req.Hosts = getHosts(cert)
+ req.SerialNumber = cert.Subject.SerialNumber
+
+ if cert.IsCA {
+ req.CA = new(CAConfig)
+ // CA expiry length is calculated based on the input cert
+ // issue date and expiry date.
+ req.CA.Expiry = cert.NotAfter.Sub(cert.NotBefore).String()
+ req.CA.PathLength = cert.MaxPathLen
+ }
+
+ return req
+}
+
+func getHosts(cert *x509.Certificate) []string {
+ var hosts []string
+ for _, ip := range cert.IPAddresses {
+ hosts = append(hosts, ip.String())
+ }
+ for _, dns := range cert.DNSNames {
+ hosts = append(hosts, dns)
+ }
+ for _, email := range cert.EmailAddresses {
+ hosts = append(hosts, email)
+ }
+
+ return hosts
+}
+
+// getNames returns an array of Names from the certificate.
+// It only cares about Country, Organization, OrganizationalUnit, Locality, Province.
+func getNames(sub pkix.Name) []Name {
+ // anonymous func for finding the max of a list of integers
+ max := func(v1 int, vn ...int) (max int) {
+ max = v1
+ for i := 0; i < len(vn); i++ {
+ if vn[i] > max {
+ max = vn[i]
+ }
+ }
+ return max
+ }
+
+ nc := len(sub.Country)
+ norg := len(sub.Organization)
+ nou := len(sub.OrganizationalUnit)
+ nl := len(sub.Locality)
+ np := len(sub.Province)
+
+ n := max(nc, norg, nou, nl, np)
+
+ names := make([]Name, n)
+ for i := range names {
+ if i < nc {
+ names[i].C = sub.Country[i]
+ }
+ if i < norg {
+ names[i].O = sub.Organization[i]
+ }
+ if i < nou {
+ names[i].OU = sub.OrganizationalUnit[i]
+ }
+ if i < nl {
+ names[i].L = sub.Locality[i]
+ }
+ if i < np {
+ names[i].ST = sub.Province[i]
+ }
+ }
+ return names
+}
+
+// A Generator is responsible for validating certificate requests.
+type Generator struct {
+ Validator func(*CertificateRequest) error
+}
+
+// ProcessRequest validates and processes the incoming request. It is
+// a wrapper around a validator and the ParseRequest function.
+func (g *Generator) ProcessRequest(req *CertificateRequest) (csr, key []byte, err error) {
+
+ log.Info("generate received request")
+ err = g.Validator(req)
+ if err != nil {
+ log.Warningf("invalid request: %v", err)
+ return
+ }
+
+ csr, key, err = ParseRequest(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ return
+}
+
+// IsNameEmpty returns true if the name has no identifying information in it.
+func IsNameEmpty(n Name) bool {
+ empty := func(s string) bool { return strings.TrimSpace(s) == "" }
+
+ if empty(n.C) && empty(n.ST) && empty(n.L) && empty(n.O) && empty(n.OU) {
+ return true
+ }
+ return false
+}
+
+// Regenerate uses the provided CSR as a template for signing a new
+// CSR using priv.
+func Regenerate(priv crypto.Signer, csr []byte) ([]byte, error) {
+ req, extra, err := helpers.ParseCSR(csr)
+ if err != nil {
+ return nil, err
+ } else if len(extra) > 0 {
+ return nil, errors.New("csr: trailing data in certificate request")
+ }
+
+ return x509.CreateCertificateRequest(rand.Reader, req, priv)
+}
+
+// Generate creates a new CSR from a CertificateRequest structure and
+// an existing key. The KeyRequest field is ignored.
+func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) {
+ sigAlgo := helpers.SignerAlgo(priv, crypto.SHA256)
+ if sigAlgo == x509.UnknownSignatureAlgorithm {
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable)
+ }
+
+ var tpl = x509.CertificateRequest{
+ Subject: req.Name(),
+ SignatureAlgorithm: sigAlgo,
+ }
+
+ for i := range req.Hosts {
+ if ip := net.ParseIP(req.Hosts[i]); ip != nil {
+ tpl.IPAddresses = append(tpl.IPAddresses, ip)
+ } else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
+ tpl.EmailAddresses = append(tpl.EmailAddresses, email.Address)
+ } else {
+ tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
+ }
+ }
+
+ csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
+ if err != nil {
+ log.Errorf("failed to generate a CSR: %v", err)
+ err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
+ return
+ }
+ block := pem.Block{
+ Type: "CERTIFICATE REQUEST",
+ Bytes: csr,
+ }
+
+ log.Info("encoded CSR")
+ csr = pem.EncodeToMemory(&block)
+ return
+}
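+
+// Example usage (an illustrative sketch, not part of the upstream file):
+//
+//	req := New()
+//	req.CN = "example.com"
+//	req.Hosts = []string{"example.com", "192.0.2.1"}
+//	csrPEM, keyPEM, err := ParseRequest(req)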
diff --git a/vendor/src/github.com/cloudflare/cfssl/errors/doc.go b/vendor/src/github.com/cloudflare/cfssl/errors/doc.go
new file mode 100644
index 0000000000..1910e2662f
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/errors/doc.go
@@ -0,0 +1,46 @@
+/*
+Package errors provides error types returned in CF SSL.
+
+1. Type Error is intended for errors produced by CF SSL packages.
+It formats to a JSON object that consists of an error message and a 4-digit code for error reasoning.
+
+Example: {"code":1002, "message": "Failed to decode certificate"}
+
+The index of codes is listed below:
+ 1XXX: CertificateError
+ 1000: Unknown
+ 1001: ReadFailed
+ 1002: DecodeFailed
+ 1003: ParseFailed
+ 1100: SelfSigned
+ 12XX: VerifyFailed
+ 121X: CertificateInvalid
+ 1210: NotAuthorizedToSign
+ 1211: Expired
+ 1212: CANotAuthorizedForThisName
+ 1213: TooManyIntermediates
+ 1214: IncompatibleUsage
+ 1220: UnknownAuthority
+ 2XXX: PrivateKeyError
+ 2000: Unknown
+ 2001: ReadFailed
+ 2002: DecodeFailed
+ 2003: ParseFailed
+ 2100: Encrypted
+ 2200: NotRSAOrECC
+ 2300: KeyMismatch
+ 2400: GenerationFailed
+ 2500: Unavailable
+ 3XXX: IntermediatesError
+ 4XXX: RootError
+ 5XXX: PolicyError
+ 5100: NoKeyUsages
+ 5200: InvalidPolicy
+ 5300: InvalidRequest
+ 5400: UnknownProfile
+ 6XXX: DialError
+
+2. Type HTTPError is intended for the CF SSL API to consume. It contains an HTTP status code that will be read and returned
+by the API server.
+*/
+package errors
diff --git a/vendor/src/github.com/cloudflare/cfssl/errors/error.go b/vendor/src/github.com/cloudflare/cfssl/errors/error.go
new file mode 100644
index 0000000000..88663b2c67
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/errors/error.go
@@ -0,0 +1,420 @@
+package errors
+
+import (
+ "crypto/x509"
+ "encoding/json"
+ "fmt"
+)
+
+// Error is the error type usually returned by functions in the CF SSL package.
+// It contains a 4-digit error code where the most significant digit
+// describes the category where the error occurred and the remaining 3 digits
+// describe the specific error reason.
+type Error struct {
+ ErrorCode int `json:"code"`
+ Message string `json:"message"`
+}
+
+// Category is the most significant digit of the error code.
+type Category int
+
+// Reason is the last 3 digits of the error code.
+type Reason int
+
+const (
+ // Success indicates no error occurred.
+ Success Category = 1000 * iota // 0XXX
+
+ // CertificateError indicates a fault in a certificate.
+ CertificateError // 1XXX
+
+ // PrivateKeyError indicates a fault in a private key.
+ PrivateKeyError // 2XXX
+
+ // IntermediatesError indicates a fault in an intermediate.
+ IntermediatesError // 3XXX
+
+ // RootError indicates a fault in a root.
+ RootError // 4XXX
+
+ // PolicyError indicates an error arising from a malformed or
+ // non-existent policy, or a breach of policy.
+ PolicyError // 5XXX
+
+ // DialError indicates a network fault.
+ DialError // 6XXX
+
+ // APIClientError indicates a problem with the API client.
+ APIClientError // 7XXX
+
+ // OCSPError indicates a problem with OCSP signing
+ OCSPError // 8XXX
+
+ // CSRError indicates a problem with CSR parsing
+ CSRError // 9XXX
+
+ // CTError indicates a problem with the certificate transparency process
+ CTError // 10XXX
+
+ // CertStoreError indicates a problem with the certificate store
+ CertStoreError // 11XXX
+)
+
+// None is a non-specified error.
+const (
+ None Reason = iota
+)
+
+// Warning code for a success
+const (
+ BundleExpiringBit int = 1 << iota // 0x01
+ BundleNotUbiquitousBit // 0x02
+)
+
+// Parsing errors
+const (
+ Unknown Reason = iota // X000
+ ReadFailed // X001
+ DecodeFailed // X002
+ ParseFailed // X003
+)
+
+// The following represent certificate non-parsing errors, and must be
+// specified along with CertificateError.
+const (
+ // SelfSigned indicates that a certificate is self-signed and
+ // cannot be used in the manner being attempted.
+ SelfSigned Reason = 100 * (iota + 1) // Code 11XX
+
+ // VerifyFailed is an X.509 verification failure. The two least
+ // significant digits of 12XX are determined by examining the actual
+ // x509 error.
+ VerifyFailed // Code 12XX
+
+ // BadRequest indicates that the certificate request is invalid.
+ BadRequest // Code 13XX
+
+ // MissingSerial indicates that the profile specified
+ // 'ClientProvidesSerialNumbers', but the SignRequest did not include a serial
+ // number.
+ MissingSerial // Code 14XX
+)
+
+const (
+ certificateInvalid = 10 * (iota + 1) //121X
+ unknownAuthority //122X
+)
+
+// The following represent private-key non-parsing errors, and must be
+// specified with PrivateKeyError.
+const (
+ // Encrypted indicates that the private key is a PKCS #8 encrypted
+ // private key. At this time, CFSSL does not support decrypting
+ // these keys.
+ Encrypted Reason = 100 * (iota + 1) //21XX
+
+ // NotRSAOrECC indicates that they key is not an RSA or ECC
+ // private key; these are the only two private key types supported
+ // at this time by CFSSL.
+ NotRSAOrECC //22XX
+
+ // KeyMismatch indicates that the private key does not match
+ // the public key or certificate being presented with the key.
+ KeyMismatch //23XX
+
+ // GenerationFailed indicates that a private key could not
+ // be generated.
+ GenerationFailed //24XX
+
+ // Unavailable indicates that a private key mechanism (such as
+ // PKCS #11) was requested but support for that mechanism is
+ // not available.
+ Unavailable
+)
+
+// The following are policy-related non-parsing errors, and must be
+// specified along with PolicyError.
+const (
+ // NoKeyUsages indicates that the profile does not permit any
+ // key usages for the certificate.
+ NoKeyUsages Reason = 100 * (iota + 1) // 51XX
+
+ // InvalidPolicy indicates that policy being requested is not
+ // a valid policy or does not exist.
+ InvalidPolicy // 52XX
+
+ // InvalidRequest indicates a certificate request violated the
+ // constraints of the policy being applied to the request.
+ InvalidRequest // 53XX
+
+ // UnknownProfile indicates that the profile does not exist.
+ UnknownProfile // 54XX
+)
+
+// The following are API client related errors, and should be
+// specified with APIClientError.
+const (
+ // AuthenticationFailure occurs when the client is unable
+ // to obtain an authentication token for the request.
+ AuthenticationFailure Reason = 100 * (iota + 1)
+
+ // JSONError wraps an encoding/json error.
+ JSONError
+
+ // IOError wraps an io/ioutil error.
+ IOError
+
+ // ClientHTTPError wraps a net/http error.
+ ClientHTTPError
+
+ // ServerRequestFailed covers any other failures from the API
+ // client.
+ ServerRequestFailed
+)
+
+// The following are OCSP related errors, and should be
+// specified with OCSPError
+const (
+ // IssuerMismatch occurs when the certificate in the OCSP signing
+ // request was not issued by the CA that this responder responds for.
+ IssuerMismatch Reason = 100 * (iota + 1) // 81XX
+
+ // InvalidStatus occurs when the OCSP signing request includes an
+ // invalid value for the certificate status.
+ InvalidStatus
+)
+
+// Certificate transparency related errors specified with CTError
+const (
+ // PrecertSubmissionFailed occurs when submitting a precertificate to
+ // a log server fails
+ PrecertSubmissionFailed = 100 * (iota + 1)
+)
+
+// Certificate persistence related errors specified with CertStoreError
+const (
+ // InsertionFailed occurs when a SQL insert query fails to complete.
+ InsertionFailed = 100 * (iota + 1)
+ // RecordNotFound occurs when a SQL query targeting one unique
+ // record fails to update the specified row in the table.
+ RecordNotFound
+)
+
+// Error implements the error interface; it formats the error as a JSON object string.
+func (e *Error) Error() string {
+ marshaled, err := json.Marshal(e)
+ if err != nil {
+ panic(err)
+ }
+ return string(marshaled)
+
+}
+
+// New returns an error that contains an error code and message derived from
+// the given category and reason. Currently, to avoid confusion, it is not
+// allowed to create an error of category Success.
+func New(category Category, reason Reason) *Error {
+ errorCode := int(category) + int(reason)
+ var msg string
+ switch category {
+ case OCSPError:
+ switch reason {
+ case ReadFailed:
+ msg = "No certificate provided"
+ case IssuerMismatch:
+ msg = "Certificate not issued by this issuer"
+ case InvalidStatus:
+ msg = "Invalid revocation status"
+ }
+ case CertificateError:
+ switch reason {
+ case Unknown:
+ msg = "Unknown certificate error"
+ case ReadFailed:
+ msg = "Failed to read certificate"
+ case DecodeFailed:
+ msg = "Failed to decode certificate"
+ case ParseFailed:
+ msg = "Failed to parse certificate"
+ case SelfSigned:
+ msg = "Certificate is self signed"
+ case VerifyFailed:
+ msg = "Unable to verify certificate"
+ case BadRequest:
+ msg = "Invalid certificate request"
+ case MissingSerial:
+ msg = "Missing serial number in request"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category CertificateError.",
+ reason))
+
+ }
+ case PrivateKeyError:
+ switch reason {
+ case Unknown:
+ msg = "Unknown private key error"
+ case ReadFailed:
+ msg = "Failed to read private key"
+ case DecodeFailed:
+ msg = "Failed to decode private key"
+ case ParseFailed:
+ msg = "Failed to parse private key"
+ case Encrypted:
+ msg = "Private key is encrypted."
+ case NotRSAOrECC:
+ msg = "Private key algorithm is not RSA or ECC"
+ case KeyMismatch:
+ msg = "Private key does not match public key"
+ case GenerationFailed:
+ msg = "Failed to new private key"
+ case Unavailable:
+ msg = "Private key is unavailable"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PrivateKeyError.",
+ reason))
+ }
+ case IntermediatesError:
+ switch reason {
+ case Unknown:
+ msg = "Unknown intermediate certificate error"
+ case ReadFailed:
+ msg = "Failed to read intermediate certificate"
+ case DecodeFailed:
+ msg = "Failed to decode intermediate certificate"
+ case ParseFailed:
+ msg = "Failed to parse intermediate certificate"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category IntermediatesError.",
+ reason))
+ }
+ case RootError:
+ switch reason {
+ case Unknown:
+ msg = "Unknown root certificate error"
+ case ReadFailed:
+ msg = "Failed to read root certificate"
+ case DecodeFailed:
+ msg = "Failed to decode root certificate"
+ case ParseFailed:
+ msg = "Failed to parse root certificate"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category RootError.",
+ reason))
+ }
+ case PolicyError:
+ switch reason {
+ case Unknown:
+ msg = "Unknown policy error"
+ case NoKeyUsages:
+ msg = "Invalid policy: no key usage available"
+ case InvalidPolicy:
+ msg = "Invalid or unknown policy"
+ case InvalidRequest:
+ msg = "Policy violation request"
+ case UnknownProfile:
+ msg = "Unknown policy profile"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PolicyError.",
+ reason))
+ }
+ case DialError:
+ switch reason {
+ case Unknown:
+ msg = "Failed to dial remote server"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category DialError.",
+ reason))
+ }
+ case APIClientError:
+ switch reason {
+ case AuthenticationFailure:
+ msg = "API client authentication failure"
+ case JSONError:
+ msg = "API client JSON config error"
+ case ClientHTTPError:
+ msg = "API client HTTP error"
+ case IOError:
+ msg = "API client IO error"
+ case ServerRequestFailed:
+ msg = "API client error: Server request failed"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category APIClientError.",
+ reason))
+ }
+ case CSRError:
+ switch reason {
+ case Unknown:
+ msg = "CSR parsing failed due to unknown error"
+ case ReadFailed:
+ msg = "CSR file read failed"
+ case ParseFailed:
+ msg = "CSR Parsing failed"
+ case DecodeFailed:
+ msg = "CSR Decode failed"
+ case BadRequest:
+ msg = "CSR Bad request"
+ default:
+ panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category APIClientError.", reason))
+ }
+ case CTError:
+ switch reason {
+ case Unknown:
+ msg = "Certificate transparency parsing failed due to unknown error"
+ case PrecertSubmissionFailed:
+ msg = "Certificate transparency precertificate submission failed"
+ default:
+ panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CTError.", reason))
+ }
+ case CertStoreError:
+ switch reason {
+ case Unknown:
+ msg = "Certificate store action failed due to unknown error"
+ default:
+ panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CertStoreError.", reason))
+ }
+	default:
+ panic(fmt.Sprintf("Unsupported CFSSL error type: %d.",
+ category))
+ }
+ return &Error{ErrorCode: errorCode, Message: msg}
+}
+
+// Wrap returns an error that contains the given error and an error code derived from
+// the given category, reason, and the underlying error. Currently, to avoid confusion,
+// creating an error of category Success is not allowed.
+func Wrap(category Category, reason Reason, err error) *Error {
+ errorCode := int(category) + int(reason)
+ if err == nil {
+ panic("Wrap needs a supplied error to initialize.")
+ }
+
+	// do not double-wrap an error
+ switch err.(type) {
+ case *Error:
+ panic("Unable to wrap a wrapped error.")
+ }
+
+ switch category {
+ case CertificateError:
+		// given VerifyFailed, report the status with a more detailed status code
+		// for the certificate errors we care about.
+ if reason == VerifyFailed {
+ switch errorType := err.(type) {
+ case x509.CertificateInvalidError:
+ errorCode += certificateInvalid + int(errorType.Reason)
+ case x509.UnknownAuthorityError:
+ errorCode += unknownAuthority
+ }
+ }
+ case PrivateKeyError, IntermediatesError, RootError, PolicyError, DialError,
+ APIClientError, CSRError, CTError, CertStoreError:
+ // no-op, just use the error
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error type: %d.",
+ category))
+ }
+
+ return &Error{ErrorCode: errorCode, Message: err.Error()}
+}
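+
+// Editor's note: a minimal usage sketch, not part of the upstream file. It
+// assumes the Category and Reason constants defined above; error codes follow
+// the ErrorCode = int(category) + int(reason) rule implemented by New and Wrap.
+//
+//	e := New(PrivateKeyError, Encrypted)          // code: int(PrivateKeyError) + int(Encrypted)
+//	w := Wrap(CertificateError, ReadFailed, err)  // message comes from err.Error()
+//	// Wrap panics on a nil or already-wrapped error, as shown above.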
diff --git a/vendor/src/github.com/cloudflare/cfssl/errors/http.go b/vendor/src/github.com/cloudflare/cfssl/errors/http.go
new file mode 100644
index 0000000000..c9c0a39c70
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/errors/http.go
@@ -0,0 +1,47 @@
+package errors
+
+import (
+ "errors"
+ "net/http"
+)
+
+// HTTPError is an augmented error with a HTTP status code.
+type HTTPError struct {
+ StatusCode int
+ error
+}
+
+// Error implements the error interface.
+func (e *HTTPError) Error() string {
+ return e.error.Error()
+}
+
+// NewMethodNotAllowed returns an appropriate error in the case that
+// an HTTP client uses an invalid method (e.g. a GET in place of a POST)
+// on an API endpoint.
+func NewMethodNotAllowed(method string) *HTTPError {
+ return &HTTPError{http.StatusMethodNotAllowed, errors.New(`Method is not allowed:"` + method + `"`)}
+}
+
+// NewBadRequest creates an HTTPError with the given error and status code 400.
+func NewBadRequest(err error) *HTTPError {
+ return &HTTPError{http.StatusBadRequest, err}
+}
+
+// NewBadRequestString returns an HTTPError with the supplied message
+// and status code 400.
+func NewBadRequestString(s string) *HTTPError {
+ return NewBadRequest(errors.New(s))
+}
+
+// NewBadRequestMissingParameter returns a 400 HTTPError because a required
+// parameter is missing from the HTTP request.
+func NewBadRequestMissingParameter(s string) *HTTPError {
+ return NewBadRequestString(`Missing parameter "` + s + `"`)
+}
+
+// NewBadRequestUnwantedParameter returns a 400 HTTPError because an unnecessary
+// parameter is present in the HTTP request.
+func NewBadRequestUnwantedParameter(s string) *HTTPError {
+ return NewBadRequestString(`Unwanted parameter "` + s + `"`)
+}
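+
+// Editor's note: a hedged usage sketch with a hypothetical handler and a
+// hypothetical "label" parameter; not part of the upstream file.
+//
+//	func handle(w http.ResponseWriter, r *http.Request) {
+//		if r.Method != "POST" {
+//			e := NewMethodNotAllowed(r.Method)
+//			http.Error(w, e.Error(), e.StatusCode)
+//			return
+//		}
+//		if r.FormValue("label") == "" {
+//			e := NewBadRequestMissingParameter("label")
+//			http.Error(w, e.Error(), e.StatusCode)
+//		}
+//	}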
diff --git a/vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go b/vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go
new file mode 100644
index 0000000000..bcc7418508
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go
@@ -0,0 +1,42 @@
+// Package derhelpers implements common functionality
+// on DER encoded data
+package derhelpers
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+
+ cferr "github.com/cloudflare/cfssl/errors"
+)
+
+// ParsePrivateKeyDER parses a PKCS #1, PKCS #8, or elliptic curve
+// DER-encoded private key. The key must not be in PEM format.
+func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
+ generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
+ if err != nil {
+ generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
+ if err != nil {
+ generalKey, err = x509.ParseECPrivateKey(keyDER)
+ if err != nil {
+				// We don't include the actual error in the
+				// final error: doing so could leak
+				// information about the private key.
+ return nil, cferr.New(cferr.PrivateKeyError,
+ cferr.ParseFailed)
+ }
+ }
+ }
+
+	switch key := generalKey.(type) {
+	case *rsa.PrivateKey:
+		return key, nil
+	case *ecdsa.PrivateKey:
+		return key, nil
+	}
+
+ // should never reach here
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.ParseFailed)
+}
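+
+// Editor's note: a minimal sketch, not part of the upstream file, assuming a
+// freshly generated P-256 key (crypto/elliptic and crypto/rand are needed by
+// the caller); any PKCS #1, PKCS #8, or SEC 1 DER key works.
+//
+//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	der, _ := x509.MarshalECPrivateKey(priv)
+//	signer, err := ParsePrivateKeyDER(der) // crypto.Signer on success, a PrivateKeyError otherwise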
diff --git a/vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go b/vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go
new file mode 100644
index 0000000000..74d768134a
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go
@@ -0,0 +1,478 @@
+// Package helpers implements utility functionality common to many
+// CFSSL packages.
+package helpers
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/pem"
+ "errors"
+ "io/ioutil"
+ "math/big"
+
+ "strings"
+ "time"
+
+ "github.com/cloudflare/cfssl/crypto/pkcs7"
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers/derhelpers"
+ "github.com/cloudflare/cfssl/log"
+ "golang.org/x/crypto/pkcs12"
+)
+
+// OneYear is a time.Duration representing one year (8760 hours).
+const OneYear = 8760 * time.Hour
+
+// OneDay is a time.Duration representing one day (24 hours).
+const OneDay = 24 * time.Hour
+
+// InclusiveDate returns the time.Time representation of a date - 1
+// nanosecond. This allows time.After to be used inclusively.
+func InclusiveDate(year int, month time.Month, day int) time.Time {
+ return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
+}
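+
+// Editor's note: an illustrative check, not part of the upstream file,
+// showing why the one-nanosecond subtraction makes the boundary inclusive.
+//
+//	issued := time.Date(2012, time.July, 1, 0, 0, 0, 0, time.UTC)
+//	issued.After(Jul2012) // true: the deadline day itself counts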
+
+// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
+// issuing certificates valid for more than 5 years.
+var Jul2012 = InclusiveDate(2012, time.July, 01)
+
+// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
+// issuing certificates valid for more than 39 months.
+var Apr2015 = InclusiveDate(2015, time.April, 01)
+
+// KeyLength returns the bit size of ECDSA or RSA PublicKey
+func KeyLength(key interface{}) int {
+ if key == nil {
+ return 0
+ }
+ if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok {
+ return ecdsaKey.Curve.Params().BitSize
+ } else if rsaKey, ok := key.(*rsa.PublicKey); ok {
+ return rsaKey.N.BitLen()
+ }
+
+ return 0
+}
+
+// ExpiryTime returns the time when the certificate chain is expired.
+func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
+ if len(chain) == 0 {
+ return
+ }
+
+ notAfter = chain[0].NotAfter
+ for _, cert := range chain {
+ if notAfter.After(cert.NotAfter) {
+ notAfter = cert.NotAfter
+ }
+ }
+ return
+}
+
+// MonthsValid returns the number of months for which a certificate is valid.
+func MonthsValid(c *x509.Certificate) int {
+ issued := c.NotBefore
+ expiry := c.NotAfter
+ years := (expiry.Year() - issued.Year())
+ months := years*12 + int(expiry.Month()) - int(issued.Month())
+
+ // Round up if valid for less than a full month
+ if expiry.Day() > issued.Day() {
+ months++
+ }
+ return months
+}
+
+// ValidExpiry determines if a certificate is valid for an acceptable
+// length of time per the CA/Browser Forum baseline requirements.
+// See https://cabforum.org/wp-content/uploads/CAB-Forum-BR-1.3.0.pdf
+func ValidExpiry(c *x509.Certificate) bool {
+ issued := c.NotBefore
+
+ var maxMonths int
+ switch {
+ case issued.After(Apr2015):
+ maxMonths = 39
+ case issued.After(Jul2012):
+ maxMonths = 60
+ case issued.Before(Jul2012):
+ maxMonths = 120
+ }
+
+ if MonthsValid(c) > maxMonths {
+ return false
+ }
+ return true
+}
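+
+// Editor's note: a hedged usage sketch, not part of the upstream file;
+// certPEM is assumed to have been read by the caller.
+//
+//	cert, err := ParseCertificatePEM(certPEM)
+//	if err == nil && !ValidExpiry(cert) {
+//		log.Warningf("certificate valid for %d months, over the CAB Forum limit", MonthsValid(cert))
+//	}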
+
+// SignatureString returns the TLS signature string corresponding to
+// an X509 signature algorithm.
+func SignatureString(alg x509.SignatureAlgorithm) string {
+ switch alg {
+ case x509.MD2WithRSA:
+ return "MD2WithRSA"
+ case x509.MD5WithRSA:
+ return "MD5WithRSA"
+ case x509.SHA1WithRSA:
+ return "SHA1WithRSA"
+ case x509.SHA256WithRSA:
+ return "SHA256WithRSA"
+ case x509.SHA384WithRSA:
+ return "SHA384WithRSA"
+ case x509.SHA512WithRSA:
+ return "SHA512WithRSA"
+ case x509.DSAWithSHA1:
+ return "DSAWithSHA1"
+ case x509.DSAWithSHA256:
+ return "DSAWithSHA256"
+ case x509.ECDSAWithSHA1:
+ return "ECDSAWithSHA1"
+ case x509.ECDSAWithSHA256:
+ return "ECDSAWithSHA256"
+ case x509.ECDSAWithSHA384:
+ return "ECDSAWithSHA384"
+ case x509.ECDSAWithSHA512:
+ return "ECDSAWithSHA512"
+ default:
+ return "Unknown Signature"
+ }
+}
+
+// HashAlgoString returns the name of the hash algorithm contained in the
+// signature method.
+func HashAlgoString(alg x509.SignatureAlgorithm) string {
+ switch alg {
+ case x509.MD2WithRSA:
+ return "MD2"
+ case x509.MD5WithRSA:
+ return "MD5"
+ case x509.SHA1WithRSA:
+ return "SHA1"
+ case x509.SHA256WithRSA:
+ return "SHA256"
+ case x509.SHA384WithRSA:
+ return "SHA384"
+ case x509.SHA512WithRSA:
+ return "SHA512"
+ case x509.DSAWithSHA1:
+ return "SHA1"
+ case x509.DSAWithSHA256:
+ return "SHA256"
+ case x509.ECDSAWithSHA1:
+ return "SHA1"
+ case x509.ECDSAWithSHA256:
+ return "SHA256"
+ case x509.ECDSAWithSHA384:
+ return "SHA384"
+ case x509.ECDSAWithSHA512:
+ return "SHA512"
+ default:
+ return "Unknown Hash Algorithm"
+ }
+}
+
+// EncodeCertificatesPEM encodes a number of x509 certificates to PEM.
+func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
+ var buffer bytes.Buffer
+ for _, cert := range certs {
+ pem.Encode(&buffer, &pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: cert.Raw,
+ })
+ }
+
+ return buffer.Bytes()
+}
+
+// EncodeCertificatePEM encodes a single x509 certificate to PEM.
+func EncodeCertificatePEM(cert *x509.Certificate) []byte {
+ return EncodeCertificatesPEM([]*x509.Certificate{cert})
+}
+
+// ParseCertificatesPEM parses a sequence of PEM-encoded certificates and returns
+// them; it can handle PEM-encoded PKCS #7 structures.
+func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
+ var certs []*x509.Certificate
+ var err error
+ certsPEM = bytes.TrimSpace(certsPEM)
+ for len(certsPEM) > 0 {
+ var cert []*x509.Certificate
+ cert, certsPEM, err = ParseOneCertificateFromPEM(certsPEM)
+		if err != nil {
+			return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
+ } else if cert == nil {
+ break
+ }
+
+ certs = append(certs, cert...)
+ }
+ if len(certsPEM) > 0 {
+ return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+ }
+ return certs, nil
+}
+
+// ParseCertificatesDER parses a DER encoding of a certificate object and possibly
+// a private key, in PKCS #7, PKCS #12, or raw x509 format.
+func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
+ certsDER = bytes.TrimSpace(certsDER)
+ pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
+ if err != nil {
+ var pkcs12data interface{}
+ certs = make([]*x509.Certificate, 1)
+ pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
+ if err != nil {
+ certs, err = x509.ParseCertificates(certsDER)
+ if err != nil {
+ return nil, nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+ }
+ } else {
+ key = pkcs12data.(crypto.Signer)
+ }
+ } else {
+ if pkcs7data.ContentInfo != "SignedData" {
+ return nil, nil, cferr.Wrap(cferr.CertificateError, cferr.DecodeFailed, errors.New("can only extract certificates from signed data content info"))
+ }
+ certs = pkcs7data.Content.SignedData.Certificates
+ }
+ if certs == nil {
+ return nil, key, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+ }
+ return certs, key, nil
+}
+
+// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and checks that it is self-signed.
+func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
+ cert, err := ParseCertificatePEM(certPEM)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.VerifyFailed, err)
+ }
+ return cert, nil
+}
+
+// ParseCertificatePEM parses and returns a PEM-encoded certificate;
+// it can handle PEM-encoded PKCS #7 structures.
+func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
+ certPEM = bytes.TrimSpace(certPEM)
+ cert, rest, err := ParseOneCertificateFromPEM(certPEM)
+ if err != nil {
+ // Log the actual parsing error but throw a default parse error message.
+ log.Debugf("Certificate parsing error: %v", err)
+ return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
+ } else if cert == nil {
+ return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+ } else if len(rest) > 0 {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PEM file should contain only one object"))
+ } else if len(cert) > 1 {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
+ }
+ return cert[0], nil
+}
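+
+// Editor's note: a minimal sketch, not part of the upstream file, parsing a
+// PEM bundle; "bundle.pem" is an illustrative path.
+//
+//	pemBytes, _ := ioutil.ReadFile("bundle.pem")
+//	certs, err := ParseCertificatesPEM(pemBytes)
+//	if err == nil {
+//		log.Infof("parsed %d certificates", len(certs))
+//	}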
+
+// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
+// either a raw x509 certificate or a PKCS #7 structure possibly containing
+// multiple certificates, from the top of certsPEM, which itself may
+// contain multiple PEM encoded certificate objects.
+func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
+	block, rest := pem.Decode(certsPEM)
+ if block == nil {
+ return nil, rest, nil
+ }
+
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
+ if err != nil {
+ return nil, rest, err
+ }
+ if pkcs7data.ContentInfo != "SignedData" {
+ return nil, rest, errors.New("only PKCS #7 Signed Data Content Info supported for certificate parsing")
+ }
+ certs := pkcs7data.Content.SignedData.Certificates
+ if certs == nil {
+ return nil, rest, errors.New("PKCS #7 structure contains no certificates")
+ }
+ return certs, rest, nil
+ }
+ var certs = []*x509.Certificate{cert}
+ return certs, rest, nil
+}
+
+// LoadPEMCertPool loads a pool of PEM certificates from file.
+func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
+ pemCerts, err := ioutil.ReadFile(certsFile)
+ if err != nil {
+ return nil, err
+ }
+
+ certPool := x509.NewCertPool()
+ if !certPool.AppendCertsFromPEM(pemCerts) {
+ return nil, errors.New("failed to load cert pool")
+ }
+
+ return certPool, nil
+}
+
+// ParsePrivateKeyPEM parses and returns a PEM-encoded private
+// key. The private key may be an unencrypted PKCS #8, PKCS #1,
+// or elliptic private key.
+func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
+ return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
+}
+
+// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
+// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
+// or elliptic private key.
+func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
+ keyDER, err := GetKeyDERFromPEM(keyPEM, password)
+ if err != nil {
+ return nil, err
+ }
+
+ return derhelpers.ParsePrivateKeyDER(keyDER)
+}
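+
+// Editor's note: a hedged sketch, not part of the upstream file. With a nil
+// password, an encrypted key yields a PrivateKeyError with reason Encrypted.
+//
+//	key, err := ParsePrivateKeyPEMWithPassword(keyPEM, []byte("secret"))
+//	// keyPEM and the password are assumed to come from the caller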
+
+// GetKeyDERFromPEM parses a PEM-encoded private key and returns DER-format key bytes.
+func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
+ keyDER, _ := pem.Decode(in)
+ if keyDER != nil {
+ if procType, ok := keyDER.Headers["Proc-Type"]; ok {
+ if strings.Contains(procType, "ENCRYPTED") {
+ if password != nil {
+ return x509.DecryptPEMBlock(keyDER, password)
+ }
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.Encrypted)
+ }
+ }
+ return keyDER.Bytes, nil
+ }
+
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.DecodeFailed)
+}
+
+// CheckSignature verifies a signature made by the key in a CSR over signed
+// data, such as the signature on the CSR itself.
+func CheckSignature(csr *x509.CertificateRequest, algo x509.SignatureAlgorithm, signed, signature []byte) error {
+ var hashType crypto.Hash
+
+ switch algo {
+ case x509.SHA1WithRSA, x509.ECDSAWithSHA1:
+ hashType = crypto.SHA1
+ case x509.SHA256WithRSA, x509.ECDSAWithSHA256:
+ hashType = crypto.SHA256
+ case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
+ hashType = crypto.SHA384
+ case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
+ hashType = crypto.SHA512
+ default:
+ return x509.ErrUnsupportedAlgorithm
+ }
+
+ if !hashType.Available() {
+ return x509.ErrUnsupportedAlgorithm
+ }
+ h := hashType.New()
+
+ h.Write(signed)
+ digest := h.Sum(nil)
+
+ switch pub := csr.PublicKey.(type) {
+ case *rsa.PublicKey:
+ return rsa.VerifyPKCS1v15(pub, hashType, digest, signature)
+ case *ecdsa.PublicKey:
+ ecdsaSig := new(struct{ R, S *big.Int })
+ if _, err := asn1.Unmarshal(signature, ecdsaSig); err != nil {
+ return err
+ }
+ if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
+ return errors.New("x509: ECDSA signature contained zero or negative values")
+ }
+ if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) {
+ return errors.New("x509: ECDSA verification failure")
+ }
+ return nil
+ }
+ return x509.ErrUnsupportedAlgorithm
+}
+
+// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
+func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
+ in = bytes.TrimSpace(in)
+ p, rest := pem.Decode(in)
+ if p != nil {
+ if p.Type != "CERTIFICATE REQUEST" {
+ return nil, rest, cferr.New(cferr.CSRError, cferr.BadRequest)
+ }
+
+ csr, err = x509.ParseCertificateRequest(p.Bytes)
+ } else {
+ csr, err = x509.ParseCertificateRequest(in)
+ }
+
+ if err != nil {
+ return nil, rest, err
+ }
+
+ err = CheckSignature(csr, csr.SignatureAlgorithm, csr.RawTBSCertificateRequest, csr.Signature)
+ if err != nil {
+ return nil, rest, err
+ }
+
+ return csr, rest, nil
+}
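+
+// Editor's note: a minimal usage sketch, not part of the upstream file;
+// csrPEM is assumed to have been loaded by the caller.
+//
+//	req, rest, err := ParseCSR(csrPEM)
+//	if err == nil && len(rest) > 0 {
+//		log.Debugf("%d bytes of trailing data after the CSR", len(rest))
+//	}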
+
+// ParseCSRPEM parses a PEM-encoded certificate signing request.
+// It does not check the signature. This is useful for dumping data from a CSR
+// locally.
+func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
+	block, _ := pem.Decode(csrPEM)
+	if block == nil {
+		// guard against non-PEM input, which would otherwise panic below
+		return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed)
+	}
+	der := block.Bytes
+	csrObject, err := x509.ParseCertificateRequest(der)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return csrObject, nil
+}
+
+// SignerAlgo returns an X.509 signature algorithm corresponding to
+// the crypto.Hash provided from a crypto.Signer.
+func SignerAlgo(priv crypto.Signer, h crypto.Hash) x509.SignatureAlgorithm {
+ switch priv.Public().(type) {
+ case *rsa.PublicKey:
+ switch h {
+ case crypto.SHA512:
+ return x509.SHA512WithRSA
+ case crypto.SHA384:
+ return x509.SHA384WithRSA
+ case crypto.SHA256:
+ return x509.SHA256WithRSA
+ default:
+ return x509.SHA1WithRSA
+ }
+ case *ecdsa.PublicKey:
+ switch h {
+ case crypto.SHA512:
+ return x509.ECDSAWithSHA512
+ case crypto.SHA384:
+ return x509.ECDSAWithSHA384
+ case crypto.SHA256:
+ return x509.ECDSAWithSHA256
+ default:
+ return x509.ECDSAWithSHA1
+ }
+ default:
+ return x509.UnknownSignatureAlgorithm
+ }
+}
diff --git a/vendor/src/github.com/cloudflare/cfssl/info/info.go b/vendor/src/github.com/cloudflare/cfssl/info/info.go
new file mode 100644
index 0000000000..926a411ffb
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/info/info.go
@@ -0,0 +1,15 @@
+// Package info contains the definitions for the info endpoint
+package info
+
+// Req is the request struct for an info API request.
+type Req struct {
+ Label string `json:"label"`
+ Profile string `json:"profile"`
+}
+
+// Resp is the response for an Info API request.
+type Resp struct {
+ Certificate string `json:"certificate"`
+ Usage []string `json:"usages"`
+ ExpiryString string `json:"expiry"`
+}
diff --git a/vendor/src/github.com/cloudflare/cfssl/initca/initca.go b/vendor/src/github.com/cloudflare/cfssl/initca/initca.go
new file mode 100644
index 0000000000..aede763f4e
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/initca/initca.go
@@ -0,0 +1,278 @@
+// Package initca contains code to initialise a certificate authority,
+// generating a new root key and certificate.
+package initca
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "io/ioutil"
+ "net"
+ "time"
+
+ "github.com/cloudflare/cfssl/config"
+ "github.com/cloudflare/cfssl/csr"
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/log"
+ "github.com/cloudflare/cfssl/signer"
+ "github.com/cloudflare/cfssl/signer/local"
+)
+
+// validator contains the default validation logic for certificate
+// authority certificates. The only requirement here is that the
+// certificate have a non-empty subject field.
+func validator(req *csr.CertificateRequest) error {
+ if req.CN != "" {
+ return nil
+ }
+
+ if len(req.Names) == 0 {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information"))
+ }
+
+ for i := range req.Names {
+ if csr.IsNameEmpty(req.Names[i]) {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information"))
+ }
+ }
+
+ return nil
+}
+
+// New creates a new root certificate from the certificate request.
+func New(req *csr.CertificateRequest) (cert, csrPEM, key []byte, err error) {
+ if req.CA != nil {
+ if req.CA.Expiry != "" {
+ CAPolicy.Default.ExpiryString = req.CA.Expiry
+			CAPolicy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
+			if err != nil {
+				return
+			}
+		}
+
+ if req.CA.PathLength != 0 {
+ signer.MaxPathLen = req.CA.PathLength
+ }
+ }
+
+ g := &csr.Generator{Validator: validator}
+ csrPEM, key, err = g.ProcessRequest(req)
+ if err != nil {
+ log.Errorf("failed to process request: %v", err)
+ key = nil
+ return
+ }
+
+ priv, err := helpers.ParsePrivateKeyPEM(key)
+ if err != nil {
+ log.Errorf("failed to parse private key: %v", err)
+ return
+ }
+
+ s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), nil)
+ if err != nil {
+ log.Errorf("failed to create signer: %v", err)
+ return
+ }
+ s.SetPolicy(CAPolicy)
+
+ signReq := signer.SignRequest{Hosts: req.Hosts, Request: string(csrPEM)}
+ cert, err = s.Sign(signReq)
+
+ return
+}
+
+// NewFromPEM creates a new root certificate from the key file passed in.
+func NewFromPEM(req *csr.CertificateRequest, keyFile string) (cert, csrPEM []byte, err error) {
+ privData, err := ioutil.ReadFile(keyFile)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ priv, err := helpers.ParsePrivateKeyPEM(privData)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return NewFromSigner(req, priv)
+}
+
+// RenewFromPEM re-creates a root certificate from the CA cert and key
+// files. The resulting root certificate will have the input CA certificate
+// as the template and the same expiry length. E.g. if the existing CA
+// is valid for a year, from Jan 01 2015 to Jan 01 2016, the renewed certificate
+// will be valid from now and will likewise expire in one year.
+func RenewFromPEM(caFile, keyFile string) ([]byte, error) {
+ caBytes, err := ioutil.ReadFile(caFile)
+ if err != nil {
+ return nil, err
+ }
+
+ ca, err := helpers.ParseCertificatePEM(caBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ keyBytes, err := ioutil.ReadFile(keyFile)
+ if err != nil {
+ return nil, err
+ }
+
+ key, err := helpers.ParsePrivateKeyPEM(keyBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return RenewFromSigner(ca, key)
+}
+
+// NewFromSigner creates a new root certificate from a crypto.Signer.
+func NewFromSigner(req *csr.CertificateRequest, priv crypto.Signer) (cert, csrPEM []byte, err error) {
+ if req.CA != nil {
+ if req.CA.Expiry != "" {
+ CAPolicy.Default.ExpiryString = req.CA.Expiry
+ CAPolicy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ if req.CA.PathLength != 0 {
+ signer.MaxPathLen = req.CA.PathLength
+ }
+ }
+
+ var sigAlgo x509.SignatureAlgorithm
+ switch pub := priv.Public().(type) {
+ case *rsa.PublicKey:
+ bitLength := pub.N.BitLen()
+ switch {
+ case bitLength >= 4096:
+ sigAlgo = x509.SHA512WithRSA
+ case bitLength >= 3072:
+ sigAlgo = x509.SHA384WithRSA
+ case bitLength >= 2048:
+ sigAlgo = x509.SHA256WithRSA
+ default:
+ sigAlgo = x509.SHA1WithRSA
+ }
+ case *ecdsa.PublicKey:
+ switch pub.Curve {
+ case elliptic.P521():
+ sigAlgo = x509.ECDSAWithSHA512
+ case elliptic.P384():
+ sigAlgo = x509.ECDSAWithSHA384
+ case elliptic.P256():
+ sigAlgo = x509.ECDSAWithSHA256
+ default:
+ sigAlgo = x509.ECDSAWithSHA1
+ }
+ default:
+ sigAlgo = x509.UnknownSignatureAlgorithm
+ }
+
+ var tpl = x509.CertificateRequest{
+ Subject: req.Name(),
+ SignatureAlgorithm: sigAlgo,
+ }
+
+ for i := range req.Hosts {
+ if ip := net.ParseIP(req.Hosts[i]); ip != nil {
+ tpl.IPAddresses = append(tpl.IPAddresses, ip)
+ } else {
+ tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
+ }
+ }
+
+ return signWithCSR(&tpl, priv)
+}
+
+// signWithCSR creates a new root certificate by signing an x509.CertificateRequest
+// with a crypto.Signer.
+func signWithCSR(tpl *x509.CertificateRequest, priv crypto.Signer) (cert, csrPEM []byte, err error) {
+ csrPEM, err = x509.CreateCertificateRequest(rand.Reader, tpl, priv)
+ if err != nil {
+ log.Errorf("failed to generate a CSR: %v", err)
+		// The use of CertificateError was a matter of some
+		// debate; it is the one edge case in which a new
+		// error category specifically for CSRs might be
+		// useful, but it was deemed that one edge case did
+		// not justify a new category.
+ err = cferr.Wrap(cferr.CertificateError, cferr.BadRequest, err)
+ return
+ }
+
+ p := &pem.Block{
+ Type: "CERTIFICATE REQUEST",
+ Bytes: csrPEM,
+ }
+ csrPEM = pem.EncodeToMemory(p)
+
+ s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), nil)
+ if err != nil {
+ log.Errorf("failed to create signer: %v", err)
+ return
+ }
+ s.SetPolicy(CAPolicy)
+
+ signReq := signer.SignRequest{Request: string(csrPEM)}
+ cert, err = s.Sign(signReq)
+ return
+}
+
+// RenewFromSigner re-creates a root certificate from the CA cert and crypto.Signer.
+// The resulting root certificate will have the CA certificate
+// as the template and the same expiry length. E.g. if the existing CA
+// is valid for a year, from Jan 01 2015 to Jan 01 2016, the renewed certificate
+// will be valid from now and will likewise expire in one year.
+func RenewFromSigner(ca *x509.Certificate, priv crypto.Signer) ([]byte, error) {
+ if !ca.IsCA {
+ return nil, errors.New("input certificate is not a CA cert")
+ }
+
+ // matching certificate public key vs private key
+ switch {
+ case ca.PublicKeyAlgorithm == x509.RSA:
+		var rsaPublicKey *rsa.PublicKey
+ var ok bool
+ if rsaPublicKey, ok = priv.Public().(*rsa.PublicKey); !ok {
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+ }
+ if ca.PublicKey.(*rsa.PublicKey).N.Cmp(rsaPublicKey.N) != 0 {
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+ }
+ case ca.PublicKeyAlgorithm == x509.ECDSA:
+ var ecdsaPublicKey *ecdsa.PublicKey
+ var ok bool
+ if ecdsaPublicKey, ok = priv.Public().(*ecdsa.PublicKey); !ok {
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+ }
+ if ca.PublicKey.(*ecdsa.PublicKey).X.Cmp(ecdsaPublicKey.X) != 0 {
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+ }
+ default:
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.NotRSAOrECC)
+ }
+
+ req := csr.ExtractCertificateRequest(ca)
+
+ cert, _, err := NewFromSigner(req, priv)
+ return cert, err
+}
+
+// CAPolicy contains the CA issuing policy as default policy.
+var CAPolicy = &config.Signing{
+ Default: &config.SigningProfile{
+ Usage: []string{"cert sign", "crl sign"},
+ ExpiryString: "43800h",
+ Expiry: 5 * helpers.OneYear,
+ CA: true,
+ },
+}
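+
+// Editor's note: a hedged usage sketch, not part of the upstream file. The
+// CN and Hosts values are illustrative; field names come from the csr package.
+//
+//	req := &csr.CertificateRequest{
+//		CN:    "Example Root CA",
+//		Hosts: []string{"ca.example.com"},
+//	}
+//	cert, csrPEM, key, err := New(req) // PEM-encoded cert, CSR, and private key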
diff --git a/vendor/src/github.com/cloudflare/cfssl/log/log.go b/vendor/src/github.com/cloudflare/cfssl/log/log.go
new file mode 100644
index 0000000000..b517c44eea
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/log/log.go
@@ -0,0 +1,174 @@
+// Package log implements a wrapper around the Go standard library's
+// logging package. Clients should set the current log level; only
+// messages at or above that level will actually be logged. For example,
+// if Level is set to LevelWarning, only log messages at the Warning,
+// Error, Critical, and Fatal levels will be logged.
+package log
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+)
+
+// The following constants represent logging levels in increasing order of severity.
+const (
+ // LevelDebug is the log level for Debug statements.
+ LevelDebug = iota
+ // LevelInfo is the log level for Info statements.
+ LevelInfo
+ // LevelWarning is the log level for Warning statements.
+ LevelWarning
+ // LevelError is the log level for Error statements.
+ LevelError
+ // LevelCritical is the log level for Critical statements.
+ LevelCritical
+ // LevelFatal is the log level for Fatal statements.
+ LevelFatal
+)
+
+var levelPrefix = [...]string{
+ LevelDebug: "DEBUG",
+ LevelInfo: "INFO",
+ LevelWarning: "WARNING",
+ LevelError: "ERROR",
+ LevelCritical: "CRITICAL",
+ LevelFatal: "FATAL",
+}
+
+// Level stores the current logging level.
+var Level = LevelInfo
+
+// SyslogWriter specifies the necessary methods for an alternate output
+// destination passed in via SetLogger.
+//
+// SyslogWriter is satisfied by *syslog.Writer.
+type SyslogWriter interface {
+ Debug(string) error
+ Info(string) error
+ Warning(string) error
+ Err(string) error
+ Crit(string) error
+ Emerg(string) error
+}
+
+// syslogWriter stores the SetLogger() parameter.
+var syslogWriter SyslogWriter
+
+// SetLogger sets the logger used for output by this package.
+// A *syslog.Writer is a good choice for the logger parameter.
+// Call with a nil parameter to revert to default behavior.
+func SetLogger(logger SyslogWriter) {
+ syslogWriter = logger
+}
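+
+// Editor's note: a hedged caller-side sketch, not part of the upstream file.
+//
+//	log.Level = log.LevelWarning   // drop Debug and Info messages
+//	log.Warning("disk space low")  // goes to the standard library logger
+//	// To route output to syslog instead (assuming a *syslog.Writer w):
+//	// log.SetLogger(w)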
+
+func init() {
+ // Only define loglevel flag once.
+ if flag.Lookup("loglevel") == nil {
+ flag.IntVar(&Level, "loglevel", LevelInfo, "Log level (0 = DEBUG, 5 = FATAL)")
+ }
+}
+
+func print(l int, msg string) {
+ if l >= Level {
+ if syslogWriter != nil {
+ var err error
+ switch l {
+ case LevelDebug:
+ err = syslogWriter.Debug(msg)
+ case LevelInfo:
+ err = syslogWriter.Info(msg)
+ case LevelWarning:
+ err = syslogWriter.Warning(msg)
+ case LevelError:
+ err = syslogWriter.Err(msg)
+ case LevelCritical:
+ err = syslogWriter.Crit(msg)
+ case LevelFatal:
+ err = syslogWriter.Emerg(msg)
+ }
+ if err != nil {
+ log.Printf("Unable to write syslog: %v for msg: %s\n", err, msg)
+ }
+ } else {
+ log.Printf("[%s] %s", levelPrefix[l], msg)
+ }
+ }
+}
+
+func outputf(l int, format string, v []interface{}) {
+ print(l, fmt.Sprintf(format, v...))
+}
+
+func output(l int, v []interface{}) {
+ print(l, fmt.Sprint(v...))
+}
+
+// Fatalf logs a formatted message at the "fatal" level and then exits. The
+// arguments are handled in the same manner as fmt.Printf.
+func Fatalf(format string, v ...interface{}) {
+ outputf(LevelFatal, format, v)
+ os.Exit(1)
+}
+
+// Fatal logs its arguments at the "fatal" level and then exits.
+func Fatal(v ...interface{}) {
+ output(LevelFatal, v)
+ os.Exit(1)
+}
+
+// Criticalf logs a formatted message at the "critical" level. The
+// arguments are handled in the same manner as fmt.Printf.
+func Criticalf(format string, v ...interface{}) {
+ outputf(LevelCritical, format, v)
+}
+
+// Critical logs its arguments at the "critical" level.
+func Critical(v ...interface{}) {
+ output(LevelCritical, v)
+}
+
+// Errorf logs a formatted message at the "error" level. The arguments
+// are handled in the same manner as fmt.Printf.
+func Errorf(format string, v ...interface{}) {
+ outputf(LevelError, format, v)
+}
+
+// Error logs its arguments at the "error" level.
+func Error(v ...interface{}) {
+ output(LevelError, v)
+}
+
+// Warningf logs a formatted message at the "warning" level. The
+// arguments are handled in the same manner as fmt.Printf.
+func Warningf(format string, v ...interface{}) {
+ outputf(LevelWarning, format, v)
+}
+
+// Warning logs its arguments at the "warning" level.
+func Warning(v ...interface{}) {
+ output(LevelWarning, v)
+}
+
+// Infof logs a formatted message at the "info" level. The arguments
+// are handled in the same manner as fmt.Printf.
+func Infof(format string, v ...interface{}) {
+ outputf(LevelInfo, format, v)
+}
+
+// Info logs its arguments at the "info" level.
+func Info(v ...interface{}) {
+ output(LevelInfo, v)
+}
+
+// Debugf logs a formatted message at the "debug" level. The arguments
+// are handled in the same manner as fmt.Printf.
+func Debugf(format string, v ...interface{}) {
+ outputf(LevelDebug, format, v)
+}
+
+// Debug logs its arguments at the "debug" level.
+func Debug(v ...interface{}) {
+ output(LevelDebug, v)
+}
diff --git a/vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go b/vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go
new file mode 100644
index 0000000000..a19b113d4e
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go
@@ -0,0 +1,13 @@
+// Package config in the ocsp directory provides configuration data for an OCSP
+// signer.
+package config
+
+import "time"
+
+// Config contains configuration information required to set up an OCSP signer.
+type Config struct {
+ CACertFile string
+ ResponderCertFile string
+ KeyFile string
+ Interval time.Duration
+}
diff --git a/vendor/src/github.com/cloudflare/cfssl/signer/local/local.go b/vendor/src/github.com/cloudflare/cfssl/signer/local/local.go
new file mode 100644
index 0000000000..ac16ae4a9e
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/signer/local/local.go
@@ -0,0 +1,447 @@
+// Package local implements certificate signature functionality for CFSSL.
+package local
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/big"
+ "net"
+ "net/mail"
+ "os"
+
+ "github.com/cloudflare/cfssl/certdb"
+ "github.com/cloudflare/cfssl/config"
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/info"
+ "github.com/cloudflare/cfssl/log"
+ "github.com/cloudflare/cfssl/signer"
+ "github.com/google/certificate-transparency/go"
+ "github.com/google/certificate-transparency/go/client"
+)
+
+// Signer contains a signer that uses the standard library to
+// support both ECDSA and RSA CA keys.
+type Signer struct {
+ ca *x509.Certificate
+ priv crypto.Signer
+ policy *config.Signing
+ sigAlgo x509.SignatureAlgorithm
+ dbAccessor certdb.Accessor
+}
+
+// NewSigner creates a new Signer directly from a
+// private key and certificate, with optional policy.
+func NewSigner(priv crypto.Signer, cert *x509.Certificate, sigAlgo x509.SignatureAlgorithm, policy *config.Signing) (*Signer, error) {
+ if policy == nil {
+ policy = &config.Signing{
+ Profiles: map[string]*config.SigningProfile{},
+ Default: config.DefaultConfig()}
+ }
+
+ if !policy.Valid() {
+ return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+ }
+
+ return &Signer{
+ ca: cert,
+ priv: priv,
+ sigAlgo: sigAlgo,
+ policy: policy,
+ }, nil
+}
+
+// NewSignerFromFile generates a new local signer from a caFile
+// and a caKey file, both PEM encoded.
+func NewSignerFromFile(caFile, caKeyFile string, policy *config.Signing) (*Signer, error) {
+ log.Debug("Loading CA: ", caFile)
+ ca, err := ioutil.ReadFile(caFile)
+ if err != nil {
+ return nil, err
+ }
+ log.Debug("Loading CA key: ", caKeyFile)
+ cakey, err := ioutil.ReadFile(caKeyFile)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ReadFailed, err)
+ }
+
+ parsedCa, err := helpers.ParseCertificatePEM(ca)
+ if err != nil {
+ return nil, err
+ }
+
+ strPassword := os.Getenv("CFSSL_CA_PK_PASSWORD")
+ password := []byte(strPassword)
+ if strPassword == "" {
+ password = nil
+ }
+
+ priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
+ if err != nil {
+ log.Debug("Malformed private key %v", err)
+ return nil, err
+ }
+
+ return NewSigner(priv, parsedCa, signer.DefaultSigAlgo(priv), policy)
+}
+
+func (s *Signer) sign(template *x509.Certificate, profile *config.SigningProfile) (cert []byte, err error) {
+ err = signer.FillTemplate(template, s.policy.Default, profile)
+ if err != nil {
+ return
+ }
+
+ var initRoot bool
+ if s.ca == nil {
+ if !template.IsCA {
+ err = cferr.New(cferr.PolicyError, cferr.InvalidRequest)
+ return
+ }
+ template.DNSNames = nil
+ template.EmailAddresses = nil
+ s.ca = template
+ initRoot = true
+ template.MaxPathLen = signer.MaxPathLen
+ } else if template.IsCA {
+ template.MaxPathLen = 1
+ template.DNSNames = nil
+ template.EmailAddresses = nil
+ }
+
+ derBytes, err := x509.CreateCertificate(rand.Reader, template, s.ca, template.PublicKey, s.priv)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err)
+ }
+ if initRoot {
+ s.ca, err = x509.ParseCertificate(derBytes)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+ }
+
+ cert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+ log.Infof("signed certificate with serial number %d", template.SerialNumber)
+ return
+}
+
+// replaceSliceIfEmpty replaces the contents of replaced with newContents if
+// the slice referenced by replaced is empty
+func replaceSliceIfEmpty(replaced, newContents *[]string) {
+ if len(*replaced) == 0 {
+ *replaced = *newContents
+ }
+}
+
+// PopulateSubjectFromCSR works like Subject.Name, except that it fills any
+// empty fields of the resulting pkix.Name with the corresponding fields
+// from req.
+func PopulateSubjectFromCSR(s *signer.Subject, req pkix.Name) pkix.Name {
+ // if no subject, use req
+ if s == nil {
+ return req
+ }
+
+ name := s.Name()
+
+ if name.CommonName == "" {
+ name.CommonName = req.CommonName
+ }
+
+ replaceSliceIfEmpty(&name.Country, &req.Country)
+ replaceSliceIfEmpty(&name.Province, &req.Province)
+ replaceSliceIfEmpty(&name.Locality, &req.Locality)
+ replaceSliceIfEmpty(&name.Organization, &req.Organization)
+ replaceSliceIfEmpty(&name.OrganizationalUnit, &req.OrganizationalUnit)
+ if name.SerialNumber == "" {
+ name.SerialNumber = req.SerialNumber
+ }
+ return name
+}
+
+// OverrideHosts fills template's IPAddresses, EmailAddresses, and DNSNames with the
+// content of hosts, if it is not nil.
+func OverrideHosts(template *x509.Certificate, hosts []string) {
+ if hosts != nil {
+ template.IPAddresses = []net.IP{}
+ template.EmailAddresses = []string{}
+ template.DNSNames = []string{}
+ }
+
+ for i := range hosts {
+ if ip := net.ParseIP(hosts[i]); ip != nil {
+ template.IPAddresses = append(template.IPAddresses, ip)
+ } else if email, err := mail.ParseAddress(hosts[i]); err == nil && email != nil {
+ template.EmailAddresses = append(template.EmailAddresses, email.Address)
+ } else {
+ template.DNSNames = append(template.DNSNames, hosts[i])
+ }
+ }
+}
+
+// Sign signs a new certificate based on the PEM-encoded client
+// certificate or certificate request, using the signing profile
+// specified by req.Profile.
+func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) {
+ profile, err := signer.Profile(s, req.Profile)
+ if err != nil {
+ return
+ }
+
+ block, _ := pem.Decode([]byte(req.Request))
+ if block == nil {
+ return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed)
+ }
+
+ if block.Type != "CERTIFICATE REQUEST" {
+ return nil, cferr.Wrap(cferr.CSRError,
+ cferr.BadRequest, errors.New("not a certificate or csr"))
+ }
+
+ csrTemplate, err := signer.ParseCertificateRequest(s, block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Copy out only the fields from the CSR authorized by policy.
+ safeTemplate := x509.Certificate{}
+ // If the profile contains no explicit whitelist, assume that all fields
+ // should be copied from the CSR.
+ if profile.CSRWhitelist == nil {
+ safeTemplate = *csrTemplate
+ } else {
+ if profile.CSRWhitelist.Subject {
+ safeTemplate.Subject = csrTemplate.Subject
+ }
+ if profile.CSRWhitelist.PublicKeyAlgorithm {
+ safeTemplate.PublicKeyAlgorithm = csrTemplate.PublicKeyAlgorithm
+ }
+ if profile.CSRWhitelist.PublicKey {
+ safeTemplate.PublicKey = csrTemplate.PublicKey
+ }
+ if profile.CSRWhitelist.SignatureAlgorithm {
+ safeTemplate.SignatureAlgorithm = csrTemplate.SignatureAlgorithm
+ }
+ if profile.CSRWhitelist.DNSNames {
+ safeTemplate.DNSNames = csrTemplate.DNSNames
+ }
+ if profile.CSRWhitelist.IPAddresses {
+ safeTemplate.IPAddresses = csrTemplate.IPAddresses
+ }
+ if profile.CSRWhitelist.EmailAddresses {
+ safeTemplate.EmailAddresses = csrTemplate.EmailAddresses
+ }
+ }
+
+ OverrideHosts(&safeTemplate, req.Hosts)
+ safeTemplate.Subject = PopulateSubjectFromCSR(req.Subject, safeTemplate.Subject)
+
+ // If there is a whitelist, ensure that both the Common Name and SAN DNSNames match
+ if profile.NameWhitelist != nil {
+ if safeTemplate.Subject.CommonName != "" {
+ if profile.NameWhitelist.Find([]byte(safeTemplate.Subject.CommonName)) == nil {
+ return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+ }
+ }
+ for _, name := range safeTemplate.DNSNames {
+ if profile.NameWhitelist.Find([]byte(name)) == nil {
+ return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+ }
+ }
+ for _, name := range safeTemplate.EmailAddresses {
+ if profile.NameWhitelist.Find([]byte(name)) == nil {
+ return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+ }
+ }
+ }
+
+ if profile.ClientProvidesSerialNumbers {
+ if req.Serial == nil {
+ return nil, cferr.New(cferr.CertificateError, cferr.MissingSerial)
+ }
+ safeTemplate.SerialNumber = req.Serial
+ } else {
+ // RFC 5280 4.1.2.2:
+ // Certificate users MUST be able to handle serialNumber
+ // values up to 20 octets. Conforming CAs MUST NOT use
+ // serialNumber values longer than 20 octets.
+ //
+ // If CFSSL is providing the serial numbers, it makes
+ // sense to use the max supported size.
+ serialNumber := make([]byte, 20)
+ _, err = io.ReadFull(rand.Reader, serialNumber)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err)
+ }
+
+ // SetBytes interprets buf as the bytes of a big-endian
+ // unsigned integer. The leading byte should be masked
+ // off to ensure it isn't negative.
+ serialNumber[0] &= 0x7F
+
+ safeTemplate.SerialNumber = new(big.Int).SetBytes(serialNumber)
+ }
+
+ if len(req.Extensions) > 0 {
+ for _, ext := range req.Extensions {
+ oid := asn1.ObjectIdentifier(ext.ID)
+ if !profile.ExtensionWhitelist[oid.String()] {
+ return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest)
+ }
+
+ rawValue, err := hex.DecodeString(ext.Value)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.InvalidRequest, err)
+ }
+
+ safeTemplate.ExtraExtensions = append(safeTemplate.ExtraExtensions, pkix.Extension{
+ Id: oid,
+ Critical: ext.Critical,
+ Value: rawValue,
+ })
+ }
+ }
+
+ var certTBS = safeTemplate
+
+ if len(profile.CTLogServers) > 0 {
+ // Add a poison extension which prevents validation
+ var poisonExtension = pkix.Extension{Id: signer.CTPoisonOID, Critical: true, Value: []byte{0x05, 0x00}}
+ var poisonedPreCert = certTBS
+ poisonedPreCert.ExtraExtensions = append(safeTemplate.ExtraExtensions, poisonExtension)
+ cert, err = s.sign(&poisonedPreCert, profile)
+ if err != nil {
+ return
+ }
+
+ derCert, _ := pem.Decode(cert)
+ prechain := []ct.ASN1Cert{derCert.Bytes, s.ca.Raw}
+ var sctList []ct.SignedCertificateTimestamp
+
+ for _, server := range profile.CTLogServers {
+ log.Infof("submitting poisoned precertificate to %s", server)
+ var ctclient = client.New(server)
+ var resp *ct.SignedCertificateTimestamp
+ resp, err = ctclient.AddPreChain(prechain)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CTError, cferr.PrecertSubmissionFailed, err)
+ }
+ sctList = append(sctList, *resp)
+ }
+
+ var serializedSCTList []byte
+ serializedSCTList, err = serializeSCTList(sctList)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
+ }
+
+ // Serialize again as an octet string before embedding
+ serializedSCTList, err = asn1.Marshal(serializedSCTList)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
+ }
+
+ var SCTListExtension = pkix.Extension{Id: signer.SCTListOID, Critical: false, Value: serializedSCTList}
+ certTBS.ExtraExtensions = append(certTBS.ExtraExtensions, SCTListExtension)
+ }
+ var signedCert []byte
+ signedCert, err = s.sign(&certTBS, profile)
+ if err != nil {
+ return nil, err
+ }
+
+ if s.dbAccessor != nil {
+ var certRecord = certdb.CertificateRecord{
+ Serial: certTBS.SerialNumber.String(),
+ // this relies on the specific behavior of x509.CreateCertificate
+ // which updates certTBS AuthorityKeyId from the signer's SubjectKeyId
+ AKI: hex.EncodeToString(certTBS.AuthorityKeyId),
+ CALabel: req.Label,
+ Status: "good",
+ Expiry: certTBS.NotAfter,
+ PEM: string(signedCert),
+ }
+
+ err = s.dbAccessor.InsertCertificate(certRecord)
+ if err != nil {
+ return nil, err
+ }
+ log.Debug("saved certificate with serial number ", certTBS.SerialNumber)
+ }
+
+ return signedCert, nil
+}
+
+func serializeSCTList(sctList []ct.SignedCertificateTimestamp) ([]byte, error) {
+ var buf bytes.Buffer
+ for _, sct := range sctList {
+ sct, err := ct.SerializeSCT(sct)
+ if err != nil {
+ return nil, err
+ }
+ binary.Write(&buf, binary.BigEndian, uint16(len(sct)))
+ buf.Write(sct)
+ }
+
+ var sctListLengthField = make([]byte, 2)
+ binary.BigEndian.PutUint16(sctListLengthField, uint16(buf.Len()))
+ return bytes.Join([][]byte{sctListLengthField, buf.Bytes()}, nil), nil
+}
+
+// Info returns a populated info.Resp struct or an error.
+func (s *Signer) Info(req info.Req) (resp *info.Resp, err error) {
+ cert, err := s.Certificate(req.Label, req.Profile)
+ if err != nil {
+ return
+ }
+
+ profile, err := signer.Profile(s, req.Profile)
+ if err != nil {
+ return
+ }
+
+ resp = new(info.Resp)
+ if cert.Raw != nil {
+ resp.Certificate = string(bytes.TrimSpace(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})))
+ }
+ resp.Usage = profile.Usage
+ resp.ExpiryString = profile.ExpiryString
+
+ return
+}
+
+// SigAlgo returns the signer's signature algorithm.
+func (s *Signer) SigAlgo() x509.SignatureAlgorithm {
+ return s.sigAlgo
+}
+
+// Certificate returns the signer's certificate.
+func (s *Signer) Certificate(label, profile string) (*x509.Certificate, error) {
+ cert := *s.ca
+ return &cert, nil
+}
+
+// SetPolicy sets the signer's signature policy.
+func (s *Signer) SetPolicy(policy *config.Signing) {
+ s.policy = policy
+}
+
+// SetDBAccessor sets the signer's cert db accessor.
+func (s *Signer) SetDBAccessor(dba certdb.Accessor) {
+ s.dbAccessor = dba
+}
+
+// Policy returns the signer's policy.
+func (s *Signer) Policy() *config.Signing {
+ return s.policy
+}
diff --git a/vendor/src/github.com/cloudflare/cfssl/signer/signer.go b/vendor/src/github.com/cloudflare/cfssl/signer/signer.go
new file mode 100644
index 0000000000..2911cfc285
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/signer/signer.go
@@ -0,0 +1,385 @@
+// Package signer implements certificate signature functionality for CFSSL.
+package signer
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "math/big"
+ "strings"
+ "time"
+
+ "github.com/cloudflare/cfssl/certdb"
+ "github.com/cloudflare/cfssl/config"
+ "github.com/cloudflare/cfssl/csr"
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/info"
+)
+
+// MaxPathLen is the default path length for a new CA certificate.
+var MaxPathLen = 2
+
+// Subject contains the information that should be used to override the
+// subject information when signing a certificate.
+type Subject struct {
+ CN string
+ Names []csr.Name `json:"names"`
+ SerialNumber string
+}
+
+// Extension represents a raw extension to be included in the certificate. The
+// "value" field must be hex encoded.
+type Extension struct {
+ ID config.OID `json:"id"`
+ Critical bool `json:"critical"`
+ Value string `json:"value"`
+}
+
+// SignRequest stores a signature request, which contains the hostname,
+// the CSR, optional subject information, and the signature profile.
+//
+// Extensions provided in the signRequest are copied into the certificate, as
+// long as they are in the ExtensionWhitelist for the signer's policy.
+// Extensions requested in the CSR are ignored, except for those processed by
+// ParseCertificateRequest (mainly subjectAltName).
+type SignRequest struct {
+ Hosts []string `json:"hosts"`
+ Request string `json:"certificate_request"`
+ Subject *Subject `json:"subject,omitempty"`
+ Profile string `json:"profile"`
+ Label string `json:"label"`
+ Serial *big.Int `json:"serial,omitempty"`
+ Extensions []Extension `json:"extensions,omitempty"`
+}
+
+// appendIf appends s to a if s is not an empty string.
+func appendIf(s string, a *[]string) {
+ if s != "" {
+ *a = append(*a, s)
+ }
+}
+
+// Name returns the PKIX name for the subject.
+func (s *Subject) Name() pkix.Name {
+ var name pkix.Name
+ name.CommonName = s.CN
+
+ for _, n := range s.Names {
+ appendIf(n.C, &name.Country)
+ appendIf(n.ST, &name.Province)
+ appendIf(n.L, &name.Locality)
+ appendIf(n.O, &name.Organization)
+ appendIf(n.OU, &name.OrganizationalUnit)
+ }
+ name.SerialNumber = s.SerialNumber
+ return name
+}
+
+// SplitHosts takes a comma-separated list of hosts and returns a slice
+// of the individual hosts.
+func SplitHosts(hostList string) []string {
+ if hostList == "" {
+ return nil
+ }
+
+ return strings.Split(hostList, ",")
+}
+
+// A Signer contains a CA's certificate and private key for signing
+// certificates, a Signing policy to refer to, and a SignatureAlgorithm.
+type Signer interface {
+ Info(info.Req) (*info.Resp, error)
+ Policy() *config.Signing
+ SetDBAccessor(certdb.Accessor)
+ SetPolicy(*config.Signing)
+ SigAlgo() x509.SignatureAlgorithm
+ Sign(req SignRequest) (cert []byte, err error)
+}
+
+// Profile gets the specific profile from the signer
+func Profile(s Signer, profile string) (*config.SigningProfile, error) {
+ var p *config.SigningProfile
+ policy := s.Policy()
+ if policy != nil && policy.Profiles != nil && profile != "" {
+ p = policy.Profiles[profile]
+ }
+
+ if p == nil && policy != nil {
+ p = policy.Default
+ }
+
+ if p == nil {
+ return nil, cferr.Wrap(cferr.APIClientError, cferr.ClientHTTPError, errors.New("profile must not be nil"))
+ }
+ return p, nil
+}
+
+// DefaultSigAlgo returns an appropriate X.509 signature algorithm given
+// the CA's private key.
+func DefaultSigAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
+ pub := priv.Public()
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ keySize := pub.N.BitLen()
+ switch {
+ case keySize >= 4096:
+ return x509.SHA512WithRSA
+ case keySize >= 3072:
+ return x509.SHA384WithRSA
+ case keySize >= 2048:
+ return x509.SHA256WithRSA
+ default:
+ return x509.SHA1WithRSA
+ }
+ case *ecdsa.PublicKey:
+ switch pub.Curve {
+ case elliptic.P256():
+ return x509.ECDSAWithSHA256
+ case elliptic.P384():
+ return x509.ECDSAWithSHA384
+ case elliptic.P521():
+ return x509.ECDSAWithSHA512
+ default:
+ return x509.ECDSAWithSHA1
+ }
+ default:
+ return x509.UnknownSignatureAlgorithm
+ }
+}
+
+// ParseCertificateRequest takes an incoming certificate request and
+// builds a certificate template from it.
+func ParseCertificateRequest(s Signer, csrBytes []byte) (template *x509.Certificate, err error) {
+ csr, err := x509.ParseCertificateRequest(csrBytes)
+ if err != nil {
+ err = cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err)
+ return
+ }
+
+ err = helpers.CheckSignature(csr, csr.SignatureAlgorithm, csr.RawTBSCertificateRequest, csr.Signature)
+ if err != nil {
+ err = cferr.Wrap(cferr.CSRError, cferr.KeyMismatch, err)
+ return
+ }
+
+ template = &x509.Certificate{
+ Subject: csr.Subject,
+ PublicKeyAlgorithm: csr.PublicKeyAlgorithm,
+ PublicKey: csr.PublicKey,
+ SignatureAlgorithm: s.SigAlgo(),
+ DNSNames: csr.DNSNames,
+ IPAddresses: csr.IPAddresses,
+ EmailAddresses: csr.EmailAddresses,
+ }
+
+ return
+}
+
+type subjectPublicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ SubjectPublicKey asn1.BitString
+}
+
+// ComputeSKI derives an SKI from the certificate's public key in a
+// standard manner. This is done by computing the SHA-1 digest of the
+// SubjectPublicKeyInfo component of the certificate.
+func ComputeSKI(template *x509.Certificate) ([]byte, error) {
+ pub := template.PublicKey
+ encodedPub, err := x509.MarshalPKIXPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+
+ var subPKI subjectPublicKeyInfo
+ _, err = asn1.Unmarshal(encodedPub, &subPKI)
+ if err != nil {
+ return nil, err
+ }
+
+ pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes)
+ return pubHash[:], nil
+}
+
+// FillTemplate is a utility function that tries to load as much of
+// the certificate template as possible from the profiles and current
+// template. It fills in the key uses, expiration, revocation URLs
+// and SKI.
+func FillTemplate(template *x509.Certificate, defaultProfile, profile *config.SigningProfile) error {
+ ski, err := ComputeSKI(template)
+
+ var (
+ eku []x509.ExtKeyUsage
+ ku x509.KeyUsage
+ backdate time.Duration
+ expiry time.Duration
+ notBefore time.Time
+ notAfter time.Time
+ crlURL, ocspURL string
+ )
+
+ // The third value returned from Usages is a list of unknown key usages.
+ // This should be used when validating the profile at load, and isn't used
+ // here.
+ ku, eku, _ = profile.Usages()
+ if profile.IssuerURL == nil {
+ profile.IssuerURL = defaultProfile.IssuerURL
+ }
+
+ if ku == 0 && len(eku) == 0 {
+ return cferr.New(cferr.PolicyError, cferr.NoKeyUsages)
+ }
+
+ if expiry = profile.Expiry; expiry == 0 {
+ expiry = defaultProfile.Expiry
+ }
+
+ if crlURL = profile.CRL; crlURL == "" {
+ crlURL = defaultProfile.CRL
+ }
+ if ocspURL = profile.OCSP; ocspURL == "" {
+ ocspURL = defaultProfile.OCSP
+ }
+ if backdate = profile.Backdate; backdate == 0 {
+ backdate = -5 * time.Minute
+ } else {
+ backdate = -1 * profile.Backdate
+ }
+
+ if !profile.NotBefore.IsZero() {
+ notBefore = profile.NotBefore.UTC()
+ } else {
+ notBefore = time.Now().Round(time.Minute).Add(backdate).UTC()
+ }
+
+ if !profile.NotAfter.IsZero() {
+ notAfter = profile.NotAfter.UTC()
+ } else {
+ notAfter = notBefore.Add(expiry).UTC()
+ }
+
+ template.NotBefore = notBefore
+ template.NotAfter = notAfter
+ template.KeyUsage = ku
+ template.ExtKeyUsage = eku
+ template.BasicConstraintsValid = true
+ template.IsCA = profile.CA
+ template.SubjectKeyId = ski
+
+ if ocspURL != "" {
+ template.OCSPServer = []string{ocspURL}
+ }
+ if crlURL != "" {
+ template.CRLDistributionPoints = []string{crlURL}
+ }
+
+ if len(profile.IssuerURL) != 0 {
+ template.IssuingCertificateURL = profile.IssuerURL
+ }
+ if len(profile.Policies) != 0 {
+ err = addPolicies(template, profile.Policies)
+ if err != nil {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+ }
+ }
+ if profile.OCSPNoCheck {
+ ocspNoCheckExtension := pkix.Extension{
+ Id: asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 5},
+ Critical: false,
+ Value: []byte{0x05, 0x00},
+ }
+ template.ExtraExtensions = append(template.ExtraExtensions, ocspNoCheckExtension)
+ }
+
+ return nil
+}
+
+type policyInformation struct {
+ PolicyIdentifier asn1.ObjectIdentifier
+ Qualifiers []interface{} `asn1:"tag:optional,omitempty"`
+}
+
+type cpsPolicyQualifier struct {
+ PolicyQualifierID asn1.ObjectIdentifier
+ Qualifier string `asn1:"tag:optional,ia5"`
+}
+
+type userNotice struct {
+ ExplicitText string `asn1:"tag:optional,utf8"`
+}
+type userNoticePolicyQualifier struct {
+ PolicyQualifierID asn1.ObjectIdentifier
+ Qualifier userNotice
+}
+
+var (
+ // Per https://tools.ietf.org/html/rfc3280.html#page-106, this represents:
+ // iso(1) identified-organization(3) dod(6) internet(1) security(5)
+ // mechanisms(5) pkix(7) id-qt(2) id-qt-cps(1)
+ iDQTCertificationPracticeStatement = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1}
+ // iso(1) identified-organization(3) dod(6) internet(1) security(5)
+ // mechanisms(5) pkix(7) id-qt(2) id-qt-unotice(2)
+ iDQTUserNotice = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2}
+
+ // CTPoisonOID is the object ID of the critical poison extension for precertificates
+ // https://tools.ietf.org/html/rfc6962#page-9
+ CTPoisonOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
+
+ // SCTListOID is the object ID for the Signed Certificate Timestamp certificate extension
+ // https://tools.ietf.org/html/rfc6962#page-14
+ SCTListOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
+)
+
+// addPolicies adds Certificate Policies and optional Policy Qualifiers to a
+// certificate, based on the input config. Go's x509 library allows setting
+// Certificate Policies easily, but does not support nested Policy Qualifiers
+// under those policies. So we need to construct the ASN.1 structure ourselves.
+func addPolicies(template *x509.Certificate, policies []config.CertificatePolicy) error {
+ asn1PolicyList := []policyInformation{}
+
+ for _, policy := range policies {
+ pi := policyInformation{
+ // The PolicyIdentifier is an OID assigned to a given issuer.
+ PolicyIdentifier: asn1.ObjectIdentifier(policy.ID),
+ }
+ for _, qualifier := range policy.Qualifiers {
+ switch qualifier.Type {
+ case "id-qt-unotice":
+ pi.Qualifiers = append(pi.Qualifiers,
+ userNoticePolicyQualifier{
+ PolicyQualifierID: iDQTUserNotice,
+ Qualifier: userNotice{
+ ExplicitText: qualifier.Value,
+ },
+ })
+ case "id-qt-cps":
+ pi.Qualifiers = append(pi.Qualifiers,
+ cpsPolicyQualifier{
+ PolicyQualifierID: iDQTCertificationPracticeStatement,
+ Qualifier: qualifier.Value,
+ })
+ default:
+ return errors.New("Invalid qualifier type in Policies " + qualifier.Type)
+ }
+ }
+ asn1PolicyList = append(asn1PolicyList, pi)
+ }
+
+ asn1Bytes, err := asn1.Marshal(asn1PolicyList)
+ if err != nil {
+ return err
+ }
+
+ template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{
+ Id: asn1.ObjectIdentifier{2, 5, 29, 32},
+ Critical: false,
+ Value: asn1Bytes,
+ })
+ return nil
+}
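+
+// Illustrative usage sketch (not part of the upstream file): encoding one
+// policy with a CPS qualifier. The OID and URL are placeholders, and any
+// config struct/field names beyond those used in the loop above are
+// assumptions about the cfssl config package.
+//
+// policies := []config.CertificatePolicy{{
+// ID: config.OID{2, 23, 140, 1, 2, 1},
+// Qualifiers: []config.CertificatePolicyQualifier{
+// {Type: "id-qt-cps", Value: "https://example.com/cps"},
+// },
+// }}
+// // addPolicies(template, policies) appends a non-critical
+// // certificatePolicies (OID 2.5.29.32) extension to template.ExtraExtensions.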
diff --git a/vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE b/vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE
new file mode 100644
index 0000000000..2387f30269
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE
@@ -0,0 +1,13 @@
+Copyright (c) 2014 Kyle Isom <kyle@gokyle.org>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/src/github.com/coreos/etcd/pkg/crc/crc.go b/vendor/src/github.com/coreos/etcd/pkg/crc/crc.go
new file mode 100644
index 0000000000..4b998a4845
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/crc/crc.go
@@ -0,0 +1,43 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crc provides utility functions for cyclic redundancy check
+// algorithms.
+package crc
+
+import (
+ "hash"
+ "hash/crc32"
+)
+
+// Size is the size of a CRC-32 checksum in bytes.
+const Size = 4
+
+type digest struct {
+ crc uint32
+ tab *crc32.Table
+}
+
+// New creates a new hash.Hash32 computing the CRC-32 checksum
+// using the polynomial represented by the Table.
+// Modified by xiangli to take a prevcrc.
+func New(prev uint32, tab *crc32.Table) hash.Hash32 { return &digest{prev, tab} }
+
+func (d *digest) Size() int { return Size }
+
+func (d *digest) BlockSize() int { return 1 }
+
+func (d *digest) Reset() { d.crc = 0 }
+
+func (d *digest) Write(p []byte) (n int, err error) {
+ d.crc = crc32.Update(d.crc, d.tab, p)
+ return len(p), nil
+}
+
+func (d *digest) Sum32() uint32 { return d.crc }
+
+func (d *digest) Sum(in []byte) []byte {
+ s := d.Sum32()
+ return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
+}
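+
+// Illustrative usage sketch (not part of the upstream file): resuming a
+// CRC-32 computation from a previously recorded checksum, which is why New
+// takes a prev value. prevCRC and record are assumed inputs.
+//
+// h := crc.New(prevCRC, crc32.MakeTable(crc32.Castagnoli))
+// h.Write(record)
+// sum := h.Sum32()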
diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go
new file mode 100644
index 0000000000..145886a1a0
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go
@@ -0,0 +1,75 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fileutil implements utility functions related to files and paths.
+package fileutil
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "sort"
+
+ "github.com/coreos/pkg/capnslog"
+)
+
+const (
+ privateFileMode = 0600
+ // owner can make/remove files inside the directory
+ privateDirMode = 0700
+)
+
+var (
+ plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "fileutil")
+)
+
+// IsDirWriteable checks if dir is writable by writing to and then removing
+// a file in dir. It returns nil if dir is writable.
+func IsDirWriteable(dir string) error {
+ f := path.Join(dir, ".touch")
+ if err := ioutil.WriteFile(f, []byte(""), privateFileMode); err != nil {
+ return err
+ }
+ return os.Remove(f)
+}
+
+// ReadDir returns the filenames in the given directory in sorted order.
+func ReadDir(dirpath string) ([]string, error) {
+ dir, err := os.Open(dirpath)
+ if err != nil {
+ return nil, err
+ }
+ defer dir.Close()
+ names, err := dir.Readdirnames(-1)
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+// TouchDirAll is similar to os.MkdirAll. It creates any directories that do
+// not yet exist with 0700 permission. TouchDirAll also ensures the given
+// directory is writable.
+func TouchDirAll(dir string) error {
+ err := os.MkdirAll(dir, privateDirMode)
+ if err != nil && err != os.ErrExist {
+ return err
+ }
+ return IsDirWriteable(dir)
+}
+
+func Exist(name string) bool {
+ _, err := os.Stat(name)
+ return err == nil
+}
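+
+// Illustrative usage sketch (not part of the upstream file): a caller
+// typically creates its data directory and verifies writability in one
+// step. The path is a placeholder.
+//
+// if err := fileutil.TouchDirAll("/var/lib/mydata"); err != nil {
+// // the directory cannot be created or is not writable
+// }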
diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go
new file mode 100644
index 0000000000..bf411d3a17
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go
@@ -0,0 +1,29 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+type Lock interface {
+ // Name returns the name of the file.
+ Name() string
+ // TryLock acquires exclusivity on the lock without blocking.
+ TryLock() error
+ // Lock acquires exclusivity on the lock.
+ Lock() error
+ // Unlock unlocks the lock.
+ Unlock() error
+ // Destroy should be called after Unlock to clean up
+ // the resources.
+ Destroy() error
+}
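+
+// Illustrative calling sequence (not part of the upstream file), assuming
+// the platform-specific NewLock constructor defined alongside this
+// interface:
+//
+// l, err := NewLock(path)
+// if err != nil { /* handle */ }
+// if err := l.TryLock(); err == ErrLocked {
+// // another process holds the file; back off or skip it
+// } else if err == nil {
+// defer func() { l.Unlock(); l.Destroy() }()
+// // exclusive access here
+// }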
diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go
new file mode 100644
index 0000000000..bd2bc86764
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go
@@ -0,0 +1,79 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "errors"
+ "os"
+ "syscall"
+ "time"
+)
+
+var (
+ ErrLocked = errors.New("file already locked")
+)
+
+type lock struct {
+ fname string
+ file *os.File
+}
+
+func (l *lock) Name() string {
+ return l.fname
+}
+
+func (l *lock) TryLock() error {
+ err := os.Chmod(l.fname, syscall.DMEXCL|0600)
+ if err != nil {
+ return err
+ }
+
+ f, err := os.Open(l.fname)
+ if err != nil {
+ return ErrLocked
+ }
+
+ l.file = f
+ return nil
+}
+
+func (l *lock) Lock() error {
+ err := os.Chmod(l.fname, syscall.DMEXCL|0600)
+ if err != nil {
+ return err
+ }
+
+ for {
+ f, err := os.Open(l.fname)
+ if err == nil {
+ l.file = f
+ return nil
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+}
+
+func (l *lock) Unlock() error {
+ return l.file.Close()
+}
+
+func (l *lock) Destroy() error {
+ return nil
+}
+
+func NewLock(file string) (Lock, error) {
+ l := &lock{fname: file}
+ return l, nil
+}
diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
new file mode 100644
index 0000000000..e3b0a01768
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
@@ -0,0 +1,87 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build solaris
+
+package fileutil
+
+import (
+ "errors"
+ "os"
+ "syscall"
+)
+
+var (
+ ErrLocked = errors.New("file already locked")
+)
+
+type lock struct {
+ fd int
+ file *os.File
+}
+
+func (l *lock) Name() string {
+ return l.file.Name()
+}
+
+func (l *lock) TryLock() error {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Type = syscall.F_WRLCK
+ lock.Whence = 0
+ lock.Pid = 0
+ err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
+ if err != nil && err == syscall.EAGAIN {
+ return ErrLocked
+ }
+ return err
+}
+
+func (l *lock) Lock() error {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Type = syscall.F_WRLCK
+ lock.Whence = 0
+ lock.Pid = 0
+ return syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
+}
+
+func (l *lock) Unlock() error {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Type = syscall.F_UNLCK
+ lock.Whence = 0
+ err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
+ if err != nil && err == syscall.EAGAIN {
+ return ErrLocked
+ }
+ return err
+}
+
+func (l *lock) Destroy() error {
+ return l.file.Close()
+}
+
+func NewLock(file string) (Lock, error) {
+ f, err := os.OpenFile(file, os.O_WRONLY, 0600)
+ if err != nil {
+ return nil, err
+ }
+ l := &lock{int(f.Fd()), f}
+ return l, nil
+}
diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go
new file mode 100644
index 0000000000..4f90e42ace
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go
@@ -0,0 +1,65 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!plan9,!solaris
+
+package fileutil
+
+import (
+ "errors"
+ "os"
+ "syscall"
+)
+
+var (
+ ErrLocked = errors.New("file already locked")
+)
+
+type lock struct {
+ fd int
+ file *os.File
+}
+
+func (l *lock) Name() string {
+ return l.file.Name()
+}
+
+func (l *lock) TryLock() error {
+ err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB)
+ if err != nil && err == syscall.EWOULDBLOCK {
+ return ErrLocked
+ }
+ return err
+}
+
+func (l *lock) Lock() error {
+ return syscall.Flock(l.fd, syscall.LOCK_EX)
+}
+
+func (l *lock) Unlock() error {
+ return syscall.Flock(l.fd, syscall.LOCK_UN)
+}
+
+func (l *lock) Destroy() error {
+ return l.file.Close()
+}
+
+func NewLock(file string) (Lock, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ l := &lock{int(f.Fd()), f}
+ return l, nil
+}
diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go
new file mode 100644
index 0000000000..ddca9a6695
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go
@@ -0,0 +1,60 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package fileutil
+
+import (
+ "errors"
+ "os"
+)
+
+var (
+ ErrLocked = errors.New("file already locked")
+)
+
+type lock struct {
+ fd int
+ file *os.File
+}
+
+func (l *lock) Name() string {
+ return l.file.Name()
+}
+
+// File locking is not implemented on Windows in this vendored copy; TryLock,
+// Lock, and Unlock are therefore no-ops, and only Destroy closes the file.
+func (l *lock) TryLock() error {
+ return nil
+}
+
+func (l *lock) Lock() error {
+ return nil
+}
+
+func (l *lock) Unlock() error {
+ return nil
+}
+
+func (l *lock) Destroy() error {
+ return l.file.Close()
+}
+
+func NewLock(file string) (Lock, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ l := &lock{int(f.Fd()), f}
+ return l, nil
+}
diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go
new file mode 100644
index 0000000000..c1a952bb79
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go
@@ -0,0 +1,28 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package fileutil
+
+import "os"
+
+// Preallocate tries to allocate space for the given
+// file. This operation is only supported on Linux by a
+// few filesystems (btrfs, ext4, etc.).
+// If the operation is unsupported, no error is returned.
+// Otherwise, the error encountered is returned.
+func Preallocate(f *os.File, sizeInBytes int) error {
+ return nil
+}
diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go
new file mode 100644
index 0000000000..c4bd4f4c81
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go
@@ -0,0 +1,42 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+)
+
+// Preallocate tries to allocate space for the given
+// file. This operation is only supported on Linux by a
+// few filesystems (btrfs, ext4, etc.).
+// If the operation is unsupported, no error is returned.
+// Otherwise, the error encountered is returned.
+func Preallocate(f *os.File, sizeInBytes int) error {
+ // use mode = 1 to keep size
+ // see FALLOC_FL_KEEP_SIZE
+ err := syscall.Fallocate(int(f.Fd()), 1, 0, int64(sizeInBytes))
+ if err != nil {
+ errno, ok := err.(syscall.Errno)
+ // treat not support as nil error
+ if ok && errno == syscall.ENOTSUP {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
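+
+// Illustrative usage sketch (not part of the upstream file): preallocating
+// 64 MB for a new log segment while keeping the visible file size at zero,
+// which is what mode 1 (FALLOC_FL_KEEP_SIZE) provides. The path is a
+// placeholder.
+//
+// f, err := os.Create("/var/lib/mydata/segment.tmp")
+// if err == nil {
+// if err := Preallocate(f, 64*1024*1024); err != nil {
+// // fall back to growing the file on demand
+// }
+// }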
diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go
new file mode 100644
index 0000000000..375aa97197
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go
@@ -0,0 +1,80 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "time"
+)
+
+func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
+ errC := make(chan error, 1)
+ go func() {
+ for {
+ fnames, err := ReadDir(dirname)
+ if err != nil {
+ errC <- err
+ return
+ }
+ newfnames := make([]string, 0)
+ for _, fname := range fnames {
+ if strings.HasSuffix(fname, suffix) {
+ newfnames = append(newfnames, fname)
+ }
+ }
+ sort.Strings(newfnames)
+ for len(newfnames) > int(max) {
+ f := path.Join(dirname, newfnames[0])
+ l, err := NewLock(f)
+ if err != nil {
+ errC <- err
+ return
+ }
+ err = l.TryLock()
+ if err != nil {
+ break
+ }
+ err = os.Remove(f)
+ if err != nil {
+ errC <- err
+ return
+ }
+ err = l.Unlock()
+ if err != nil {
+ plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
+ errC <- err
+ return
+ }
+ err = l.Destroy()
+ if err != nil {
+ plog.Errorf("error destroying lock %s when purging file (%v)", l.Name(), err)
+ errC <- err
+ return
+ }
+ plog.Infof("purged file %s successfully", f)
+ newfnames = newfnames[1:]
+ }
+ select {
+ case <-time.After(interval):
+ case <-stop:
+ return
+ }
+ }
+ }()
+ return errC
+}
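+
+// Illustrative usage sketch (not part of the upstream file): keep at most
+// five ".wal" files in dir, scanning every 30 seconds until stop is closed.
+// An error surfaces at most once on the returned channel, after which the
+// purge loop exits.
+//
+// stop := make(chan struct{})
+// errc := PurgeFile(dir, "wal", 5, 30*time.Second, stop)
+// go func() {
+// plog.Fatalf("purge failed: %v", <-errc)
+// }()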
diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go
new file mode 100644
index 0000000000..cd7fff08f6
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go
@@ -0,0 +1,26 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package fileutil
+
+import "os"
+
+// Fdatasync is similar to fsync(), but does not flush modified metadata
+// unless that metadata is needed in order to allow a subsequent data retrieval
+// to be correctly handled.
+func Fdatasync(f *os.File) error {
+ return f.Sync()
+}
diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
new file mode 100644
index 0000000000..14c4b4808e
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
@@ -0,0 +1,29 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+)
+
+// Fdatasync is similar to fsync(), but does not flush modified metadata
+// unless that metadata is needed in order to allow a subsequent data retrieval
+// to be correctly handled.
+func Fdatasync(f *os.File) error {
+ return syscall.Fdatasync(int(f.Fd()))
+}
diff --git a/vendor/src/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/src/github.com/coreos/etcd/pkg/idutil/id.go
new file mode 100644
index 0000000000..6f1d379112
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/idutil/id.go
@@ -0,0 +1,78 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package idutil implements utility functions for generating unique,
+// randomized ids.
+package idutil
+
+import (
+ "math"
+ "sync"
+ "time"
+)
+
+const (
+ tsLen = 5 * 8
+ cntLen = 8
+ suffixLen = tsLen + cntLen
+)
+
+// Generator generates unique identifiers based on counters, timestamps, and
+// a node member ID.
+//
+// The initial id is in this format:
+// the high order 2 bytes are the member ID, the next 5 bytes come from
+// the timestamp, and the low order byte (the counter) starts at 0.
+// |  prefix  |       suffix       |
+// |  2 bytes |  5 bytes  | 1 byte |
+// | memberID | timestamp |  cnt   |
+//
+// The 5 timestamp bytes differ across restarts as long as the machine is
+// restarted more than 1 ms and less than 35 years after the previous start.
+//
+// The generator increments the suffix to produce the next id.
+// The count field may overflow into the timestamp field; this is
+// intentional and extends the event window to 2^56. Ids generated after a
+// restart remain unique because etcd throughput is well below
+// 256 requests/ms (250k requests/second).
+type Generator struct {
+ mu sync.Mutex
+ // high order 2 bytes
+ prefix uint64
+ // low order 6 bytes
+ suffix uint64
+}
+
+func NewGenerator(memberID uint16, now time.Time) *Generator {
+ prefix := uint64(memberID) << suffixLen
+ unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond)
+ suffix := lowbit(unixMilli, tsLen) << cntLen
+ return &Generator{
+ prefix: prefix,
+ suffix: suffix,
+ }
+}
+
+// Next generates the next unique id.
+func (g *Generator) Next() uint64 {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ g.suffix++
+ id := g.prefix | lowbit(g.suffix, suffixLen)
+ return id
+}
+
+func lowbit(x uint64, n uint) uint64 {
+ return x & (math.MaxUint64 >> (64 - n))
+}
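+
+// Illustrative usage sketch (not part of the upstream file): member ID 0x1
+// seeds the high-order two bytes, and successive calls yield increasing
+// ids within the 2^56 suffix window.
+//
+// g := idutil.NewGenerator(0x1, time.Now())
+// first := g.Next()
+// second := g.Next() // second == first+1 unless the suffix wrapped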
diff --git a/vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go b/vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go
new file mode 100644
index 0000000000..9d640a8af6
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go
@@ -0,0 +1,60 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil defines interfaces for handling Protocol Buffer objects.
+package pbutil
+
+import "github.com/coreos/pkg/capnslog"
+
+var (
+ plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "pbutil")
+)
+
+type Marshaler interface {
+ Marshal() (data []byte, err error)
+}
+
+type Unmarshaler interface {
+ Unmarshal(data []byte) error
+}
+
+func MustMarshal(m Marshaler) []byte {
+ d, err := m.Marshal()
+ if err != nil {
+ plog.Panicf("marshal should never fail (%v)", err)
+ }
+ return d
+}
+
+func MustUnmarshal(um Unmarshaler, data []byte) {
+ if err := um.Unmarshal(data); err != nil {
+ plog.Panicf("unmarshal should never fail (%v)", err)
+ }
+}
+
+func MaybeUnmarshal(um Unmarshaler, data []byte) bool {
+ if err := um.Unmarshal(data); err != nil {
+ return false
+ }
+ return true
+}
+
+func GetBool(v *bool) (vv bool, set bool) {
+ if v == nil {
+ return false, false
+ }
+ return *v, true
+}
+
+func Boolp(b bool) *bool { return &b }
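+
+// Illustrative usage sketch (not part of the upstream file): the Must*
+// helpers are meant for messages that are known-good by construction, so a
+// failure is a programmer error rather than bad input. msg is an assumed
+// value implementing both Marshaler and Unmarshaler.
+//
+// data := pbutil.MustMarshal(msg)
+// pbutil.MustUnmarshal(msg, data) // panics only on programmer error
+// ok := pbutil.MaybeUnmarshal(msg, data) // tolerant variant: reports success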
diff --git a/vendor/src/github.com/coreos/etcd/raft/design.md b/vendor/src/github.com/coreos/etcd/raft/design.md
new file mode 100644
index 0000000000..7bc0531dce
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/design.md
@@ -0,0 +1,57 @@
+## Progress
+
+Progress represents a follower's progress in the view of the leader. The leader maintains the progress of every follower, and sends `replication message`s to the follower based on its progress.
+
+`replication message` is a `msgApp` with log entries.
+
+A progress has two attributes: `match` and `next`. `match` is the index of the highest known matched entry. If the leader knows nothing about the follower's replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. The leader puts the entries from `next` up to its latest entry into the next `replication message`.
+
+A progress is in one of three states: `probe`, `replicate`, or `snapshot`.
+
+```
+ +--------------------------------------------------------+
+ | send snapshot |
+ | |
+ +---------+----------+ +----------v---------+
+ +---> probe | | snapshot |
+ | | max inflight = 1 <----------------------------------+ max inflight = 0 |
+ | +---------+----------+ +--------------------+
+ | | 1. snapshot success
+ | | (next=snapshot.index + 1)
+ | | 2. snapshot failure
+ | | (no change)
+ | | 3. receives msgAppResp(rej=false&&index>lastsnap.index)
+ | | (match=m.index,next=match+1)
+receives msgAppResp(rej=true)
+(next=match+1)| |
+ | |
+ | |
+ | | receives msgAppResp(rej=false&&index>match)
+ | | (match=m.index,next=match+1)
+ | |
+ | |
+ | |
+ | +---------v----------+
+ | | replicate |
+ +---+ max inflight = n |
+ +--------------------+
+```
+
+When the progress of a follower is in `probe` state, the leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message`s slowly, probing the actual progress of the follower. A `msgHeartbeatResp` or a rejecting `msgAppResp` may trigger the sending of the next `replication message`.
+
+When the progress of a follower is in `replicate` state, the leader sends a `replication message`, then optimistically increases `next` past the latest entry sent. This is an optimized state for replicating log entries to the follower quickly.
+
+When the progress of a follower is in `snapshot` state, the leader stops sending `replication message`s.
+
+A newly elected leader sets the progress of every follower to `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends `replication message`s to the follower and probes its progress.
+
+A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, the leader starts to stream log entries to the follower quickly. The progress falls back to `probe` when the follower replies with a rejection `msgAppResp` or when the link layer reports that the follower is unreachable. We aggressively reset `next` to `match`+1, since if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in the `msgAppResp`. (We might end up sending some duplicate entries when we aggressively reset `next` too low; see the open question.)
+
+A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits for the previously sent snapshot to succeed, fail, or be aborted. The progress goes back to `probe` after the sending result is applied.
+
+### Flow Control
+
+1. Limit the max size of each message. The max should be configurable.
+This lowers the cost of the probing state, since we limit the size per message; it also lowers the penalty when `next` is aggressively decreased to too low a value.
+
+2. Limit the number of in-flight messages to less than N when in `replicate` state. N should be configurable. Most implementations have a sending buffer on top of the actual network transport layer (so sending does not block the raft node). We want to make sure raft does not overflow that buffer, which could cause messages to be dropped and trigger a bunch of unnecessary, repeated resending.
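+
+The reaction to `msgAppResp` described above can be summarized in a minimal
+sketch (illustrative only; the type and function names are invented here and
+this is not the actual etcd implementation):
+
+```
+type progressState int
+
+const (
+	probe progressState = iota
+	replicate
+	snapshot
+)
+
+// onAppResp sketches how a leader updates a follower's progress state and
+// next index when a msgAppResp arrives.
+func onAppResp(state progressState, rejected bool, index, match uint64) (progressState, uint64) {
+	if rejected {
+		return probe, match + 1 // aggressively reset next
+	}
+	return replicate, index + 1 // match advances to index, next follows
+}
+```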
diff --git a/vendor/src/github.com/coreos/etcd/raft/doc.go b/vendor/src/github.com/coreos/etcd/raft/doc.go
new file mode 100644
index 0000000000..6ed0bcfadb
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/doc.go
@@ -0,0 +1,293 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package raft sends and receives messages in the Protocol Buffer format
+defined in the raftpb package.
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+A simple example application, _raftexample_, is also available to help illustrate
+how to use this package in practice:
+https://github.com/coreos/etcd/tree/master/contrib/raftexample
+
+Usage
+
+The primary object in raft is a Node. You either start a Node from scratch
+using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a node from scratch:
+
+ storage := raft.NewMemoryStorage()
+ c := &Config{
+ ID: 0x01,
+ ElectionTick: 10,
+ HeartbeatTick: 1,
+ Storage: storage,
+ MaxSizePerMsg: 4096,
+ MaxInflightMsgs: 256,
+ }
+ n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+
+To restart a node from previous state:
+
+ storage := raft.NewMemoryStorage()
+
+ // recover the in-memory storage from persistent
+ // snapshot, state and entries.
+ storage.ApplySnapshot(snapshot)
+ storage.SetHardState(state)
+ storage.Append(entries)
+
+ c := &Config{
+ ID: 0x01,
+ ElectionTick: 10,
+ HeartbeatTick: 1,
+ Storage: storage,
+ MaxSizePerMsg: 4096,
+ MaxInflightMsgs: 256,
+ }
+
+ // restart raft without peer information.
+ // peer information is already included in the storage.
+ n := raft.RestartNode(c)
+
+Now that you are holding onto a Node you have a few responsibilities:
+
+First, you must read from the Node.Ready() channel and process the updates
+it contains. These steps may be performed in parallel, except as noted in step
+2.
+
+1. Write HardState, Entries, and Snapshot to persistent storage if they are
+not empty. Note that when writing an Entry with Index i, any
+previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that
+no messages be sent until after the latest HardState has been persisted to disk,
+and all Entries written by any previous Ready batch (Messages may be sent while
+entries from the same batch are being persisted). To reduce the I/O latency, an
+optimization can be applied to make leader write to disk in parallel with its
+followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
+MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
+large).
+
+Note: Marshalling messages is not thread-safe; it is important that you
+make sure that no new entries are persisted while marshalling.
+The easiest way to achieve this is to serialise the messages directly inside
+your main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine.
+If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
+to apply it to the node. The configuration change may be cancelled at this point
+by setting the NodeID field to zero before calling ApplyConfChange
+(but ApplyConfChange must be called one way or the other, and the decision to cancel
+must be based solely on the state machine and not external information such as
+the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates.
+This may be done at any time after step 1, although all updates must be processed
+in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an
+implementation of the Storage interface. The provided MemoryStorage
+type can be used for this (if you repopulate its state upon a
+restart), or you can supply your own disk-backed implementation.
+
+Third, when you receive a message from another node, pass it to Node.Step:
+
+ func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+ n.Step(ctx, m)
+ }
+
+Finally, you need to call Node.Tick() at regular intervals (probably
+via a time.Ticker). Raft has two important timeouts: heartbeat and the
+election timeout. However, internally to the raft package time is
+represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+ for {
+ select {
+ case <-s.Ticker:
+ n.Tick()
+ case rd := <-s.Node.Ready():
+ saveToStorage(rd.State, rd.Entries, rd.Snapshot)
+ send(rd.Messages)
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ processSnapshot(rd.Snapshot)
+ }
+ for _, entry := range rd.CommittedEntries {
+ process(entry)
+ if entry.Type == raftpb.EntryConfChange {
+ var cc raftpb.ConfChange
+ cc.Unmarshal(entry.Data)
+ s.Node.ApplyConfChange(cc)
+ }
+ }
+ s.Node.Advance()
+ case <-s.done:
+ return
+ }
+ }
+
+To propose changes to the state machine from your node, take your application
+data, serialize it into a byte slice, and call:
+
+ n.Propose(ctx, data)
+
+If the proposal is committed, data will appear in committed entries with type
+raftpb.EntryNormal. There is no guarantee that a proposed command will be
+committed; you may have to re-propose after a timeout.
+
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+ n.ProposeConfChange(ctx, cc)
+
+After the config change is committed, a committed entry with type
+raftpb.EntryConfChange will be returned. You must apply it to the node through:
+
+ var cc raftpb.ConfChange
+ cc.Unmarshal(data)
+ n.ApplyConfChange(cc)
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+Implementation notes
+
+This implementation is up to date with the final Raft thesis
+(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
+implementation of the membership change protocol differs somewhat from
+that described in chapter 4. The key invariant that membership changes
+happen one node at a time is preserved, but in our implementation the
+membership change takes effect when its entry is applied, not when it
+is added to the log (so the entry is committed under the old
+membership instead of the new). This is equivalent in terms of safety,
+since the old and new configurations are guaranteed to overlap.
+
+To ensure that we do not attempt to commit two membership changes at
+once by matching log positions (which would be unsafe since they
+should have different quorum requirements), we simply disallow any
+proposed membership change while any uncommitted change appears in
+the leader's log.
+
+This approach introduces a problem when you try to remove a member
+from a two-member cluster: If one of the members dies before the
+other one receives the commit of the confchange entry, then the member
+cannot be removed any more since the cluster cannot make progress.
+For this reason it is highly recommended to use three or more nodes in
+every cluster.
+
+MessageType
+
+Package raft sends and receives messages in the Protocol Buffer format
+(defined in the raftpb package). Each state (follower, candidate, leader) implements its
+own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
+advancing with the given raftpb.Message. Each step is determined by its
+raftpb.MessageType. Note that every step is checked by one common method
+'Step' that safety-checks the terms of node and incoming message to prevent
+stale log entries:
+
+ 'MsgHup' is used for election. If a node is a follower or candidate, the
+ 'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
+ candidate has not received any heartbeat before the election timeout, it
+ passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
+ start a new election.
+
+ 'MsgBeat' is an internal type that signals the leader to send a heartbeat
+ of the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
+ the 'raft' struct is set to 'tickHeartbeat', which triggers the periodic
+ sending of 'MsgHeartbeat' messages to its followers.
+
+ 'MsgProp' proposes to append data to the log entries. This is a special
+ type used to redirect proposals to the leader. Therefore, the send method
+ overwrites raftpb.Message's term with its HardState's term to avoid
+ attaching its local term to 'MsgProp'. When 'MsgProp' is passed to the
+ leader's 'Step' method, the leader first calls the 'appendEntry' method
+ to append entries to its log, and then calls the 'bcastAppend' method to
+ send those entries to its peers. When passed to a candidate, 'MsgProp' is
+ dropped. When passed to a follower, 'MsgProp' is stored in the follower's
+ mailbox (msgs) by the send method. It is stored with the sender's ID and
+ later forwarded to the leader by the rafthttp package.
+
+ 'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
+ which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
+ type. When 'MsgApp' is passed to a candidate's Step method, the candidate
+ reverts to follower, because it indicates that there is a valid leader
+ sending 'MsgApp' messages. Candidates and followers respond to this
+ message with a 'MsgAppResp'.
+
+ 'MsgAppResp' is the response to a log replication request ('MsgApp'). When
+ 'MsgApp' is passed to a candidate's or follower's Step method, it responds
+ by calling the 'handleAppendEntries' method, which sends a 'MsgAppResp' to
+ the raft mailbox.
+
+ 'MsgVote' requests votes for an election. When a node is a follower or
+ candidate and 'MsgHup' is passed to its Step method, the node calls the
+ 'campaign' method to campaign to become the leader. Once 'campaign' is
+ called, the node becomes a candidate and sends 'MsgVote' to peers in the
+ cluster to request votes. When passed to the leader's or a candidate's
+ Step method with a Term lower than the leader's or candidate's, 'MsgVote'
+ will be rejected ('MsgVoteResp' is returned with Reject true). If a
+ leader or candidate receives a 'MsgVote' with a higher term, it reverts
+ to follower. When 'MsgVote' is passed to a follower, it votes for the
+ sender only when the sender's last log term is greater than the
+ follower's, or when the sender's last log term equals the follower's and
+ the sender's last log index is greater than or equal to the follower's.
+
+ 'MsgVoteResp' contains responses to a vote request. When 'MsgVoteResp' is
+ passed to a candidate, the candidate tallies how many votes it has won. If
+ it has more than a majority (quorum), it becomes the leader and calls
+ 'bcastAppend'. If the candidate receives a majority of denials, it reverts
+ to follower.
+
+ 'MsgSnap' requests the installation of a snapshot. When a node has just
+ become the leader, or when the leader receives a 'MsgProp' message, it
+ calls the 'bcastAppend' method, which then calls the 'sendAppend' method
+ for each follower. In 'sendAppend', if the leader fails to get the term
+ or entries, it requests a snapshot by sending a 'MsgSnap' message.
+
+ 'MsgSnapStatus' tells the result of a snapshot install message. When a
+ follower rejected 'MsgSnap', it indicates that the snapshot request
+ carried by 'MsgSnap' failed, usually because network issues prevented
+ the network layer from delivering the snapshot to the follower. The
+ leader then considers the follower's progress as probe. When 'MsgSnap'
+ was not rejected, it indicates that the snapshot succeeded, and the
+ leader sets the follower's progress to probe and resumes log replication.
+
+ 'MsgHeartbeat' sends a heartbeat from the leader. When 'MsgHeartbeat' is
+ passed to a candidate and the message's term is higher than the
+ candidate's, the candidate reverts to follower, updates its committed
+ index from the one in the heartbeat, and sends the message to its
+ mailbox. When 'MsgHeartbeat' is passed to a follower's Step method and
+ the message's term is higher than the follower's, the follower updates
+ its leaderID with the ID from the message.
+
+ 'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
+ is passed to the leader's Step method, the leader knows which follower
+ responded. Only when the leader's last committed index is greater than the
+ follower's Match index does the leader run the 'sendAppend' method.
+
+ 'MsgUnreachable' reports that a request (message) wasn't delivered. When
+ 'MsgUnreachable' is passed to the leader's Step method, the leader learns
+ that the follower indicated by the 'MsgUnreachable' is not reachable,
+ which often means a 'MsgApp' was lost. When the follower's progress state
+ is replicate, the leader sets it back to probe.
+
+*/
+package raft
diff --git a/vendor/src/github.com/coreos/etcd/raft/log.go b/vendor/src/github.com/coreos/etcd/raft/log.go
new file mode 100644
index 0000000000..99cd1b31ef
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/log.go
@@ -0,0 +1,361 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "fmt"
+ "log"
+
+ pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+type raftLog struct {
+ // storage contains all stable entries since the last snapshot.
+ storage Storage
+
+ // unstable contains all unstable entries and snapshot.
+ // they will be saved into storage.
+ unstable unstable
+
+ // committed is the highest log position that is known to be in
+ // stable storage on a quorum of nodes.
+ committed uint64
+ // applied is the highest log position that the application has
+ // been instructed to apply to its state machine.
+ // Invariant: applied <= committed
+ applied uint64
+
+ logger Logger
+}
+
+// newLog returns a log using the given storage. It recovers the log to the
+// state where it has just committed and applied the latest snapshot.
+func newLog(storage Storage, logger Logger) *raftLog {
+ if storage == nil {
+ log.Panic("storage must not be nil")
+ }
+ log := &raftLog{
+ storage: storage,
+ logger: logger,
+ }
+ firstIndex, err := storage.FirstIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ lastIndex, err := storage.LastIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ log.unstable.offset = lastIndex + 1
+ log.unstable.logger = logger
+ // Initialize our committed and applied pointers to the time of the last compaction.
+ log.committed = firstIndex - 1
+ log.applied = firstIndex - 1
+
+ return log
+}
+
+func (l *raftLog) String() string {
+ return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
+}
+
+// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
+// it returns (last index of new entries, true).
+func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
+ lastnewi = index + uint64(len(ents))
+ if l.matchTerm(index, logTerm) {
+ ci := l.findConflict(ents)
+ switch {
+ case ci == 0:
+ case ci <= l.committed:
+ l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
+ default:
+ offset := index + 1
+ l.append(ents[ci-offset:]...)
+ }
+ l.commitTo(min(committed, lastnewi))
+ return lastnewi, true
+ }
+ return 0, false
+}
+
+func (l *raftLog) append(ents ...pb.Entry) uint64 {
+ if len(ents) == 0 {
+ return l.lastIndex()
+ }
+ if after := ents[0].Index - 1; after < l.committed {
+ l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
+ }
+ l.unstable.truncateAndAppend(ents)
+ return l.lastIndex()
+}
+
+// findConflict finds the index of the conflict.
+// It returns the first pair of conflicting entries between the existing
+// entries and the given entries, if there are any.
+// If there are no conflicting entries, and the existing entries contain
+// all the given entries, zero will be returned.
+// If there are no conflicting entries, but the given entries contain new
+// entries, the index of the first new entry will be returned.
+// An entry is considered to be conflicting if it has the same index but
+// a different term.
+// The first entry MUST have an index equal to the argument 'from'.
+// The index of the given entries MUST be continuously increasing.
+func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
+ for _, ne := range ents {
+ if !l.matchTerm(ne.Index, ne.Term) {
+ if ne.Index <= l.lastIndex() {
+ l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
+ ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
+ }
+ return ne.Index
+ }
+ }
+ return 0
+}
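+
+// Illustrative example (not part of the upstream file): with existing terms
+// [1, 1, 2] at indexes 1..3, findConflict on {index:2,term:1},{index:3,term:3}
+// returns 3, because index 3 exists with a different term; on
+// {index:4,term:2} alone it returns 4, the first new entry.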
+
+func (l *raftLog) unstableEntries() []pb.Entry {
+ if len(l.unstable.entries) == 0 {
+ return nil
+ }
+ return l.unstable.entries
+}
+
+// nextEnts returns all the available entries for execution.
+// If applied is smaller than the index of snapshot, it returns all committed
+// entries after the index of snapshot.
+func (l *raftLog) nextEnts() (ents []pb.Entry) {
+ off := max(l.applied+1, l.firstIndex())
+ if l.committed+1 > off {
+ ents, err := l.slice(off, l.committed+1, noLimit)
+ if err != nil {
+ l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
+ }
+ return ents
+ }
+ return nil
+}
+
+// hasNextEnts reports whether there are any available entries for execution.
+// This is a fast check that avoids the heavy raftLog.slice() call made in
+// raftLog.nextEnts().
+func (l *raftLog) hasNextEnts() bool {
+ off := max(l.applied+1, l.firstIndex())
+ return l.committed+1 > off
+}
+
+func (l *raftLog) snapshot() (pb.Snapshot, error) {
+ if l.unstable.snapshot != nil {
+ return *l.unstable.snapshot, nil
+ }
+ return l.storage.Snapshot()
+}
+
+func (l *raftLog) firstIndex() uint64 {
+ if i, ok := l.unstable.maybeFirstIndex(); ok {
+ return i
+ }
+ index, err := l.storage.FirstIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ return index
+}
+
+func (l *raftLog) lastIndex() uint64 {
+ if i, ok := l.unstable.maybeLastIndex(); ok {
+ return i
+ }
+ i, err := l.storage.LastIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ return i
+}
+
+func (l *raftLog) commitTo(tocommit uint64) {
+ // never decrease commit
+ if l.committed < tocommit {
+ if l.lastIndex() < tocommit {
+ l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
+ }
+ l.committed = tocommit
+ }
+}
+
+func (l *raftLog) appliedTo(i uint64) {
+ if i == 0 {
+ return
+ }
+ if l.committed < i || i < l.applied {
+ l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
+ }
+ l.applied = i
+}
+
+func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) }
+
+func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
+
+func (l *raftLog) lastTerm() uint64 {
+ t, err := l.term(l.lastIndex())
+ if err != nil {
+ l.logger.Panicf("unexpected error when getting the last term (%v)", err)
+ }
+ return t
+}
+
+func (l *raftLog) term(i uint64) (uint64, error) {
+ // the valid term range is [index of dummy entry, last index]
+ dummyIndex := l.firstIndex() - 1
+ if i < dummyIndex || i > l.lastIndex() {
+ // TODO: return an error instead?
+ return 0, nil
+ }
+
+ if t, ok := l.unstable.maybeTerm(i); ok {
+ return t, nil
+ }
+
+ t, err := l.storage.Term(i)
+ if err == nil {
+ return t, nil
+ }
+ if err == ErrCompacted {
+ return 0, err
+ }
+ panic(err) // TODO(bdarnell)
+}
+
+func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
+ if i > l.lastIndex() {
+ return nil, nil
+ }
+ return l.slice(i, l.lastIndex()+1, maxsize)
+}
+
+// allEntries returns all entries in the log.
+func (l *raftLog) allEntries() []pb.Entry {
+ ents, err := l.entries(l.firstIndex(), noLimit)
+ if err == nil {
+ return ents
+ }
+ if err == ErrCompacted { // try again if there was a racing compaction
+ return l.allEntries()
+ }
+ // TODO (xiangli): handle error?
+ panic(err)
+}
+
+// isUpToDate determines if the given (lastIndex,term) log is more up-to-date
+// by comparing the index and term of the last entries in the existing logs.
+// If the logs have last entries with different terms, then the log with the
+// later term is more up-to-date. If the logs end with the same term, then
+// whichever log has the larger lastIndex is more up-to-date. If the logs are
+// the same, the given log is up-to-date.
+func (l *raftLog) isUpToDate(lasti, term uint64) bool {
+ return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
+}
+
+func (l *raftLog) matchTerm(i, term uint64) bool {
+ t, err := l.term(i)
+ if err != nil {
+ return false
+ }
+ return t == term
+}
+
+func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
+ if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
+ l.commitTo(maxIndex)
+ return true
+ }
+ return false
+}
+
+func (l *raftLog) restore(s pb.Snapshot) {
+ l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
+ l.committed = s.Metadata.Index
+ l.unstable.restore(s)
+}
+
+// slice returns a slice of log entries from lo through hi-1, inclusive.
+func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+ err := l.mustCheckOutOfBounds(lo, hi)
+ if err != nil {
+ return nil, err
+ }
+ if lo == hi {
+ return nil, nil
+ }
+ var ents []pb.Entry
+ if lo < l.unstable.offset {
+ storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
+ if err == ErrCompacted {
+ return nil, err
+ } else if err == ErrUnavailable {
+ l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset))
+ } else if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+
+ // check if ents has reached the size limitation
+ if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
+ return storedEnts, nil
+ }
+
+ ents = storedEnts
+ }
+ if hi > l.unstable.offset {
+ unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
+ if len(ents) > 0 {
+ ents = append([]pb.Entry{}, ents...)
+ ents = append(ents, unstable...)
+ } else {
+ ents = unstable
+ }
+ }
+ return limitSize(ents, maxSize), nil
+}
+
+// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries)
+func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
+ if lo > hi {
+ l.logger.Panicf("invalid slice %d > %d", lo, hi)
+ }
+ fi := l.firstIndex()
+ if lo < fi {
+ return ErrCompacted
+ }
+
+ length := l.lastIndex() - fi + 1
+ if lo < fi || hi > fi+length {
+ l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
+ }
+ return nil
+}
+
+func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 {
+ if err == nil {
+ return t
+ }
+ if err == ErrCompacted {
+ return 0
+ }
+ l.logger.Panicf("unexpected error (%v)", err)
+ return 0
+}
diff --git a/vendor/src/github.com/coreos/etcd/raft/log_unstable.go b/vendor/src/github.com/coreos/etcd/raft/log_unstable.go
new file mode 100644
index 0000000000..df90178f9a
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/log_unstable.go
@@ -0,0 +1,139 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "github.com/coreos/etcd/raft/raftpb"
+
+// unstable.entries[i] has raft log position i+unstable.offset.
+// Note that unstable.offset may be less than the highest log
+// position in storage; this means that the next write to storage
+// might need to truncate the log before persisting unstable.entries.
+type unstable struct {
+ // the incoming unstable snapshot, if any.
+ snapshot *pb.Snapshot
+ // all entries that have not yet been written to storage.
+ entries []pb.Entry
+ offset uint64
+
+ logger Logger
+}
+
+// maybeFirstIndex returns the index of the first possible entry in entries,
+// if the unstable log has a snapshot.
+func (u *unstable) maybeFirstIndex() (uint64, bool) {
+ if u.snapshot != nil {
+ return u.snapshot.Metadata.Index + 1, true
+ }
+ return 0, false
+}
+
+// maybeLastIndex returns the last index if it has at least one
+// unstable entry or snapshot.
+func (u *unstable) maybeLastIndex() (uint64, bool) {
+ if l := len(u.entries); l != 0 {
+ return u.offset + uint64(l) - 1, true
+ }
+ if u.snapshot != nil {
+ return u.snapshot.Metadata.Index, true
+ }
+ return 0, false
+}
+
+// maybeTerm returns the term of the entry at index i, if there
+// is any.
+func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
+ if i < u.offset {
+ if u.snapshot == nil {
+ return 0, false
+ }
+ if u.snapshot.Metadata.Index == i {
+ return u.snapshot.Metadata.Term, true
+ }
+ return 0, false
+ }
+
+ last, ok := u.maybeLastIndex()
+ if !ok {
+ return 0, false
+ }
+ if i > last {
+ return 0, false
+ }
+ return u.entries[i-u.offset].Term, true
+}
+
+func (u *unstable) stableTo(i, t uint64) {
+ gt, ok := u.maybeTerm(i)
+ if !ok {
+ return
+ }
+ // if i < offset, term is matched with the snapshot
+ // only update the unstable entries if term is matched with
+ // an unstable entry.
+ if gt == t && i >= u.offset {
+ u.entries = u.entries[i+1-u.offset:]
+ u.offset = i + 1
+ }
+}
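+
+// Illustrative sketch (not part of upstream etcd): with offset = 5 and
+// unstable entries at indexes 5-7, stableTo(6, t) drops entries 5 and 6
+// (now persisted) and advances offset to 7. If t does not match the term
+// stored at index 6, the call is ignored because the persisted entry was
+// superseded by a newer append.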
+
+func (u *unstable) stableSnapTo(i uint64) {
+ if u.snapshot != nil && u.snapshot.Metadata.Index == i {
+ u.snapshot = nil
+ }
+}
+
+func (u *unstable) restore(s pb.Snapshot) {
+ u.offset = s.Metadata.Index + 1
+ u.entries = nil
+ u.snapshot = &s
+}
+
+func (u *unstable) truncateAndAppend(ents []pb.Entry) {
+ after := ents[0].Index - 1
+ switch {
+ case after == u.offset+uint64(len(u.entries))-1:
+ // after is the last index in the u.entries
+ // directly append
+ u.entries = append(u.entries, ents...)
+ case after < u.offset:
+ u.logger.Infof("replace the unstable entries from index %d", after+1)
+ // The log is being truncated to before our current offset
+ // portion, so set the offset and replace the entries
+ u.offset = after + 1
+ u.entries = ents
+ default:
+ // truncate to after and copy to u.entries
+ // then append
+ u.logger.Infof("truncate the unstable entries to index %d", after)
+ u.entries = append([]pb.Entry{}, u.slice(u.offset, after+1)...)
+ u.entries = append(u.entries, ents...)
+ }
+}
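+
+// Illustrative sketch (not part of upstream etcd): with offset = 5 and
+// entries at indexes 5-7, truncateAndAppend handles the three cases as:
+//	append entries starting at 8 -> contiguous, entries become 5-9
+//	append entries starting at 3 -> before offset, offset becomes 3 and
+//	the entries are replaced wholesale
+//	append entries starting at 6 -> overlap, keep entry 5, truncate 6-7
+//	and append the new entries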
+
+func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
+ u.mustCheckOutOfBounds(lo, hi)
+ return u.entries[lo-u.offset : hi-u.offset]
+}
+
+// u.offset <= lo <= hi <= u.offset+len(u.entries)
+func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
+ if lo > hi {
+ u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
+ }
+ upper := u.offset + uint64(len(u.entries))
+ if lo < u.offset || hi > upper {
+ u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
+ }
+}
diff --git a/vendor/src/github.com/coreos/etcd/raft/logger.go b/vendor/src/github.com/coreos/etcd/raft/logger.go
new file mode 100644
index 0000000000..31c194a06b
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/logger.go
@@ -0,0 +1,126 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+)
+
+type Logger interface {
+ Debug(v ...interface{})
+ Debugf(format string, v ...interface{})
+
+ Error(v ...interface{})
+ Errorf(format string, v ...interface{})
+
+ Info(v ...interface{})
+ Infof(format string, v ...interface{})
+
+ Warning(v ...interface{})
+ Warningf(format string, v ...interface{})
+
+ Fatal(v ...interface{})
+ Fatalf(format string, v ...interface{})
+
+ Panic(v ...interface{})
+ Panicf(format string, v ...interface{})
+}
+
+func SetLogger(l Logger) { raftLogger = l }
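+
+// A minimal usage sketch (illustrative, not part of upstream etcd): callers
+// outside this package can install their own implementation, for example:
+//
+//	l := &raft.DefaultLogger{Logger: log.New(os.Stderr, "raft ", log.LstdFlags)}
+//	l.EnableDebug()
+//	raft.SetLogger(l)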
+
+var (
+ defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
+ discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
+ raftLogger = Logger(defaultLogger)
+)
+
+const (
+ calldepth = 2
+)
+
+// DefaultLogger is a default implementation of the Logger interface.
+type DefaultLogger struct {
+ *log.Logger
+ debug bool
+}
+
+func (l *DefaultLogger) EnableTimestamps() {
+ l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
+}
+
+func (l *DefaultLogger) EnableDebug() {
+ l.debug = true
+}
+
+func (l *DefaultLogger) Debug(v ...interface{}) {
+ if l.debug {
+ l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
+ }
+}
+
+func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
+ if l.debug {
+ l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
+ }
+}
+
+func (l *DefaultLogger) Info(v ...interface{}) {
+ l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Infof(format string, v ...interface{}) {
+ l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Error(v ...interface{}) {
+ l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
+ l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Warning(v ...interface{}) {
+ l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
+ l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Fatal(v ...interface{}) {
+ l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
+ os.Exit(1)
+}
+
+func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
+ l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
+ os.Exit(1)
+}
+
+func (l *DefaultLogger) Panic(v ...interface{}) {
+ l.Logger.Panic(v...)
+}
+
+func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
+ l.Logger.Panicf(format, v...)
+}
+
+func header(lvl, msg string) string {
+ return fmt.Sprintf("%s: %s", lvl, msg)
+}
diff --git a/vendor/src/github.com/coreos/etcd/raft/node.go b/vendor/src/github.com/coreos/etcd/raft/node.go
new file mode 100644
index 0000000000..c80dbc4b8a
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/node.go
@@ -0,0 +1,488 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "errors"
+
+ "golang.org/x/net/context"
+ pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+type SnapshotStatus int
+
+const (
+ SnapshotFinish SnapshotStatus = 1
+ SnapshotFailure SnapshotStatus = 2
+)
+
+var (
+ emptyState = pb.HardState{}
+
+ // ErrStopped is returned by methods on Nodes that have been stopped.
+ ErrStopped = errors.New("raft: stopped")
+)
+
+// SoftState provides state that is useful for logging and debugging.
+// The state is volatile and does not need to be persisted to the WAL.
+type SoftState struct {
+ Lead uint64
+ RaftState StateType
+}
+
+func (a *SoftState) equal(b *SoftState) bool {
+ return a.Lead == b.Lead && a.RaftState == b.RaftState
+}
+
+// Ready encapsulates the entries and messages that are ready to read,
+// be saved to stable storage, committed or sent to other peers.
+// All fields in Ready are read-only.
+type Ready struct {
+ // The current volatile state of a Node.
+ // SoftState will be nil if there is no update.
+ // It is not required to consume or store SoftState.
+ *SoftState
+
+ // The current state of a Node to be saved to stable storage BEFORE
+ // Messages are sent.
+ // HardState will be equal to empty state if there is no update.
+ pb.HardState
+
+ // Entries specifies entries to be saved to stable storage BEFORE
+ // Messages are sent.
+ Entries []pb.Entry
+
+ // Snapshot specifies the snapshot to be saved to stable storage.
+ Snapshot pb.Snapshot
+
+ // CommittedEntries specifies entries to be committed to a
+ // store/state-machine. These have previously been committed to stable
+ // store.
+ CommittedEntries []pb.Entry
+
+ // Messages specifies outbound messages to be sent AFTER Entries are
+ // committed to stable storage.
+ // If it contains a MsgSnap message, the application MUST report back to raft
+ // when the snapshot has been received or has failed by calling ReportSnapshot.
+ Messages []pb.Message
+}
+
+func isHardStateEqual(a, b pb.HardState) bool {
+ return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
+}
+
+// IsEmptyHardState returns true if the given HardState is empty.
+func IsEmptyHardState(st pb.HardState) bool {
+ return isHardStateEqual(st, emptyState)
+}
+
+// IsEmptySnap returns true if the given Snapshot is empty.
+func IsEmptySnap(sp pb.Snapshot) bool {
+ return sp.Metadata.Index == 0
+}
+
+func (rd Ready) containsUpdates() bool {
+ return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
+ !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
+ len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0
+}
+
+// Node represents a node in a raft cluster.
+type Node interface {
+ // Tick increments the internal logical clock for the Node by a single tick. Election
+ // timeouts and heartbeat timeouts are in units of ticks.
+ Tick()
+ // Campaign causes the Node to transition to candidate state and start campaigning to become leader.
+ Campaign(ctx context.Context) error
+ // Propose proposes that data be appended to the log.
+ Propose(ctx context.Context, data []byte) error
+ // ProposeConfChange proposes config change.
+ // At most one ConfChange can be in the process of going through consensus.
+ // The application needs to call ApplyConfChange when applying an EntryConfChange type entry.
+ ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
+ // Step advances the state machine using the given message. ctx.Err() will be returned, if any.
+ Step(ctx context.Context, msg pb.Message) error
+
+ // Ready returns a channel that returns the current point-in-time state.
+ // Users of the Node must call Advance after retrieving the state returned by Ready.
+ //
+ // NOTE: No committed entries from the next Ready may be applied until all committed entries
+ // and snapshots from the previous one have finished.
+ Ready() <-chan Ready
+
+ // Advance notifies the Node that the application has saved progress up to the last Ready.
+ // It prepares the node to return the next available Ready.
+ //
+ // The application should generally call Advance after it applies the entries in last Ready.
+ //
+ // However, as an optimization, the application may call Advance while it is applying the
+ // commands. For example, when the last Ready contains a snapshot, the application might take
+ // a long time to apply the snapshot data. To continue receiving Ready without blocking raft
+ // progress, it can call Advance before it finishes applying the last Ready. To make this
+ // optimization work safely, when the application receives a Ready with softState.RaftState
+ // equal to Candidate, it MUST apply all pending configuration changes if there are any.
+ //
+ // Here is a simple approach that waits for ALL pending entries to be applied:
+ // ```
+ // ...
+ // rd := <-n.Ready()
+ // go apply(rd.CommittedEntries) // optimization to apply asynchronously in FIFO order.
+ // if rd.SoftState.RaftState == StateCandidate {
+ // waitAllApplied()
+ // }
+ // n.Advance()
+ // ...
+ // ```
+ Advance()
+ // ApplyConfChange applies config change to the local node.
+ // Returns an opaque ConfState protobuf which must be recorded
+ // in snapshots. Will never return nil; it returns a pointer only
+ // to match MemoryStorage.Compact.
+ ApplyConfChange(cc pb.ConfChange) *pb.ConfState
+ // Status returns the current status of the raft state machine.
+ Status() Status
+ // ReportUnreachable reports the given node is not reachable for the last send.
+ ReportUnreachable(id uint64)
+ // ReportSnapshot reports the status of the sent snapshot.
+ ReportSnapshot(id uint64, status SnapshotStatus)
+ // Stop performs any necessary termination of the Node.
+ Stop()
+}
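+
+// A minimal driver-loop sketch (illustrative, not part of upstream etcd; the
+// ticker and the saveToStorage, send and process helpers are hypothetical):
+//
+//	for {
+//		select {
+//		case <-ticker.C:
+//			n.Tick()
+//		case rd := <-n.Ready():
+//			saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
+//			send(rd.Messages)
+//			process(rd.CommittedEntries)
+//			n.Advance()
+//		}
+//	}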
+
+type Peer struct {
+ ID uint64
+ Context []byte
+}
+
+// StartNode returns a new Node given configuration and a list of raft peers.
+// It appends a ConfChangeAddNode entry for each given peer to the initial log.
+func StartNode(c *Config, peers []Peer) Node {
+ r := newRaft(c)
+ // become the follower at term 1 and apply initial configuration
+ // entries of term 1
+ r.becomeFollower(1, None)
+ for _, peer := range peers {
+ cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+ d, err := cc.Marshal()
+ if err != nil {
+ panic("unexpected marshal error")
+ }
+ e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
+ r.raftLog.append(e)
+ }
+ // Mark these initial entries as committed.
+ // TODO(bdarnell): These entries are still unstable; do we need to preserve
+ // the invariant that committed < unstable?
+ r.raftLog.committed = r.raftLog.lastIndex()
+ // Now apply them, mainly so that the application can call Campaign
+ // immediately after StartNode in tests. Note that these nodes will
+ // be added to raft twice: here and when the application's Ready
+ // loop calls ApplyConfChange. The calls to addNode must come after
+ // all calls to raftLog.append so progress.next is set after these
+ // bootstrapping entries (it is an error if we try to append these
+ // entries since they have already been committed).
+ // We do not set raftLog.applied so the application will be able
+ // to observe all conf changes via Ready.CommittedEntries.
+ for _, peer := range peers {
+ r.addNode(peer.ID)
+ }
+
+ n := newNode()
+ go n.run(r)
+ return &n
+}
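+
+// A minimal bootstrap sketch (illustrative, not part of upstream etcd; it
+// assumes the package's MemoryStorage from storage.go):
+//
+//	storage := NewMemoryStorage()
+//	c := &Config{
+//		ID:              1,
+//		ElectionTick:    10,
+//		HeartbeatTick:   1,
+//		Storage:         storage,
+//		MaxSizePerMsg:   4096,
+//		MaxInflightMsgs: 256,
+//	}
+//	n := StartNode(c, []Peer{{ID: 1}, {ID: 2}, {ID: 3}})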
+
+// RestartNode is similar to StartNode but does not take a list of peers.
+// The current membership of the cluster will be restored from the Storage.
+// If the caller has an existing state machine, pass in the last log index that
+// has been applied to it; otherwise use zero.
+func RestartNode(c *Config) Node {
+ r := newRaft(c)
+
+ n := newNode()
+ go n.run(r)
+ return &n
+}
+
+// node is the canonical implementation of the Node interface
+type node struct {
+ propc chan pb.Message
+ recvc chan pb.Message
+ confc chan pb.ConfChange
+ confstatec chan pb.ConfState
+ readyc chan Ready
+ advancec chan struct{}
+ tickc chan struct{}
+ done chan struct{}
+ stop chan struct{}
+ status chan chan Status
+}
+
+func newNode() node {
+ return node{
+ propc: make(chan pb.Message),
+ recvc: make(chan pb.Message),
+ confc: make(chan pb.ConfChange),
+ confstatec: make(chan pb.ConfState),
+ readyc: make(chan Ready),
+ advancec: make(chan struct{}),
+ tickc: make(chan struct{}),
+ done: make(chan struct{}),
+ stop: make(chan struct{}),
+ status: make(chan chan Status),
+ }
+}
+
+func (n *node) Stop() {
+ select {
+ case n.stop <- struct{}{}:
+ // Not already stopped, so trigger it
+ case <-n.done:
+ // Node has already been stopped - no need to do anything
+ return
+ }
+ // Block until the stop has been acknowledged by run()
+ <-n.done
+}
+
+func (n *node) run(r *raft) {
+ var propc chan pb.Message
+ var readyc chan Ready
+ var advancec chan struct{}
+ var prevLastUnstablei, prevLastUnstablet uint64
+ var havePrevLastUnstablei bool
+ var prevSnapi uint64
+ var rd Ready
+
+ lead := None
+ prevSoftSt := r.softState()
+ prevHardSt := emptyState
+
+ for {
+ if advancec != nil {
+ readyc = nil
+ } else {
+ rd = newReady(r, prevSoftSt, prevHardSt)
+ if rd.containsUpdates() {
+ readyc = n.readyc
+ } else {
+ readyc = nil
+ }
+ }
+
+ if lead != r.lead {
+ if r.hasLeader() {
+ if lead == None {
+ r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
+ } else {
+ r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
+ }
+ propc = n.propc
+ } else {
+ r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
+ propc = nil
+ }
+ lead = r.lead
+ }
+
+ select {
+ // TODO: maybe buffer the config proposal if one is already in flight
+ // (the way described in the raft dissertation).
+ // Currently it is dropped silently in Step.
+ case m := <-propc:
+ m.From = r.id
+ r.Step(m)
+ case m := <-n.recvc:
+ // filter out response message from unknown From.
+ if _, ok := r.prs[m.From]; ok || !IsResponseMsg(m) {
+ r.Step(m) // raft never returns an error
+ }
+ case cc := <-n.confc:
+ if cc.NodeID == None {
+ r.resetPendingConf()
+ select {
+ case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
+ case <-n.done:
+ }
+ break
+ }
+ switch cc.Type {
+ case pb.ConfChangeAddNode:
+ r.addNode(cc.NodeID)
+ case pb.ConfChangeRemoveNode:
+ // block incoming proposal when local node is
+ // removed
+ if cc.NodeID == r.id {
+ n.propc = nil
+ }
+ r.removeNode(cc.NodeID)
+ case pb.ConfChangeUpdateNode:
+ r.resetPendingConf()
+ default:
+ panic("unexpected conf type")
+ }
+ select {
+ case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
+ case <-n.done:
+ }
+ case <-n.tickc:
+ r.tick()
+ case readyc <- rd:
+ if rd.SoftState != nil {
+ prevSoftSt = rd.SoftState
+ }
+ if len(rd.Entries) > 0 {
+ prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index
+ prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term
+ havePrevLastUnstablei = true
+ }
+ if !IsEmptyHardState(rd.HardState) {
+ prevHardSt = rd.HardState
+ }
+ if !IsEmptySnap(rd.Snapshot) {
+ prevSnapi = rd.Snapshot.Metadata.Index
+ }
+ r.msgs = nil
+ advancec = n.advancec
+ case <-advancec:
+ if prevHardSt.Commit != 0 {
+ r.raftLog.appliedTo(prevHardSt.Commit)
+ }
+ if havePrevLastUnstablei {
+ r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet)
+ havePrevLastUnstablei = false
+ }
+ r.raftLog.stableSnapTo(prevSnapi)
+ advancec = nil
+ case c := <-n.status:
+ c <- getStatus(r)
+ case <-n.stop:
+ close(n.done)
+ return
+ }
+ }
+}
+
+// Tick increments the internal logical clock for this Node. Election timeouts
+// and heartbeat timeouts are in units of ticks.
+func (n *node) Tick() {
+ select {
+ case n.tickc <- struct{}{}:
+ case <-n.done:
+ }
+}
+
+func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
+
+func (n *node) Propose(ctx context.Context, data []byte) error {
+ return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
+}
+
+func (n *node) Step(ctx context.Context, m pb.Message) error {
+ // ignore unexpected local messages received over the network
+ if IsLocalMsg(m) {
+ // TODO: return an error?
+ return nil
+ }
+ return n.step(ctx, m)
+}
+
+func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
+ data, err := cc.Marshal()
+ if err != nil {
+ return err
+ }
+ return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}})
+}
+
+// step advances the state machine using the given message m. ctx.Err() is
+// returned, if any.
+func (n *node) step(ctx context.Context, m pb.Message) error {
+ ch := n.recvc
+ if m.Type == pb.MsgProp {
+ ch = n.propc
+ }
+
+ select {
+ case ch <- m:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-n.done:
+ return ErrStopped
+ }
+}
+
+func (n *node) Ready() <-chan Ready { return n.readyc }
+
+func (n *node) Advance() {
+ select {
+ case n.advancec <- struct{}{}:
+ case <-n.done:
+ }
+}
+
+func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
+ var cs pb.ConfState
+ select {
+ case n.confc <- cc:
+ case <-n.done:
+ }
+ select {
+ case cs = <-n.confstatec:
+ case <-n.done:
+ }
+ return &cs
+}
+
+func (n *node) Status() Status {
+ c := make(chan Status)
+ n.status <- c
+ return <-c
+}
+
+func (n *node) ReportUnreachable(id uint64) {
+ select {
+ case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
+ case <-n.done:
+ }
+}
+
+func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
+ rej := status == SnapshotFailure
+
+ select {
+ case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
+ case <-n.done:
+ }
+}
+
+func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
+ rd := Ready{
+ Entries: r.raftLog.unstableEntries(),
+ CommittedEntries: r.raftLog.nextEnts(),
+ Messages: r.msgs,
+ }
+ if softSt := r.softState(); !softSt.equal(prevSoftSt) {
+ rd.SoftState = softSt
+ }
+ if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
+ rd.HardState = hardSt
+ }
+ if r.raftLog.unstable.snapshot != nil {
+ rd.Snapshot = *r.raftLog.unstable.snapshot
+ }
+ return rd
+}
diff --git a/vendor/src/github.com/coreos/etcd/raft/progress.go b/vendor/src/github.com/coreos/etcd/raft/progress.go
new file mode 100644
index 0000000000..11f53409d4
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/progress.go
@@ -0,0 +1,245 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import "fmt"
+
+const (
+ ProgressStateProbe ProgressStateType = iota
+ ProgressStateReplicate
+ ProgressStateSnapshot
+)
+
+type ProgressStateType uint64
+
+var prstmap = [...]string{
+ "ProgressStateProbe",
+ "ProgressStateReplicate",
+ "ProgressStateSnapshot",
+}
+
+func (st ProgressStateType) String() string { return prstmap[uint64(st)] }
+
+// Progress represents a follower's progress in the view of the leader. The leader
+// maintains the progress of all followers, and sends entries to each follower based on its progress.
+type Progress struct {
+ Match, Next uint64
+ // State defines how the leader should interact with the follower.
+ //
+ // When in ProgressStateProbe, leader sends at most one replication message
+ // per heartbeat interval. It also probes actual progress of the follower.
+ //
+ // When in ProgressStateReplicate, leader optimistically increases next
+ // to the latest entry sent after sending replication message. This is
+ // an optimized state for fast replicating log entries to the follower.
+ //
+ // When in ProgressStateSnapshot, leader should have sent out snapshot
+ // before and stops sending any replication message.
+ State ProgressStateType
+ // Paused is used in ProgressStateProbe.
+ // When Paused is true, raft should pause sending replication message to this peer.
+ Paused bool
+ // PendingSnapshot is used in ProgressStateSnapshot.
+ // If there is a pending snapshot, the pendingSnapshot will be set to the
+ // index of the snapshot. If pendingSnapshot is set, the replication process of
+ // this Progress will be paused. raft will not resend snapshot until the pending one
+ // is reported to be failed.
+ PendingSnapshot uint64
+
+ // RecentActive is true if the progress is recently active. Receiving any messages
+ // from the corresponding follower indicates the progress is active.
+ // RecentActive can be reset to false after an election timeout.
+ RecentActive bool
+
+ // inflights is a sliding window for the inflight messages.
+ // When inflights is full, no more message should be sent.
+ // When a leader sends out a message, the index of the last
+ // entry should be added to inflights. The index MUST be added
+ // into inflights in order.
+ // When a leader receives a reply, the previous inflights should
+ // be freed by calling inflights.freeTo.
+ ins *inflights
+}
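+
+// Illustrative summary (not part of upstream etcd): a follower's Progress
+// typically moves probe -> replicate once an append is acknowledged via
+// maybeUpdate, falls back to probe on rejection or unreachability, and
+// detours through snapshot when the follower is too far behind the portion
+// of the log the leader still has.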
+
+func (pr *Progress) resetState(state ProgressStateType) {
+ pr.Paused = false
+ pr.RecentActive = false
+ pr.PendingSnapshot = 0
+ pr.State = state
+ pr.ins.reset()
+}
+
+func (pr *Progress) becomeProbe() {
+ // If the original state is ProgressStateSnapshot, progress knows that
+ // the pending snapshot has been sent to this peer successfully, then
+ // probes from pendingSnapshot + 1.
+ if pr.State == ProgressStateSnapshot {
+ pendingSnapshot := pr.PendingSnapshot
+ pr.resetState(ProgressStateProbe)
+ pr.Next = max(pr.Match+1, pendingSnapshot+1)
+ } else {
+ pr.resetState(ProgressStateProbe)
+ pr.Next = pr.Match + 1
+ }
+}
+
+func (pr *Progress) becomeReplicate() {
+ pr.resetState(ProgressStateReplicate)
+ pr.Next = pr.Match + 1
+}
+
+func (pr *Progress) becomeSnapshot(snapshoti uint64) {
+ pr.resetState(ProgressStateSnapshot)
+ pr.PendingSnapshot = snapshoti
+}
+
+// maybeUpdate returns false if the given index n comes from an outdated message.
+// Otherwise it updates the progress and returns true.
+func (pr *Progress) maybeUpdate(n uint64) bool {
+ var updated bool
+ if pr.Match < n {
+ pr.Match = n
+ updated = true
+ pr.resume()
+ }
+ if pr.Next < n+1 {
+ pr.Next = n + 1
+ }
+ return updated
+}
+
+func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 }
+
+// maybeDecrTo returns false if the given rejected index comes from an out-of-order message.
+// Otherwise it decreases the progress's next index to min(rejected, last+1) and returns true.
+func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
+ if pr.State == ProgressStateReplicate {
+ // the rejection must be stale if the progress has matched and "rejected"
+ // is smaller than "match".
+ if rejected <= pr.Match {
+ return false
+ }
+ // directly decrease next to match + 1
+ pr.Next = pr.Match + 1
+ return true
+ }
+
+ // the rejection must be stale if "rejected" does not match next - 1
+ if pr.Next-1 != rejected {
+ return false
+ }
+
+ if pr.Next = min(rejected, last+1); pr.Next < 1 {
+ pr.Next = 1
+ }
+ pr.resume()
+ return true
+}
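+
+// Illustrative sketch (not part of upstream etcd): in probe state with
+// Next = 10, a rejection of index 9 carrying last = 5 passes the staleness
+// check (Next-1 == rejected) and decreases Next to min(9, 5+1) = 6, so the
+// next probe starts at index 6.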
+
+func (pr *Progress) pause() { pr.Paused = true }
+func (pr *Progress) resume() { pr.Paused = false }
+
+// isPaused returns whether sending replication messages to this follower is currently paused.
+func (pr *Progress) isPaused() bool {
+ switch pr.State {
+ case ProgressStateProbe:
+ return pr.Paused
+ case ProgressStateReplicate:
+ return pr.ins.full()
+ case ProgressStateSnapshot:
+ return true
+ default:
+ panic("unexpected state")
+ }
+}
+
+func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 }
+
+// maybeSnapshotAbort returns true if the pending snapshot can be aborted, i.e.
+// Match is equal to or higher than the pendingSnapshot.
+func (pr *Progress) maybeSnapshotAbort() bool {
+ return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot
+}
+
+func (pr *Progress) String() string {
+ return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.isPaused(), pr.PendingSnapshot)
+}
+
+type inflights struct {
+ // the starting index in the buffer
+ start int
+ // number of inflights in the buffer
+ count int
+
+ // the size of the buffer
+ size int
+ buffer []uint64
+}
+
+func newInflights(size int) *inflights {
+ return &inflights{
+ size: size,
+ buffer: make([]uint64, size),
+ }
+}
+
+// add adds an inflight into inflights
+func (in *inflights) add(inflight uint64) {
+ if in.full() {
+ panic("cannot add into a full inflights")
+ }
+ next := in.start + in.count
+ if next >= in.size {
+ next -= in.size
+ }
+ in.buffer[next] = inflight
+ in.count++
+}
+
+// freeTo frees the inflights smaller or equal to the given `to` flight.
+func (in *inflights) freeTo(to uint64) {
+ if in.count == 0 || to < in.buffer[in.start] {
+ // out of the left side of the window
+ return
+ }
+
+ i, idx := 0, in.start
+ for i = 0; i < in.count; i++ {
+ if to < in.buffer[idx] { // found the first large inflight
+ break
+ }
+
+ // increase index and maybe rotate
+ if idx++; idx >= in.size {
+ idx -= in.size
+ }
+ }
+ // free i inflights and set new start index
+ in.count -= i
+ in.start = idx
+}
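+
+// Illustrative sketch (not part of upstream etcd): with size = 4 and the
+// window holding last-entry indexes [3, 5, 8] (start pointing at 3),
+// freeTo(5) frees the first two slots, leaving only [8]; a subsequent add
+// then reuses the freed space, wrapping around the ring buffer as needed.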
+
+func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
+
+// full returns true if the inflights is full.
+func (in *inflights) full() bool {
+ return in.count == in.size
+}
+
+// reset frees all inflights.
+func (in *inflights) reset() {
+ in.count = 0
+ in.start = 0
+}
diff --git a/vendor/src/github.com/coreos/etcd/raft/raft.go b/vendor/src/github.com/coreos/etcd/raft/raft.go
new file mode 100644
index 0000000000..5639fcb8f3
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/raft.go
@@ -0,0 +1,898 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/rand"
+ "sort"
+ "strings"
+
+ pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// None is a placeholder node ID used when there is no leader.
+const None uint64 = 0
+const noLimit = math.MaxUint64
+
+var errNoLeader = errors.New("no leader")
+
+var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
+
+// Possible values for StateType.
+const (
+ StateFollower StateType = iota
+ StateCandidate
+ StateLeader
+)
+
+// StateType represents the role of a node in a cluster.
+type StateType uint64
+
+var stmap = [...]string{
+ "StateFollower",
+ "StateCandidate",
+ "StateLeader",
+}
+
+func (st StateType) String() string {
+ return stmap[uint64(st)]
+}
+
+// Config contains the parameters to start a raft.
+type Config struct {
+ // ID is the identity of the local raft. ID cannot be 0.
+ ID uint64
+
+ // peers contains the IDs of all nodes (including self) in the raft cluster. It
+ // should only be set when starting a new raft cluster. Restarting raft from
+ // previous configuration will panic if peers is set. peers is private and only
+ // used for testing right now.
+ peers []uint64
+
+ // ElectionTick is the number of Node.Tick invocations that must pass between
+ // elections. That is, if a follower does not receive any message from the
+ // leader of current term before ElectionTick has elapsed, it will become
+ // candidate and start an election. ElectionTick must be greater than
+ // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
+ // unnecessary leader switching.
+ ElectionTick int
+ // HeartbeatTick is the number of Node.Tick invocations that must pass between
+ // heartbeats. That is, a leader sends heartbeat messages to maintain its
+ // leadership every HeartbeatTick ticks.
+ HeartbeatTick int
+
+ // Storage is the storage for raft. raft generates entries and states to be
+ // stored in storage. raft reads the persisted entries and states out of
+ // Storage when it needs them, and reads the previous state and configuration
+ // out of Storage when restarting.
+ Storage Storage
+ // Applied is the last applied index. It should only be set when restarting
+ // raft. raft will not return entries smaller than or equal to Applied to the
+ // application. If Applied is unset when restarting, raft might return previously
+ // applied entries. This is a very application-dependent configuration.
+ Applied uint64
+
+ // MaxSizePerMsg limits the max size of each append message. A smaller value
+ // lowers the raft recovery cost (initial probing and message loss during normal
+ // operation). On the other hand, it might affect the throughput during normal
+ // replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per
+ // message.
+ MaxSizePerMsg uint64
+ // MaxInflightMsgs limits the max number of in-flight append messages during
+ // the optimistic replication phase. The application's transport layer usually
+ // has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to avoid
+ // overflowing that sending buffer. TODO (xiangli): feedback to application to
+ // limit the proposal rate?
+ MaxInflightMsgs int
+
+ // CheckQuorum specifies if the leader should check quorum activity. Leader
+ // steps down when quorum is not active for an electionTimeout.
+ CheckQuorum bool
+
+ // Logger is the logger used for the raft log. For multinode, which can host
+ // multiple raft groups, each raft group can have its own logger.
+ Logger Logger
+}
+
+func (c *Config) validate() error {
+ if c.ID == None {
+ return errors.New("cannot use none as id")
+ }
+
+ if c.HeartbeatTick <= 0 {
+ return errors.New("heartbeat tick must be greater than 0")
+ }
+
+ if c.ElectionTick <= c.HeartbeatTick {
+ return errors.New("election tick must be greater than heartbeat tick")
+ }
+
+ if c.Storage == nil {
+ return errors.New("storage cannot be nil")
+ }
+
+ if c.MaxInflightMsgs <= 0 {
+ return errors.New("max inflight messages must be greater than 0")
+ }
+
+ if c.Logger == nil {
+ c.Logger = raftLogger
+ }
+
+ return nil
+}
+
+type raft struct {
+ id uint64
+
+ Term uint64
+ Vote uint64
+
+ // the log
+ raftLog *raftLog
+
+ maxInflight int
+ maxMsgSize uint64
+ prs map[uint64]*Progress
+
+ state StateType
+
+ votes map[uint64]bool
+
+ msgs []pb.Message
+
+ // the leader id
+ lead uint64
+
+ // A new configuration is ignored if an unapplied configuration already exists.
+ pendingConf bool
+
+ // number of ticks since it reached last electionTimeout when it is leader
+ // or candidate.
+ // number of ticks since it reached last electionTimeout or received a
+ // valid message from current leader when it is a follower.
+ electionElapsed int
+
+ // number of ticks since it reached last heartbeatTimeout.
+ // only leader keeps heartbeatElapsed.
+ heartbeatElapsed int
+
+ checkQuorum bool
+
+ heartbeatTimeout int
+ electionTimeout int
+ rand *rand.Rand
+ tick func()
+ step stepFunc
+
+ logger Logger
+}
+
+func newRaft(c *Config) *raft {
+ if err := c.validate(); err != nil {
+ panic(err.Error())
+ }
+ raftlog := newLog(c.Storage, c.Logger)
+ hs, cs, err := c.Storage.InitialState()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ peers := c.peers
+ if len(cs.Nodes) > 0 {
+ if len(peers) > 0 {
+ // TODO(bdarnell): the peers argument is always nil except in
+ // tests; the argument should be removed and these tests should be
+ // updated to specify their nodes through a snapshot.
+ panic("cannot specify both newRaft(peers) and ConfState.Nodes)")
+ }
+ peers = cs.Nodes
+ }
+ r := &raft{
+ id: c.ID,
+ lead: None,
+ raftLog: raftlog,
+ maxMsgSize: c.MaxSizePerMsg,
+ maxInflight: c.MaxInflightMsgs,
+ prs: make(map[uint64]*Progress),
+ electionTimeout: c.ElectionTick,
+ heartbeatTimeout: c.HeartbeatTick,
+ logger: c.Logger,
+ checkQuorum: c.CheckQuorum,
+ }
+ r.rand = rand.New(rand.NewSource(int64(c.ID)))
+ for _, p := range peers {
+ r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
+ }
+ if !isHardStateEqual(hs, emptyState) {
+ r.loadState(hs)
+ }
+ if c.Applied > 0 {
+ raftlog.appliedTo(c.Applied)
+ }
+ r.becomeFollower(r.Term, None)
+
+ nodesStrs := make([]string, 0)
+ for _, n := range r.nodes() {
+ nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
+ }
+
+ r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
+ r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm())
+ return r
+}
+
+func (r *raft) hasLeader() bool { return r.lead != None }
+
+func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} }
+
+func (r *raft) hardState() pb.HardState {
+ return pb.HardState{
+ Term: r.Term,
+ Vote: r.Vote,
+ Commit: r.raftLog.committed,
+ }
+}
+
+func (r *raft) quorum() int { return len(r.prs)/2 + 1 }
+
+func (r *raft) nodes() []uint64 {
+ nodes := make([]uint64, 0, len(r.prs))
+ for id := range r.prs {
+ nodes = append(nodes, id)
+ }
+ sort.Sort(uint64Slice(nodes))
+ return nodes
+}
+
+// send stamps the message with the local id (and the current term, except
+// for proposals) and queues it in r.msgs to go out with the next Ready.
+func (r *raft) send(m pb.Message) {
+ m.From = r.id
+ // do not attach term to MsgProp
+ // proposals are a way to forward to the leader and
+ // should be treated as local message.
+ if m.Type != pb.MsgProp {
+ m.Term = r.Term
+ }
+ r.msgs = append(r.msgs, m)
+}
+
+// sendAppend sends an append RPC with new entries (if any) to the given peer,
+// falling back to a snapshot when the required entries are no longer available.
+func (r *raft) sendAppend(to uint64) {
+ pr := r.prs[to]
+ if pr.isPaused() {
+ return
+ }
+ m := pb.Message{}
+ m.To = to
+
+ term, errt := r.raftLog.term(pr.Next - 1)
+ ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
+
+ if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
+ if !pr.RecentActive {
+ r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
+ return
+ }
+
+ m.Type = pb.MsgSnap
+ snapshot, err := r.raftLog.snapshot()
+ if err != nil {
+ if err == ErrSnapshotTemporarilyUnavailable {
+ r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
+ return
+ }
+ panic(err) // TODO(bdarnell)
+ }
+ if IsEmptySnap(snapshot) {
+ panic("need non-empty snapshot")
+ }
+ m.Snapshot = snapshot
+ sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
+ r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
+ r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
+ pr.becomeSnapshot(sindex)
+ r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
+ } else {
+ m.Type = pb.MsgApp
+ m.Index = pr.Next - 1
+ m.LogTerm = term
+ m.Entries = ents
+ m.Commit = r.raftLog.committed
+ if n := len(m.Entries); n != 0 {
+ switch pr.State {
+ // optimistically increase the next when in ProgressStateReplicate
+ case ProgressStateReplicate:
+ last := m.Entries[n-1].Index
+ pr.optimisticUpdate(last)
+ pr.ins.add(last)
+ case ProgressStateProbe:
+ pr.pause()
+ default:
+ r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State)
+ }
+ }
+ }
+ r.send(m)
+}
+
+// sendHeartbeat sends a MsgHeartbeat to the given peer.
+func (r *raft) sendHeartbeat(to uint64) {
+ // Attach the commit as min(to.matched, r.committed).
+ // When the leader sends out heartbeat message,
+ // the receiver(follower) might not be matched with the leader
+ // or it might not have all the committed entries.
+ // The leader MUST NOT forward the follower's commit to
+ // an unmatched index.
+ commit := min(r.prs[to].Match, r.raftLog.committed)
+ m := pb.Message{
+ To: to,
+ Type: pb.MsgHeartbeat,
+ Commit: commit,
+ }
+ r.send(m)
+}
+
+// bcastAppend sends an append RPC with entries to all peers that are not
+// up to date according to the progress recorded in r.prs.
+func (r *raft) bcastAppend() {
+ for id := range r.prs {
+ if id == r.id {
+ continue
+ }
+ r.sendAppend(id)
+ }
+}
+
+// bcastHeartbeat sends a heartbeat RPC (without entries) to all peers.
+func (r *raft) bcastHeartbeat() {
+ for id := range r.prs {
+ if id == r.id {
+ continue
+ }
+ r.sendHeartbeat(id)
+ r.prs[id].resume()
+ }
+}
+
+// maybeCommit attempts to advance the commit index. Returns true if
+// the commit index changed (in which case the caller should call
+// r.bcastAppend).
+func (r *raft) maybeCommit() bool {
+ // TODO(bmizerany): optimize.. Currently naive
+ mis := make(uint64Slice, 0, len(r.prs))
+ for id := range r.prs {
+ mis = append(mis, r.prs[id].Match)
+ }
+ sort.Sort(sort.Reverse(mis))
+ mci := mis[r.quorum()-1]
+ return r.raftLog.maybeCommit(mci, r.Term)
+}
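+
+// Illustrative sketch (not part of upstream etcd): with five peers whose
+// Match indexes are [9, 8, 7, 5, 3], sorting in descending order gives
+// mis = [9, 8, 7, 5, 3]; quorum() is 3, so mci = mis[2] = 7 and the commit
+// index may advance to 7, provided the entry at index 7 is from r.Term.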
+
+func (r *raft) reset(term uint64) {
+ if r.Term != term {
+ r.Term = term
+ r.Vote = None
+ }
+ r.lead = None
+
+ r.electionElapsed = 0
+ r.heartbeatElapsed = 0
+
+ r.votes = make(map[uint64]bool)
+ for id := range r.prs {
+ r.prs[id] = &Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight)}
+ if id == r.id {
+ r.prs[id].Match = r.raftLog.lastIndex()
+ }
+ }
+ r.pendingConf = false
+}
+
+func (r *raft) appendEntry(es ...pb.Entry) {
+ li := r.raftLog.lastIndex()
+ for i := range es {
+ es[i].Term = r.Term
+ es[i].Index = li + 1 + uint64(i)
+ }
+ r.raftLog.append(es...)
+ r.prs[r.id].maybeUpdate(r.raftLog.lastIndex())
+ // Regardless of maybeCommit's return, our caller will call bcastAppend.
+ r.maybeCommit()
+}
+
+// tickElection is run by followers and candidates after r.electionTimeout.
+func (r *raft) tickElection() {
+ if !r.promotable() {
+ r.electionElapsed = 0
+ return
+ }
+ r.electionElapsed++
+ if r.isElectionTimeout() {
+ r.electionElapsed = 0
+ r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
+ }
+}
+
+// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
+func (r *raft) tickHeartbeat() {
+ r.heartbeatElapsed++
+ r.electionElapsed++
+
+ if r.electionElapsed >= r.electionTimeout {
+ r.electionElapsed = 0
+ if r.checkQuorum {
+ r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
+ }
+ }
+
+ if r.state != StateLeader {
+ return
+ }
+
+ if r.heartbeatElapsed >= r.heartbeatTimeout {
+ r.heartbeatElapsed = 0
+ r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
+ }
+}
+
+func (r *raft) becomeFollower(term uint64, lead uint64) {
+ r.step = stepFollower
+ r.reset(term)
+ r.tick = r.tickElection
+ r.lead = lead
+ r.state = StateFollower
+ r.logger.Infof("%x became follower at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeCandidate() {
+ // TODO(xiangli) remove the panic when the raft implementation is stable
+ if r.state == StateLeader {
+ panic("invalid transition [leader -> candidate]")
+ }
+ r.step = stepCandidate
+ r.reset(r.Term + 1)
+ r.tick = r.tickElection
+ r.Vote = r.id
+ r.state = StateCandidate
+ r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeLeader() {
+ // TODO(xiangli) remove the panic when the raft implementation is stable
+ if r.state == StateFollower {
+ panic("invalid transition [follower -> leader]")
+ }
+ r.step = stepLeader
+ r.reset(r.Term)
+ r.tick = r.tickHeartbeat
+ r.lead = r.id
+ r.state = StateLeader
+ ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit)
+ if err != nil {
+ r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err)
+ }
+
+ for _, e := range ents {
+ if e.Type != pb.EntryConfChange {
+ continue
+ }
+ if r.pendingConf {
+ panic("unexpected double uncommitted config entry")
+ }
+ r.pendingConf = true
+ }
+ r.appendEntry(pb.Entry{Data: nil})
+ r.logger.Infof("%x became leader at term %d", r.id, r.Term)
+}
+
+func (r *raft) campaign() {
+ r.becomeCandidate()
+ if r.quorum() == r.poll(r.id, true) {
+ r.becomeLeader()
+ return
+ }
+ for id := range r.prs {
+ if id == r.id {
+ continue
+ }
+ r.logger.Infof("%x [logterm: %d, index: %d] sent vote request to %x at term %d",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), id, r.Term)
+ r.send(pb.Message{To: id, Type: pb.MsgVote, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm()})
+ }
+}
+
+func (r *raft) poll(id uint64, v bool) (granted int) {
+ if v {
+ r.logger.Infof("%x received vote from %x at term %d", r.id, id, r.Term)
+ } else {
+ r.logger.Infof("%x received vote rejection from %x at term %d", r.id, id, r.Term)
+ }
+ if _, ok := r.votes[id]; !ok {
+ r.votes[id] = v
+ }
+ for _, vv := range r.votes {
+ if vv {
+ granted++
+ }
+ }
+ return granted
+}
+
+func (r *raft) Step(m pb.Message) error {
+ if m.Type == pb.MsgHup {
+ if r.state != StateLeader {
+ r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
+ r.campaign()
+ } else {
+ r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
+ }
+ return nil
+ }
+
+ switch {
+ case m.Term == 0:
+ // local message
+ case m.Term > r.Term:
+ lead := m.From
+ if m.Type == pb.MsgVote {
+ lead = None
+ }
+ r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
+ r.id, r.Term, m.Type, m.From, m.Term)
+ r.becomeFollower(m.Term, lead)
+ case m.Term < r.Term:
+ // ignore
+ r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
+ r.id, r.Term, m.Type, m.From, m.Term)
+ return nil
+ }
+ r.step(r, m)
+ return nil
+}
+
+type stepFunc func(r *raft, m pb.Message)
+
+func stepLeader(r *raft, m pb.Message) {
+
+ // These message types do not require any progress for m.From.
+ switch m.Type {
+ case pb.MsgBeat:
+ r.bcastHeartbeat()
+ return
+ case pb.MsgCheckQuorum:
+ if !r.checkQuorumActive() {
+ r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
+ r.becomeFollower(r.Term, None)
+ }
+ return
+ case pb.MsgProp:
+ if len(m.Entries) == 0 {
+ r.logger.Panicf("%x stepped empty MsgProp", r.id)
+ }
+ if _, ok := r.prs[r.id]; !ok {
+ // If we are not currently a member of the cluster (i.e. this node
+ // was removed from the configuration while serving as leader),
+ // drop any new proposals.
+ return
+ }
+ for i, e := range m.Entries {
+ if e.Type == pb.EntryConfChange {
+ if r.pendingConf {
+ m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
+ }
+ r.pendingConf = true
+ }
+ }
+ r.appendEntry(m.Entries...)
+ r.bcastAppend()
+ return
+ case pb.MsgVote:
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term)
+ r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true})
+ return
+ }
+
+ // All other message types require a progress for m.From (pr).
+ pr, prOk := r.prs[m.From]
+ if !prOk {
+ r.logger.Debugf("no progress available for %x", m.From)
+ return
+ }
+ switch m.Type {
+ case pb.MsgAppResp:
+ pr.RecentActive = true
+
+ if m.Reject {
+ r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
+ r.id, m.RejectHint, m.From, m.Index)
+ if pr.maybeDecrTo(m.Index, m.RejectHint) {
+ r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
+ if pr.State == ProgressStateReplicate {
+ pr.becomeProbe()
+ }
+ r.sendAppend(m.From)
+ }
+ } else {
+ oldPaused := pr.isPaused()
+ if pr.maybeUpdate(m.Index) {
+ switch {
+ case pr.State == ProgressStateProbe:
+ pr.becomeReplicate()
+ case pr.State == ProgressStateSnapshot && pr.maybeSnapshotAbort():
+ r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+ pr.becomeProbe()
+ case pr.State == ProgressStateReplicate:
+ pr.ins.freeTo(m.Index)
+ }
+
+ if r.maybeCommit() {
+ r.bcastAppend()
+ } else if oldPaused {
+ // update() reset the wait state on this node. If we had delayed sending
+ // an update before, send it now.
+ r.sendAppend(m.From)
+ }
+ }
+ }
+ case pb.MsgHeartbeatResp:
+ pr.RecentActive = true
+
+ // free one slot for the full inflights window to allow progress.
+ if pr.State == ProgressStateReplicate && pr.ins.full() {
+ pr.ins.freeFirstOne()
+ }
+ if pr.Match < r.raftLog.lastIndex() {
+ r.sendAppend(m.From)
+ }
+ case pb.MsgSnapStatus:
+ if pr.State != ProgressStateSnapshot {
+ return
+ }
+ if !m.Reject {
+ pr.becomeProbe()
+ r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+ } else {
+ pr.snapshotFailure()
+ pr.becomeProbe()
+ r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+ }
+ // If the snapshot finished, wait for the msgAppResp from the remote node
+ // before sending out the next msgApp.
+ // If the snapshot failed, wait for a heartbeat interval before the next try.
+ pr.pause()
+ case pb.MsgUnreachable:
+ // During optimistic replication, if the remote becomes unreachable,
+ // it is very likely that a MsgApp was lost.
+ if pr.State == ProgressStateReplicate {
+ pr.becomeProbe()
+ }
+ r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
+ }
+}
+
+func stepCandidate(r *raft, m pb.Message) {
+ switch m.Type {
+ case pb.MsgProp:
+ r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+ return
+ case pb.MsgApp:
+ r.becomeFollower(r.Term, m.From)
+ r.handleAppendEntries(m)
+ case pb.MsgHeartbeat:
+ r.becomeFollower(r.Term, m.From)
+ r.handleHeartbeat(m)
+ case pb.MsgSnap:
+ r.becomeFollower(m.Term, m.From)
+ r.handleSnapshot(m)
+ case pb.MsgVote:
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term)
+ r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true})
+ case pb.MsgVoteResp:
+ gr := r.poll(m.From, !m.Reject)
+ r.logger.Infof("%x [quorum:%d] has received %d votes and %d vote rejections", r.id, r.quorum(), gr, len(r.votes)-gr)
+ switch r.quorum() {
+ case gr:
+ r.becomeLeader()
+ r.bcastAppend()
+ case len(r.votes) - gr:
+ r.becomeFollower(r.Term, None)
+ }
+ }
+}
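+
+// Illustrative sketch (not part of upstream etcd): in a five-node cluster
+// quorum() is 3. With recorded votes {self: grant, A: grant, B: reject},
+// gr is 2; a third grant matches "case gr" and triggers becomeLeader, while
+// a third rejection matches "case len(r.votes) - gr" and drops the candidate
+// back to follower.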
+
+func stepFollower(r *raft, m pb.Message) {
+ switch m.Type {
+ case pb.MsgProp:
+ if r.lead == None {
+ r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+ return
+ }
+ m.To = r.lead
+ r.send(m)
+ case pb.MsgApp:
+ r.electionElapsed = 0
+ r.lead = m.From
+ r.handleAppendEntries(m)
+ case pb.MsgHeartbeat:
+ r.electionElapsed = 0
+ r.lead = m.From
+ r.handleHeartbeat(m)
+ case pb.MsgSnap:
+ r.electionElapsed = 0
+ r.handleSnapshot(m)
+ case pb.MsgVote:
+ if (r.Vote == None || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
+ r.electionElapsed = 0
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] voted for %x [logterm: %d, index: %d] at term %d",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term)
+ r.Vote = m.From
+ r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp})
+ } else {
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term)
+ r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true})
+ }
+ }
+}
+
+func (r *raft) handleAppendEntries(m pb.Message) {
+ if m.Index < r.raftLog.committed {
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+ return
+ }
+
+ if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
+ } else {
+ r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
+ r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
+ }
+}
+
+func (r *raft) handleHeartbeat(m pb.Message) {
+ r.raftLog.commitTo(m.Commit)
+ r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp})
+}
+
+func (r *raft) handleSnapshot(m pb.Message) {
+ sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
+ if r.restore(m.Snapshot) {
+ r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
+ r.id, r.raftLog.committed, sindex, sterm)
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
+ } else {
+ r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
+ r.id, r.raftLog.committed, sindex, sterm)
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+ }
+}
+
+// restore recovers the state machine from a snapshot. It restores the log and the
+// configuration of the state machine.
+func (r *raft) restore(s pb.Snapshot) bool {
+ if s.Metadata.Index <= r.raftLog.committed {
+ return false
+ }
+ if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
+ r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
+ r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+ r.raftLog.commitTo(s.Metadata.Index)
+ return false
+ }
+
+ r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
+ r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+
+ r.raftLog.restore(s)
+ r.prs = make(map[uint64]*Progress)
+ for _, n := range s.Metadata.ConfState.Nodes {
+ match, next := uint64(0), r.raftLog.lastIndex()+1
+ if n == r.id {
+ match = next - 1
+ }
+ r.setProgress(n, match, next)
+ r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.prs[n])
+ }
+ return true
+}
+
+// promotable indicates whether the state machine can be promoted to leader,
+// which is true when its own id is in the progress list.
+func (r *raft) promotable() bool {
+ _, ok := r.prs[r.id]
+ return ok
+}
+
+func (r *raft) addNode(id uint64) {
+ if _, ok := r.prs[id]; ok {
+ // Ignore any redundant addNode calls (which can happen because the
+ // initial bootstrapping entries are applied twice).
+ return
+ }
+
+ r.setProgress(id, 0, r.raftLog.lastIndex()+1)
+ r.pendingConf = false
+}
+
+func (r *raft) removeNode(id uint64) {
+ r.delProgress(id)
+ r.pendingConf = false
+ // The quorum size is now smaller, so see if any pending entries can
+ // be committed.
+ if r.maybeCommit() {
+ r.bcastAppend()
+ }
+}
+
+func (r *raft) resetPendingConf() { r.pendingConf = false }
+
+func (r *raft) setProgress(id, match, next uint64) {
+ r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
+}
+
+func (r *raft) delProgress(id uint64) {
+ delete(r.prs, id)
+}
+
+func (r *raft) loadState(state pb.HardState) {
+ if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
+ r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
+ }
+ r.raftLog.committed = state.Commit
+ r.Term = state.Term
+ r.Vote = state.Vote
+}
+
+// isElectionTimeout returns true if r.electionElapsed is greater than the
+// randomized election timeout in [electiontimeout, 2 * electiontimeout - 1].
+// Otherwise, it returns false.
+func (r *raft) isElectionTimeout() bool {
+ d := r.electionElapsed - r.electionTimeout
+ if d < 0 {
+ return false
+ }
+ return d > r.rand.Int()%r.electionTimeout
+}
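+
+// Illustrative sketch (not part of upstream etcd): with electionTimeout = 10,
+// d is electionElapsed - 10 and the comparison d > rand%10 yields an
+// effective timeout uniformly spread over 11 to 20 ticks, so peers rarely
+// start elections simultaneously.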
+
+// checkQuorumActive returns true if the quorum is active from
+// the view of the local raft state machine. Otherwise, it returns
+// false.
+// checkQuorumActive also resets all RecentActive to false.
+func (r *raft) checkQuorumActive() bool {
+ var act int
+
+ for id := range r.prs {
+ if id == r.id { // self is always active
+ act++
+ continue
+ }
+
+ if r.prs[id].RecentActive {
+ act++
+ }
+
+ r.prs[id].RecentActive = false
+ }
+
+ return act >= r.quorum()
+}
diff --git a/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go
new file mode 100644
index 0000000000..319134cdeb
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go
@@ -0,0 +1,1768 @@
+// Code generated by protoc-gen-gogo.
+// source: raft.proto
+// DO NOT EDIT!
+
+/*
+ Package raftpb is a generated protocol buffer package.
+
+ It is generated from these files:
+ raft.proto
+
+ It has these top-level messages:
+ Entry
+ SnapshotMetadata
+ Snapshot
+ Message
+ HardState
+ ConfState
+ ConfChange
+*/
+package raftpb
+
+import (
+ "fmt"
+
+ proto "github.com/gogo/protobuf/proto"
+)
+
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type EntryType int32
+
+const (
+ EntryNormal EntryType = 0
+ EntryConfChange EntryType = 1
+)
+
+var EntryType_name = map[int32]string{
+ 0: "EntryNormal",
+ 1: "EntryConfChange",
+}
+var EntryType_value = map[string]int32{
+ "EntryNormal": 0,
+ "EntryConfChange": 1,
+}
+
+func (x EntryType) Enum() *EntryType {
+ p := new(EntryType)
+ *p = x
+ return p
+}
+func (x EntryType) String() string {
+ return proto.EnumName(EntryType_name, int32(x))
+}
+func (x *EntryType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType")
+ if err != nil {
+ return err
+ }
+ *x = EntryType(value)
+ return nil
+}
+
+type MessageType int32
+
+const (
+ MsgHup MessageType = 0
+ MsgBeat MessageType = 1
+ MsgProp MessageType = 2
+ MsgApp MessageType = 3
+ MsgAppResp MessageType = 4
+ MsgVote MessageType = 5
+ MsgVoteResp MessageType = 6
+ MsgSnap MessageType = 7
+ MsgHeartbeat MessageType = 8
+ MsgHeartbeatResp MessageType = 9
+ MsgUnreachable MessageType = 10
+ MsgSnapStatus MessageType = 11
+ MsgCheckQuorum MessageType = 12
+)
+
+var MessageType_name = map[int32]string{
+ 0: "MsgHup",
+ 1: "MsgBeat",
+ 2: "MsgProp",
+ 3: "MsgApp",
+ 4: "MsgAppResp",
+ 5: "MsgVote",
+ 6: "MsgVoteResp",
+ 7: "MsgSnap",
+ 8: "MsgHeartbeat",
+ 9: "MsgHeartbeatResp",
+ 10: "MsgUnreachable",
+ 11: "MsgSnapStatus",
+ 12: "MsgCheckQuorum",
+}
+var MessageType_value = map[string]int32{
+ "MsgHup": 0,
+ "MsgBeat": 1,
+ "MsgProp": 2,
+ "MsgApp": 3,
+ "MsgAppResp": 4,
+ "MsgVote": 5,
+ "MsgVoteResp": 6,
+ "MsgSnap": 7,
+ "MsgHeartbeat": 8,
+ "MsgHeartbeatResp": 9,
+ "MsgUnreachable": 10,
+ "MsgSnapStatus": 11,
+ "MsgCheckQuorum": 12,
+}
+
+func (x MessageType) Enum() *MessageType {
+ p := new(MessageType)
+ *p = x
+ return p
+}
+func (x MessageType) String() string {
+ return proto.EnumName(MessageType_name, int32(x))
+}
+func (x *MessageType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType")
+ if err != nil {
+ return err
+ }
+ *x = MessageType(value)
+ return nil
+}
+
+type ConfChangeType int32
+
+const (
+ ConfChangeAddNode ConfChangeType = 0
+ ConfChangeRemoveNode ConfChangeType = 1
+ ConfChangeUpdateNode ConfChangeType = 2
+)
+
+var ConfChangeType_name = map[int32]string{
+ 0: "ConfChangeAddNode",
+ 1: "ConfChangeRemoveNode",
+ 2: "ConfChangeUpdateNode",
+}
+var ConfChangeType_value = map[string]int32{
+ "ConfChangeAddNode": 0,
+ "ConfChangeRemoveNode": 1,
+ "ConfChangeUpdateNode": 2,
+}
+
+func (x ConfChangeType) Enum() *ConfChangeType {
+ p := new(ConfChangeType)
+ *p = x
+ return p
+}
+func (x ConfChangeType) String() string {
+ return proto.EnumName(ConfChangeType_name, int32(x))
+}
+func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType")
+ if err != nil {
+ return err
+ }
+ *x = ConfChangeType(value)
+ return nil
+}
+
+type Entry struct {
+ Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
+ Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"`
+ Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"`
+ Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Entry) Reset() { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage() {}
+
+type SnapshotMetadata struct {
+ ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state" json:"conf_state"`
+ Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"`
+ Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} }
+func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
+func (*SnapshotMetadata) ProtoMessage() {}
+
+type Snapshot struct {
+ Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
+ Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+
+type Message struct {
+ Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
+ To uint64 `protobuf:"varint,2,opt,name=to" json:"to"`
+ From uint64 `protobuf:"varint,3,opt,name=from" json:"from"`
+ Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"`
+ LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
+ Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"`
+ Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"`
+ Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"`
+ Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"`
+ Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"`
+ RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Message) Reset() { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage() {}
+
+type HardState struct {
+ Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"`
+ Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"`
+ Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *HardState) Reset() { *m = HardState{} }
+func (m *HardState) String() string { return proto.CompactTextString(m) }
+func (*HardState) ProtoMessage() {}
+
+type ConfState struct {
+ Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ConfState) Reset() { *m = ConfState{} }
+func (m *ConfState) String() string { return proto.CompactTextString(m) }
+func (*ConfState) ProtoMessage() {}
+
+type ConfChange struct {
+ ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
+ Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"`
+ NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"`
+ Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ConfChange) Reset() { *m = ConfChange{} }
+func (m *ConfChange) String() string { return proto.CompactTextString(m) }
+func (*ConfChange) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*Entry)(nil), "raftpb.Entry")
+ proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
+ proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
+ proto.RegisterType((*Message)(nil), "raftpb.Message")
+ proto.RegisterType((*HardState)(nil), "raftpb.HardState")
+ proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
+ proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
+ proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
+ proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
+ proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
+}
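+
+// The generated methods below follow the usual gogoproto pattern: Size
+// computes the exact encoded length, Marshal allocates a buffer of that
+// size and delegates to MarshalTo, and Unmarshal hand-decodes the protobuf
+// wire format (a varint tag, then the field payload) without reflection.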
+func (m *Entry) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Entry) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Type))
+ data[i] = 0x10
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Term))
+ data[i] = 0x18
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Index))
+ if m.Data != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintRaft(data, i, uint64(len(m.Data)))
+ i += copy(data[i:], m.Data)
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(data[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *SnapshotMetadata) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *SnapshotMetadata) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.ConfState.Size()))
+ n1, err := m.ConfState.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ data[i] = 0x10
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Index))
+ data[i] = 0x18
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Term))
+ if m.XXX_unrecognized != nil {
+ i += copy(data[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *Snapshot) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Data != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintRaft(data, i, uint64(len(m.Data)))
+ i += copy(data[i:], m.Data)
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Metadata.Size()))
+ n2, err := m.Metadata.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ if m.XXX_unrecognized != nil {
+ i += copy(data[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *Message) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Message) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Type))
+ data[i] = 0x10
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.To))
+ data[i] = 0x18
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.From))
+ data[i] = 0x20
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Term))
+ data[i] = 0x28
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.LogTerm))
+ data[i] = 0x30
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Index))
+ if len(m.Entries) > 0 {
+ for _, msg := range m.Entries {
+ data[i] = 0x3a
+ i++
+ i = encodeVarintRaft(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ data[i] = 0x40
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Commit))
+ data[i] = 0x4a
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Snapshot.Size()))
+ n3, err := m.Snapshot.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ data[i] = 0x50
+ i++
+ if m.Reject {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x58
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.RejectHint))
+ if m.XXX_unrecognized != nil {
+ i += copy(data[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *HardState) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HardState) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Term))
+ data[i] = 0x10
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Vote))
+ data[i] = 0x18
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Commit))
+ if m.XXX_unrecognized != nil {
+ i += copy(data[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *ConfState) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ConfState) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Nodes) > 0 {
+ for _, num := range m.Nodes {
+ data[i] = 0x8
+ i++
+ i = encodeVarintRaft(data, i, uint64(num))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(data[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *ConfChange) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ConfChange) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.ID))
+ data[i] = 0x10
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Type))
+ data[i] = 0x18
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.NodeID))
+ if m.Context != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintRaft(data, i, uint64(len(m.Context)))
+ i += copy(data[i:], m.Context)
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(data[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func encodeFixed64Raft(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Raft(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
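+
+// encodeVarintRaft writes v at data[offset:] in protobuf base-128 varint
+// form: seven payload bits per byte, least-significant group first, with
+// the continuation bit set on every byte except the last. For example,
+// 300 (0b1_0010_1100) encodes as 0xac 0x02.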
+func encodeVarintRaft(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *Entry) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.Type))
+ n += 1 + sovRaft(uint64(m.Term))
+ n += 1 + sovRaft(uint64(m.Index))
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *SnapshotMetadata) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ConfState.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ n += 1 + sovRaft(uint64(m.Index))
+ n += 1 + sovRaft(uint64(m.Term))
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Snapshot) Size() (n int) {
+ var l int
+ _ = l
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ l = m.Metadata.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Message) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.Type))
+ n += 1 + sovRaft(uint64(m.To))
+ n += 1 + sovRaft(uint64(m.From))
+ n += 1 + sovRaft(uint64(m.Term))
+ n += 1 + sovRaft(uint64(m.LogTerm))
+ n += 1 + sovRaft(uint64(m.Index))
+ if len(m.Entries) > 0 {
+ for _, e := range m.Entries {
+ l = e.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ }
+ n += 1 + sovRaft(uint64(m.Commit))
+ l = m.Snapshot.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ n += 2
+ n += 1 + sovRaft(uint64(m.RejectHint))
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *HardState) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.Term))
+ n += 1 + sovRaft(uint64(m.Vote))
+ n += 1 + sovRaft(uint64(m.Commit))
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ConfState) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Nodes) > 0 {
+ for _, e := range m.Nodes {
+ n += 1 + sovRaft(uint64(e))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ConfChange) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.ID))
+ n += 1 + sovRaft(uint64(m.Type))
+ n += 1 + sovRaft(uint64(m.NodeID))
+ if m.Context != nil {
+ l = len(m.Context)
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
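+// sovRaft returns the encoded size of x as a varint: one byte per seven
+// bits, so values below 1<<7 take one byte, below 1<<14 two bytes, and so
+// on. sozRaft is the zigzag-encoded (sint64) variant.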
+func sovRaft(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozRaft(x uint64) (n int) {
+ return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Entry) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Entry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Type |= (EntryType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Term |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Index |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SnapshotMetadata) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ConfState.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Index |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Term |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Snapshot) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Metadata.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Message) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Message: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Type |= (MessageType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+ }
+ m.To = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.To |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ m.From = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.From |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Term |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType)
+ }
+ m.LogTerm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.LogTerm |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Index |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Entries = append(m.Entries, Entry{})
+ if err := m.Entries[len(m.Entries)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+ }
+ m.Commit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Commit |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Snapshot.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Reject = bool(v != 0)
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType)
+ }
+ m.RejectHint = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.RejectHint |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HardState) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HardState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Term |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
+ }
+ m.Vote = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Vote |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+ }
+ m.Commit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Commit |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfState) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
+ }
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Nodes = append(m.Nodes, v)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfChange) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfChange: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Type |= (ConfChangeType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ m.NodeID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.NodeID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Context = append(m.Context[:0], data[iNdEx:postIndex]...)
+ if m.Context == nil {
+ m.Context = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
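+
+// skipRaft returns the length of the encoded field starting at data[0],
+// dispatching on the protobuf wire type: varint (0), fixed64 (1),
+// length-delimited (2), deprecated groups (3 and 4), and fixed32 (5).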
+func skipRaft(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthRaft
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipRaft(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow")
+)
diff --git a/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto b/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto
new file mode 100644
index 0000000000..0a98b8cfa5
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto
@@ -0,0 +1,86 @@
+syntax = "proto2";
+package raftpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
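+
+// (gogoproto.nullable) = false on the fields below makes the generated Go
+// fields plain values rather than pointers, which is why raft.pb.go reads
+// them without nil checks.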
+
+enum EntryType {
+ EntryNormal = 0;
+ EntryConfChange = 1;
+}
+
+message Entry {
+ optional EntryType Type = 1 [(gogoproto.nullable) = false];
+ optional uint64 Term = 2 [(gogoproto.nullable) = false];
+ optional uint64 Index = 3 [(gogoproto.nullable) = false];
+ optional bytes Data = 4;
+}
+
+message SnapshotMetadata {
+ optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
+ optional uint64 index = 2 [(gogoproto.nullable) = false];
+ optional uint64 term = 3 [(gogoproto.nullable) = false];
+}
+
+message Snapshot {
+ optional bytes data = 1;
+ optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
+}
+
+enum MessageType {
+ MsgHup = 0;
+ MsgBeat = 1;
+ MsgProp = 2;
+ MsgApp = 3;
+ MsgAppResp = 4;
+ MsgVote = 5;
+ MsgVoteResp = 6;
+ MsgSnap = 7;
+ MsgHeartbeat = 8;
+ MsgHeartbeatResp = 9;
+ MsgUnreachable = 10;
+ MsgSnapStatus = 11;
+ MsgCheckQuorum = 12;
+}
+
+message Message {
+ optional MessageType type = 1 [(gogoproto.nullable) = false];
+ optional uint64 to = 2 [(gogoproto.nullable) = false];
+ optional uint64 from = 3 [(gogoproto.nullable) = false];
+ optional uint64 term = 4 [(gogoproto.nullable) = false];
+ optional uint64 logTerm = 5 [(gogoproto.nullable) = false];
+ optional uint64 index = 6 [(gogoproto.nullable) = false];
+ repeated Entry entries = 7 [(gogoproto.nullable) = false];
+ optional uint64 commit = 8 [(gogoproto.nullable) = false];
+ optional Snapshot snapshot = 9 [(gogoproto.nullable) = false];
+ optional bool reject = 10 [(gogoproto.nullable) = false];
+ optional uint64 rejectHint = 11 [(gogoproto.nullable) = false];
+}
+
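+// HardState is the raft state that must be saved to stable storage before
+// sending any message: the current term, the vote cast in that term, and
+// the highest log index known to be committed.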
+message HardState {
+ optional uint64 term = 1 [(gogoproto.nullable) = false];
+ optional uint64 vote = 2 [(gogoproto.nullable) = false];
+ optional uint64 commit = 3 [(gogoproto.nullable) = false];
+}
+
+message ConfState {
+ repeated uint64 nodes = 1;
+}
+
+enum ConfChangeType {
+ ConfChangeAddNode = 0;
+ ConfChangeRemoveNode = 1;
+ ConfChangeUpdateNode = 2;
+}
+
+message ConfChange {
+ optional uint64 ID = 1 [(gogoproto.nullable) = false];
+ optional ConfChangeType Type = 2 [(gogoproto.nullable) = false];
+ optional uint64 NodeID = 3 [(gogoproto.nullable) = false];
+ optional bytes Context = 4;
+}
diff --git a/vendor/src/github.com/coreos/etcd/raft/rawnode.go b/vendor/src/github.com/coreos/etcd/raft/rawnode.go
new file mode 100644
index 0000000000..8cf0858917
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/rawnode.go
@@ -0,0 +1,228 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "errors"
+
+ pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// ErrStepLocalMsg is returned when trying to step a local raft message.
+var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
+
+// ErrStepPeerNotFound is returned when trying to step a response message
+// for which no peer can be found in raft.prs.
+var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
+
+// RawNode is a thread-unsafe Node.
+// The methods of this struct correspond to the methods of Node and are described
+// more fully there.
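+//
+// A minimal driver loop might look like this (a sketch; error handling,
+// persistence, and message transport are left to the caller):
+//
+//	rn, _ := NewRawNode(config, peers)
+//	for {
+//		rn.Tick() // driven by the caller's clock
+//		if rn.HasReady() {
+//			rd := rn.Ready()
+//			// Persist rd.HardState and rd.Entries, send rd.Messages,
+//			// apply rd.CommittedEntries, then acknowledge:
+//			rn.Advance(rd)
+//		}
+//	}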
+type RawNode struct {
+ raft *raft
+ prevSoftSt *SoftState
+ prevHardSt pb.HardState
+}
+
+func (rn *RawNode) newReady() Ready {
+ return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
+}
+
+func (rn *RawNode) commitReady(rd Ready) {
+ if rd.SoftState != nil {
+ rn.prevSoftSt = rd.SoftState
+ }
+ if !IsEmptyHardState(rd.HardState) {
+ rn.prevHardSt = rd.HardState
+ }
+ if rn.prevHardSt.Commit != 0 {
+ // In most cases, prevHardSt and rd.HardState will be the same
+ // because when there are new entries to apply we just sent a
+ // HardState with an updated Commit value. However, on initial
+ // startup the two are different because we don't send a HardState
+ // until something changes, but we do send any un-applied but
+ // committed entries (and previously-committed entries may be
+ // incorporated into the snapshot, even if rd.CommittedEntries is
+ // empty). Therefore we mark all committed entries as applied
+ // whether they were included in rd.HardState or not.
+ rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit)
+ }
+ if len(rd.Entries) > 0 {
+ e := rd.Entries[len(rd.Entries)-1]
+ rn.raft.raftLog.stableTo(e.Index, e.Term)
+ }
+ if !IsEmptySnap(rd.Snapshot) {
+ rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
+ }
+}
+
+// NewRawNode returns a new RawNode given a configuration and a list of raft peers.
+func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
+ if config.ID == 0 {
+ panic("config.ID must not be zero")
+ }
+ r := newRaft(config)
+ rn := &RawNode{
+ raft: r,
+ }
+ lastIndex, err := config.Storage.LastIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ // If the log is empty, this is a new RawNode (like StartNode); otherwise it's
+ // restoring an existing RawNode (like RestartNode).
+ // TODO(bdarnell): rethink RawNode initialization and whether the application needs
+ // to be able to tell us when it expects the RawNode to exist.
+ if lastIndex == 0 {
+ r.becomeFollower(1, None)
+ ents := make([]pb.Entry, len(peers))
+ for i, peer := range peers {
+ cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+ data, err := cc.Marshal()
+ if err != nil {
+ panic("unexpected marshal error")
+ }
+
+ ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
+ }
+ r.raftLog.append(ents...)
+ r.raftLog.committed = uint64(len(ents))
+ for _, peer := range peers {
+ r.addNode(peer.ID)
+ }
+ }
+ // Set the initial hard and soft states after performing all initialization.
+ rn.prevSoftSt = r.softState()
+ rn.prevHardSt = r.hardState()
+
+ return rn, nil
+}
+
+// Tick advances the internal logical clock by a single tick.
+func (rn *RawNode) Tick() {
+ rn.raft.tick()
+}
+
+// Campaign causes this RawNode to transition to candidate state.
+func (rn *RawNode) Campaign() error {
+ return rn.raft.Step(pb.Message{
+ Type: pb.MsgHup,
+ })
+}
+
+// Propose proposes data be appended to the raft log.
+func (rn *RawNode) Propose(data []byte) error {
+ return rn.raft.Step(pb.Message{
+ Type: pb.MsgProp,
+ From: rn.raft.id,
+ Entries: []pb.Entry{
+ {Data: data},
+ }})
+}
+
+// ProposeConfChange proposes a config change.
+func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error {
+ data, err := cc.Marshal()
+ if err != nil {
+ return err
+ }
+ return rn.raft.Step(pb.Message{
+ Type: pb.MsgProp,
+ Entries: []pb.Entry{
+ {Type: pb.EntryConfChange, Data: data},
+ },
+ })
+}
+
+// ApplyConfChange applies a config change to the local node.
+func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
+ if cc.NodeID == None {
+ rn.raft.resetPendingConf()
+ return &pb.ConfState{Nodes: rn.raft.nodes()}
+ }
+ switch cc.Type {
+ case pb.ConfChangeAddNode:
+ rn.raft.addNode(cc.NodeID)
+ case pb.ConfChangeRemoveNode:
+ rn.raft.removeNode(cc.NodeID)
+ case pb.ConfChangeUpdateNode:
+ rn.raft.resetPendingConf()
+ default:
+ panic("unexpected conf type")
+ }
+ return &pb.ConfState{Nodes: rn.raft.nodes()}
+}
+
+// Step advances the state machine using the given message.
+func (rn *RawNode) Step(m pb.Message) error {
+ // Ignore unexpected local messages received over the network.
+ if IsLocalMsg(m) {
+ return ErrStepLocalMsg
+ }
+ if _, ok := rn.raft.prs[m.From]; ok || !IsResponseMsg(m) {
+ return rn.raft.Step(m)
+ }
+ return ErrStepPeerNotFound
+}
+
+// Ready returns the current point-in-time state of this RawNode.
+func (rn *RawNode) Ready() Ready {
+ rd := rn.newReady()
+ rn.raft.msgs = nil
+ return rd
+}
+
+// HasReady is called when the RawNode user needs to check whether any Ready
+// is pending. The checking logic in this method should be consistent with
+// Ready.containsUpdates().
+func (rn *RawNode) HasReady() bool {
+ r := rn.raft
+ if !r.softState().equal(rn.prevSoftSt) {
+ return true
+ }
+ if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
+ return true
+ }
+ if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
+ return true
+ }
+ if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
+ return true
+ }
+ return false
+}
+
+// Advance notifies the RawNode that the application has applied and saved progress in the
+// last Ready results.
+func (rn *RawNode) Advance(rd Ready) {
+ rn.commitReady(rd)
+}
+
+// Status returns the current status of the given group.
+func (rn *RawNode) Status() *Status {
+ status := getStatus(rn.raft)
+ return &status
+}
+
+// ReportUnreachable reports that the given node was not reachable during the last send.
+func (rn *RawNode) ReportUnreachable(id uint64) {
+ _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
+}
+
+// ReportSnapshot reports the status of the sent snapshot.
+func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
+ rej := status == SnapshotFailure
+
+ _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
+}
diff --git a/vendor/src/github.com/coreos/etcd/raft/status.go b/vendor/src/github.com/coreos/etcd/raft/status.go
new file mode 100644
index 0000000000..d18a489541
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/status.go
@@ -0,0 +1,76 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "fmt"
+
+ pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+type Status struct {
+ ID uint64
+
+ pb.HardState
+ SoftState
+
+ Applied uint64
+ Progress map[uint64]Progress
+}
+
+// getStatus gets a copy of the current raft status.
+func getStatus(r *raft) Status {
+ s := Status{ID: r.id}
+ s.HardState = r.hardState()
+ s.SoftState = *r.softState()
+
+ s.Applied = r.raftLog.applied
+
+ if s.RaftState == StateLeader {
+ s.Progress = make(map[uint64]Progress)
+ for id, p := range r.prs {
+ s.Progress[id] = *p
+ }
+ }
+
+ return s
+}
+
+// MarshalJSON translates the raft status into JSON.
+// TODO: try to simplify this by introducing ID type into raft
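+// For a node with no tracked progress the output looks like (illustrative):
+//	{"id":"1","term":2,"vote":"1","commit":4,"lead":"1","raftState":"StateFollower","progress":{}}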
+func (s Status) MarshalJSON() ([]byte, error) {
+ j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"progress":{`,
+ s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState)
+
+ if len(s.Progress) == 0 {
+ j += "}}"
+ } else {
+ for k, v := range s.Progress {
+ subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
+ j += subj
+ }
+ // remove the trailing ","
+ j = j[:len(j)-1] + "}}"
+ }
+ return []byte(j), nil
+}
+
+func (s Status) String() string {
+ b, err := s.MarshalJSON()
+ if err != nil {
+ raftLogger.Panicf("unexpected error: %v", err)
+ }
+ return string(b)
+}
diff --git a/vendor/src/github.com/coreos/etcd/raft/storage.go b/vendor/src/github.com/coreos/etcd/raft/storage.go
new file mode 100644
index 0000000000..f3724162ce
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/storage.go
@@ -0,0 +1,252 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "errors"
+ "sync"
+
+ pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// ErrCompacted is returned by Storage.Entries/Compact when a requested
+// index is unavailable because it predates the last snapshot.
+var ErrCompacted = errors.New("requested index is unavailable due to compaction")
+
+// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
+// index is older than the existing snapshot.
+var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot")
+
+var ErrUnavailable = errors.New("requested entry at index is unavailable")
+
+// Storage is an interface that may be implemented by the application
+// to retrieve log entries from storage.
+//
+// If any Storage method returns an error, the raft instance will
+// become inoperable and refuse to participate in elections; the
+// application is responsible for cleanup and recovery in this case.
+type Storage interface {
+ // InitialState returns the saved HardState and ConfState information.
+ InitialState() (pb.HardState, pb.ConfState, error)
+ // Entries returns a slice of log entries in the range [lo,hi).
+ // MaxSize limits the total size of the log entries returned, but
+ // Entries returns at least one entry if any are available.
+ Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
+ // Term returns the term of entry i, which must be in the range
+ // [FirstIndex()-1, LastIndex()]. The term of the entry before
+ // FirstIndex is retained for matching purposes even though the
+ // rest of that entry may not be available.
+ Term(i uint64) (uint64, error)
+ // LastIndex returns the index of the last entry in the log.
+ LastIndex() (uint64, error)
+ // FirstIndex returns the index of the first log entry that is
+ // possibly available via Entries (older entries have been incorporated
+ // into the latest Snapshot; if storage only contains the dummy entry the
+ // first log entry is not available).
+ FirstIndex() (uint64, error)
+ // Snapshot returns the most recent snapshot.
+ // If the snapshot is temporarily unavailable, it should return
+ // ErrSnapshotTemporarilyUnavailable, so the raft state machine knows that
+ // Storage needs some time to prepare the snapshot and will call Snapshot later.
+ Snapshot() (pb.Snapshot, error)
+}
+
+// MemoryStorage implements the Storage interface backed by an
+// in-memory array.
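+//
+// Typical use (a sketch; error handling omitted): Append entries as they
+// arrive, SetHardState after each Ready, and periodically CreateSnapshot
+// followed by Compact:
+//
+//	ms := NewMemoryStorage()
+//	_ = ms.Append([]pb.Entry{{Term: 1, Index: 1}})
+//	snap, _ := ms.CreateSnapshot(1, nil, nil) // snapshot to hand to followers
+//	_ = ms.Compact(1)                         // drop entries up to that index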
+type MemoryStorage struct {
+ // Protects access to all fields. Most methods of MemoryStorage are
+ // run on the raft goroutine, but Append() is run on an application
+ // goroutine.
+ sync.Mutex
+
+ hardState pb.HardState
+ snapshot pb.Snapshot
+ // ents[i] has raft log position i+snapshot.Metadata.Index
+ ents []pb.Entry
+}
+
+// NewMemoryStorage creates an empty MemoryStorage.
+func NewMemoryStorage() *MemoryStorage {
+ return &MemoryStorage{
+ // When starting from scratch, populate the list with a dummy entry at term zero.
+ ents: make([]pb.Entry, 1),
+ }
+}
+
+// InitialState implements the Storage interface.
+func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
+ return ms.hardState, ms.snapshot.Metadata.ConfState, nil
+}
+
+// SetHardState saves the current HardState.
+func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
+ ms.hardState = st
+ return nil
+}
+
+// Entries implements the Storage interface.
+func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ offset := ms.ents[0].Index
+ if lo <= offset {
+ return nil, ErrCompacted
+ }
+ if hi > ms.lastIndex()+1 {
+ raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
+ }
+ // The log only contains the dummy entry.
+ if len(ms.ents) == 1 {
+ return nil, ErrUnavailable
+ }
+
+ ents := ms.ents[lo-offset : hi-offset]
+ return limitSize(ents, maxSize), nil
+}
+
+// Term implements the Storage interface.
+func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ offset := ms.ents[0].Index
+ if i < offset {
+ return 0, ErrCompacted
+ }
+ return ms.ents[i-offset].Term, nil
+}
+
+// LastIndex implements the Storage interface.
+func (ms *MemoryStorage) LastIndex() (uint64, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ return ms.lastIndex(), nil
+}
+
+func (ms *MemoryStorage) lastIndex() uint64 {
+ return ms.ents[0].Index + uint64(len(ms.ents)) - 1
+}
+
+// FirstIndex implements the Storage interface.
+func (ms *MemoryStorage) FirstIndex() (uint64, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ return ms.firstIndex(), nil
+}
+
+func (ms *MemoryStorage) firstIndex() uint64 {
+ return ms.ents[0].Index + 1
+}
+
+// Snapshot implements the Storage interface.
+func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ return ms.snapshot, nil
+}
+
+// ApplySnapshot overwrites the contents of this Storage object with
+// those of the given snapshot.
+func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error {
+ ms.Lock()
+ defer ms.Unlock()
+
+ // TODO: return ErrSnapOutOfDate?
+ ms.snapshot = snap
+ ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
+ return nil
+}
+
+// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
+// can be used to reconstruct the state at that point.
+// If any configuration changes have been made since the last compaction,
+// the result of the last ApplyConfChange must be passed in.
+func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ if i <= ms.snapshot.Metadata.Index {
+ return pb.Snapshot{}, ErrSnapOutOfDate
+ }
+
+ offset := ms.ents[0].Index
+ if i > ms.lastIndex() {
+ raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex())
+ }
+
+ ms.snapshot.Metadata.Index = i
+ ms.snapshot.Metadata.Term = ms.ents[i-offset].Term
+ if cs != nil {
+ ms.snapshot.Metadata.ConfState = *cs
+ }
+ ms.snapshot.Data = data
+ return ms.snapshot, nil
+}
+
+// Compact discards all log entries prior to compactIndex.
+// It is the application's responsibility to not attempt to compact an index
+// greater than raftLog.applied.
+func (ms *MemoryStorage) Compact(compactIndex uint64) error {
+ ms.Lock()
+ defer ms.Unlock()
+ offset := ms.ents[0].Index
+ if compactIndex <= offset {
+ return ErrCompacted
+ }
+ if compactIndex > ms.lastIndex() {
+ raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
+ }
+
+ i := compactIndex - offset
+ ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i)
+ ents[0].Index = ms.ents[i].Index
+ ents[0].Term = ms.ents[i].Term
+ ents = append(ents, ms.ents[i+1:]...)
+ ms.ents = ents
+ return nil
+}
+
+// Append appends the new entries to storage.
+// TODO (xiangli): ensure the entries are continuous and
+// entries[0].Index > ms.entries[0].Index
+func (ms *MemoryStorage) Append(entries []pb.Entry) error {
+ ms.Lock()
+ defer ms.Unlock()
+ if len(entries) == 0 {
+ return nil
+ }
+ first := ms.ents[0].Index + 1
+ last := entries[0].Index + uint64(len(entries)) - 1
+
+ // shortcut if there is no new entry.
+ if last < first {
+ return nil
+ }
+ // truncate compacted entries
+ if first > entries[0].Index {
+ entries = entries[first-entries[0].Index:]
+ }
+
+ offset := entries[0].Index - ms.ents[0].Index
+ switch {
+ case uint64(len(ms.ents)) > offset:
+ ms.ents = append([]pb.Entry{}, ms.ents[:offset]...)
+ ms.ents = append(ms.ents, entries...)
+ case uint64(len(ms.ents)) == offset:
+ ms.ents = append(ms.ents, entries...)
+ default:
+ raftLogger.Panicf("missing log entry [last: %d, append at: %d]",
+ ms.lastIndex(), entries[0].Index)
+ }
+ return nil
+}
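+
+// Editor's note: a minimal sketch of the typical compaction flow, assuming the
+// NewMemoryStorage constructor defined earlier in this file (the sketch itself
+// is not part of the original code):
+//
+//	ms := NewMemoryStorage()
+//	ms.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}, {Index: 3, Term: 2}})
+//	snap, _ := ms.CreateSnapshot(2, nil, []byte("state")) // snapshot through index 2
+//	_ = ms.Compact(2)                                     // drop entries <= 2
+//	first, _ := ms.FirstIndex()                           // first == 3
+//	_ = snap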
diff --git a/vendor/src/github.com/coreos/etcd/raft/util.go b/vendor/src/github.com/coreos/etcd/raft/util.go
new file mode 100644
index 0000000000..8d4c419001
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/raft/util.go
@@ -0,0 +1,116 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "bytes"
+ "fmt"
+
+ pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+func (st StateType) MarshalJSON() ([]byte, error) {
+ return []byte(fmt.Sprintf("%q", st.String())), nil
+}
+
+// uint64Slice implements sort.Interface
+type uint64Slice []uint64
+
+func (p uint64Slice) Len() int { return len(p) }
+func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func min(a, b uint64) uint64 {
+ if a > b {
+ return b
+ }
+ return a
+}
+
+func max(a, b uint64) uint64 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
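+// IsLocalMsg reports whether m is a message generated and consumed locally,
+// i.e. one that is never sent over the network.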
+func IsLocalMsg(m pb.Message) bool {
+ return m.Type == pb.MsgHup || m.Type == pb.MsgBeat || m.Type == pb.MsgUnreachable || m.Type == pb.MsgSnapStatus || m.Type == pb.MsgCheckQuorum
+}
+
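+// IsResponseMsg reports whether m is a response-type message.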
+func IsResponseMsg(m pb.Message) bool {
+ return m.Type == pb.MsgAppResp || m.Type == pb.MsgVoteResp || m.Type == pb.MsgHeartbeatResp || m.Type == pb.MsgUnreachable
+}
+
+// EntryFormatter can be implemented by the application to provide human-readable formatting
+// of entry data. Nil is a valid EntryFormatter and will use a default format.
+type EntryFormatter func([]byte) string
+
+// DescribeMessage returns a concise human-readable description of a
+// Message for debugging.
+func DescribeMessage(m pb.Message, f EntryFormatter) string {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
+ if m.Reject {
+ fmt.Fprintf(&buf, " Rejected")
+ if m.RejectHint != 0 {
+ fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint)
+ }
+ }
+ if m.Commit != 0 {
+ fmt.Fprintf(&buf, " Commit:%d", m.Commit)
+ }
+ if len(m.Entries) > 0 {
+ fmt.Fprintf(&buf, " Entries:[")
+ for i, e := range m.Entries {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(DescribeEntry(e, f))
+ }
+ fmt.Fprintf(&buf, "]")
+ }
+ if !IsEmptySnap(m.Snapshot) {
+ fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot)
+ }
+ return buf.String()
+}
+
+// DescribeEntry returns a concise human-readable description of an
+// Entry for debugging.
+func DescribeEntry(e pb.Entry, f EntryFormatter) string {
+ var formatted string
+ if e.Type == pb.EntryNormal && f != nil {
+ formatted = f(e.Data)
+ } else {
+ formatted = fmt.Sprintf("%q", e.Data)
+ }
+ return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted)
+}
+
+func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
+ if len(ents) == 0 {
+ return ents
+ }
+ size := ents[0].Size()
+ var limit int
+ for limit = 1; limit < len(ents); limit++ {
+ size += ents[limit].Size()
+ if uint64(size) > maxSize {
+ break
+ }
+ }
+ return ents[:limit]
+}
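+
+// Editor's note: limitSize never drops the first entry, even when that entry
+// alone exceeds maxSize. A small illustration (sketch only):
+//
+//	ents := []pb.Entry{{Index: 1, Data: make([]byte, 100)}, {Index: 2, Data: make([]byte, 100)}}
+//	got := limitSize(ents, 10) // len(got) == 1: the first entry is always kept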
diff --git a/vendor/src/github.com/coreos/etcd/snap/db.go b/vendor/src/github.com/coreos/etcd/snap/db.go
new file mode 100644
index 0000000000..ca68837cb1
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/snap/db.go
@@ -0,0 +1,74 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/coreos/etcd/pkg/fileutil"
+)
+
+// SaveDBFrom saves a snapshot of the database from the given reader. It
+// guarantees that the save operation is atomic.
+func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) error {
+ f, err := ioutil.TempFile(s.dir, "tmp")
+ if err != nil {
+ return err
+ }
+ var n int64
+ n, err = io.Copy(f, r)
+ if err == nil {
+ err = f.Sync()
+ }
+ f.Close()
+ if err != nil {
+ os.Remove(f.Name())
+ return err
+ }
+ fn := path.Join(s.dir, fmt.Sprintf("%016x.snap.db", id))
+ if fileutil.Exist(fn) {
+ os.Remove(f.Name())
+ return nil
+ }
+ err = os.Rename(f.Name(), fn)
+ if err != nil {
+ os.Remove(f.Name())
+ return err
+ }
+
+ plog.Infof("saved database snapshot to disk [total bytes: %d]", n)
+
+ return nil
+}
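+
+// Editor's note: a minimal usage sketch (not part of the original code); the
+// directory name and data stream are hypothetical:
+//
+//	s := New("snapdir")
+//	err := s.SaveDBFrom(strings.NewReader("db contents"), 7)
+//	if err == nil {
+//		p, _ := s.DBFilePath(7) // <snapdir>/0000000000000007.snap.db
+//	}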
+
+// DBFilePath returns the file path for the snapshot of the database with
+// the given id. If the snapshot file does not exist, it returns an error.
+func (s *Snapshotter) DBFilePath(id uint64) (string, error) {
+ fns, err := fileutil.ReadDir(s.dir)
+ if err != nil {
+ return "", err
+ }
+ wfn := fmt.Sprintf("%016x.snap.db", id)
+ for _, fn := range fns {
+ if fn == wfn {
+ return path.Join(s.dir, fn), nil
+ }
+ }
+ return "", fmt.Errorf("snap: snapshot file doesn't exist")
+}
diff --git a/vendor/src/github.com/coreos/etcd/snap/message.go b/vendor/src/github.com/coreos/etcd/snap/message.go
new file mode 100644
index 0000000000..2d2b211061
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/snap/message.go
@@ -0,0 +1,59 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+ "io"
+
+ "github.com/coreos/etcd/raft/raftpb"
+)
+
+// Message is a struct that contains a raft Message and a ReadCloser. The type
+// of raft message MUST be MsgSnap, which contains the raft metadata and an
+// additional data []byte field that contains the snapshot of the actual state
+// machine.
+// Message carries the ReadCloser so that a large snapshot can be streamed;
+// this avoids copying the entire snapshot into a byte slice, which would
+// consume a lot of memory.
+//
+// The user of a Message should close it after sending.
+type Message struct {
+ raftpb.Message
+ ReadCloser io.ReadCloser
+ closeC chan bool
+}
+
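+// NewMessage creates a Message from the given raft message and snapshot
+// ReadCloser.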
+func NewMessage(rs raftpb.Message, rc io.ReadCloser) *Message {
+ return &Message{
+ Message: rs,
+ ReadCloser: rc,
+ closeC: make(chan bool, 1),
+ }
+}
+
+// CloseNotify returns a channel that receives a single value
+// when sending of the message has finished. A value of true
+// indicates that the send was successful.
+func (m Message) CloseNotify() <-chan bool {
+ return m.closeC
+}
+
+func (m Message) CloseWithError(err error) {
+ m.ReadCloser.Close()
+ if err == nil {
+ m.closeC <- true
+ } else {
+ m.closeC <- false
+ }
+}
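+
+// Editor's note: a sketch of the intended send/close handshake; sendSnap is a
+// hypothetical transport function, not part of this package:
+//
+//	m := NewMessage(raftMsg, rc)
+//	go func() {
+//		err := sendSnap(m) // streams m.ReadCloser to the peer
+//		m.CloseWithError(err)
+//	}()
+//	ok := <-m.CloseNotify() // true if the snapshot was sent successfully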
diff --git a/vendor/src/github.com/coreos/etcd/snap/metrics.go b/vendor/src/github.com/coreos/etcd/snap/metrics.go
new file mode 100644
index 0000000000..88aad5dc9c
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/snap/metrics.go
@@ -0,0 +1,41 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ // TODO: save_fsync latency?
+ saveDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "snapshot",
+ Name: "save_total_durations_seconds",
+ Help: "The total latency distributions of save called by snapshot.",
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ marshallingDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "snapshot",
+ Name: "save_marshalling_durations_seconds",
+ Help: "The marshalling cost distributions of save called by snapshot.",
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+)
+
+func init() {
+ prometheus.MustRegister(saveDurations)
+ prometheus.MustRegister(marshallingDurations)
+}
diff --git a/vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go b/vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go
new file mode 100644
index 0000000000..5d1d21ab31
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go
@@ -0,0 +1,332 @@
+// Code generated by protoc-gen-gogo.
+// source: snap.proto
+// DO NOT EDIT!
+
+/*
+ Package snappb is a generated protocol buffer package.
+
+ It is generated from these files:
+ snap.proto
+
+ It has these top-level messages:
+ Snapshot
+*/
+package snappb
+
+import (
+ "fmt"
+
+ proto "github.com/gogo/protobuf/proto"
+)
+
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Snapshot struct {
+ Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
+ Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*Snapshot)(nil), "snappb.snapshot")
+}
+func (m *Snapshot) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintSnap(data, i, uint64(m.Crc))
+ if m.Data != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintSnap(data, i, uint64(len(m.Data)))
+ i += copy(data[i:], m.Data)
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(data[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func encodeFixed64Snap(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Snap(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintSnap(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *Snapshot) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovSnap(uint64(m.Crc))
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovSnap(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovSnap(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozSnap(x uint64) (n int) {
+ return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Snapshot) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: snapshot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType)
+ }
+ m.Crc = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Crc |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSnap
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSnap(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSnap
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipSnap(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthSnap
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipSnap(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow")
+)
diff --git a/vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto b/vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto
new file mode 100644
index 0000000000..cd3d21d0ee
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto
@@ -0,0 +1,14 @@
+syntax = "proto2";
+package snappb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message snapshot {
+ optional uint32 crc = 1 [(gogoproto.nullable) = false];
+ optional bytes data = 2;
+}
diff --git a/vendor/src/github.com/coreos/etcd/snap/snapshotter.go b/vendor/src/github.com/coreos/etcd/snap/snapshotter.go
new file mode 100644
index 0000000000..4e06483a88
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/snap/snapshotter.go
@@ -0,0 +1,189 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package snap stores raft nodes' states with snapshots.
+package snap
+
+import (
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io/ioutil"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/coreos/etcd/pkg/pbutil"
+ "github.com/coreos/etcd/raft"
+ "github.com/coreos/etcd/raft/raftpb"
+ "github.com/coreos/etcd/snap/snappb"
+
+ "github.com/coreos/pkg/capnslog"
+)
+
+const (
+ snapSuffix = ".snap"
+)
+
+var (
+ plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "snap")
+
+ ErrNoSnapshot = errors.New("snap: no available snapshot")
+ ErrEmptySnapshot = errors.New("snap: empty snapshot")
+ ErrCRCMismatch = errors.New("snap: crc mismatch")
+ crcTable = crc32.MakeTable(crc32.Castagnoli)
+)
+
+type Snapshotter struct {
+ dir string
+}
+
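+// New returns a Snapshotter that reads and saves snapshots in dir.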
+func New(dir string) *Snapshotter {
+ return &Snapshotter{
+ dir: dir,
+ }
+}
+
+func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error {
+ if raft.IsEmptySnap(snapshot) {
+ return nil
+ }
+ return s.save(&snapshot)
+}
+
+func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error {
+ start := time.Now()
+
+ fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix)
+ b := pbutil.MustMarshal(snapshot)
+ crc := crc32.Update(0, crcTable, b)
+ snap := snappb.Snapshot{Crc: crc, Data: b}
+ d, err := snap.Marshal()
+ if err != nil {
+ return err
+ }
+ marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second))
+
+ err = ioutil.WriteFile(path.Join(s.dir, fname), d, 0666)
+ if err == nil {
+ saveDurations.Observe(float64(time.Since(start)) / float64(time.Second))
+ }
+ return err
+}
+
+func (s *Snapshotter) Load() (*raftpb.Snapshot, error) {
+ names, err := s.snapNames()
+ if err != nil {
+ return nil, err
+ }
+ var snap *raftpb.Snapshot
+ for _, name := range names {
+ if snap, err = loadSnap(s.dir, name); err == nil {
+ break
+ }
+ }
+ if err != nil {
+ return nil, ErrNoSnapshot
+ }
+ return snap, nil
+}
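+
+// Editor's note: a save/load round trip, as a sketch ("snapdir" is a
+// hypothetical directory):
+//
+//	s := New("snapdir")
+//	meta := raftpb.SnapshotMetadata{Index: 10, Term: 2}
+//	_ = s.SaveSnap(raftpb.Snapshot{Metadata: meta, Data: []byte("state")})
+//	loaded, err := s.Load() // newest readable snapshot, or ErrNoSnapshot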
+
+func loadSnap(dir, name string) (*raftpb.Snapshot, error) {
+ fpath := path.Join(dir, name)
+ snap, err := Read(fpath)
+ if err != nil {
+ renameBroken(fpath)
+ }
+ return snap, err
+}
+
+// Read reads the snapshot named by snapname and returns the snapshot.
+func Read(snapname string) (*raftpb.Snapshot, error) {
+ b, err := ioutil.ReadFile(snapname)
+ if err != nil {
+ plog.Errorf("cannot read file %v: %v", snapname, err)
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ plog.Errorf("unexpected empty snapshot")
+ return nil, ErrEmptySnapshot
+ }
+
+ var serializedSnap snappb.Snapshot
+ if err = serializedSnap.Unmarshal(b); err != nil {
+ plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
+ return nil, err
+ }
+
+ if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
+ plog.Errorf("unexpected empty snapshot")
+ return nil, ErrEmptySnapshot
+ }
+
+ crc := crc32.Update(0, crcTable, serializedSnap.Data)
+ if crc != serializedSnap.Crc {
+ plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname)
+ return nil, ErrCRCMismatch
+ }
+
+ var snap raftpb.Snapshot
+ if err = snap.Unmarshal(serializedSnap.Data); err != nil {
+ plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
+ return nil, err
+ }
+ return &snap, nil
+}
+
+// snapNames returns the filenames of the snapshots in logical time order (from newest to oldest).
+// If no snapshots are available, ErrNoSnapshot will be returned.
+func (s *Snapshotter) snapNames() ([]string, error) {
+ dir, err := os.Open(s.dir)
+ if err != nil {
+ return nil, err
+ }
+ defer dir.Close()
+ names, err := dir.Readdirnames(-1)
+ if err != nil {
+ return nil, err
+ }
+ snaps := checkSuffix(names)
+ if len(snaps) == 0 {
+ return nil, ErrNoSnapshot
+ }
+ sort.Sort(sort.Reverse(sort.StringSlice(snaps)))
+ return snaps, nil
+}
+
+func checkSuffix(names []string) []string {
+ snaps := []string{}
+ for i := range names {
+ if strings.HasSuffix(names[i], snapSuffix) {
+ snaps = append(snaps, names[i])
+ } else {
+ plog.Warningf("skipped unexpected non snapshot file %v", names[i])
+ }
+ }
+ return snaps
+}
+
+func renameBroken(path string) {
+ brokenPath := path + ".broken"
+ if err := os.Rename(path, brokenPath); err != nil {
+ plog.Warningf("cannot rename broken snapshot file %v to %v: %v", path, brokenPath, err)
+ }
+}
diff --git a/vendor/src/github.com/coreos/etcd/wal/decoder.go b/vendor/src/github.com/coreos/etcd/wal/decoder.go
new file mode 100644
index 0000000000..f75c919fba
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/wal/decoder.go
@@ -0,0 +1,103 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "bufio"
+ "encoding/binary"
+ "hash"
+ "io"
+ "sync"
+
+ "github.com/coreos/etcd/pkg/crc"
+ "github.com/coreos/etcd/pkg/pbutil"
+ "github.com/coreos/etcd/raft/raftpb"
+ "github.com/coreos/etcd/wal/walpb"
+)
+
+type decoder struct {
+ mu sync.Mutex
+ br *bufio.Reader
+
+ c io.Closer
+ crc hash.Hash32
+}
+
+func newDecoder(rc io.ReadCloser) *decoder {
+ return &decoder{
+ br: bufio.NewReader(rc),
+ c: rc,
+ crc: crc.New(0, crcTable),
+ }
+}
+
+func (d *decoder) decode(rec *walpb.Record) error {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ rec.Reset()
+ l, err := readInt64(d.br)
+ if err != nil {
+ return err
+ }
+ data := make([]byte, l)
+ if _, err = io.ReadFull(d.br, data); err != nil {
+ // ReadFull returns io.EOF only if no bytes were read;
+ // the decoder should treat this as io.ErrUnexpectedEOF instead.
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
+ if err := rec.Unmarshal(data); err != nil {
+ return err
+ }
+ // skip crc checking if the record type is crcType
+ if rec.Type == crcType {
+ return nil
+ }
+ d.crc.Write(rec.Data)
+ return rec.Validate(d.crc.Sum32())
+}
+
+func (d *decoder) updateCRC(prevCrc uint32) {
+ d.crc = crc.New(prevCrc, crcTable)
+}
+
+func (d *decoder) lastCRC() uint32 {
+ return d.crc.Sum32()
+}
+
+func (d *decoder) close() error {
+ return d.c.Close()
+}
+
+func mustUnmarshalEntry(d []byte) raftpb.Entry {
+ var e raftpb.Entry
+ pbutil.MustUnmarshal(&e, d)
+ return e
+}
+
+func mustUnmarshalState(d []byte) raftpb.HardState {
+ var s raftpb.HardState
+ pbutil.MustUnmarshal(&s, d)
+ return s
+}
+
+func readInt64(r io.Reader) (int64, error) {
+ var n int64
+ err := binary.Read(r, binary.LittleEndian, &n)
+ return n, err
+}
diff --git a/vendor/src/github.com/coreos/etcd/wal/doc.go b/vendor/src/github.com/coreos/etcd/wal/doc.go
new file mode 100644
index 0000000000..769b522f04
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/wal/doc.go
@@ -0,0 +1,68 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package wal provides an implementation of a write ahead log that is used by
+etcd.
+
+A WAL is created at a particular directory and is made up of a number of
+segmented WAL files. The raft state and entries are appended to each file
+with the Save method:
+
+ metadata := []byte{}
+ w, err := wal.Create("/var/lib/etcd", metadata)
+ ...
+ err := w.Save(s, ents)
+
+After saving a raft snapshot to disk, the SaveSnapshot method should be called
+to record it, so that the WAL can be matched with the saved snapshot on restart.
+
+ err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
+
+When a user has finished using a WAL it must be closed:
+
+ w.Close()
+
+WAL files are placed inside of the directory in the following format:
+$seq-$index.wal
+
+The first WAL file to be created will be 0000000000000000-0000000000000000.wal,
+indicating an initial sequence of 0 and an initial raft index of 0. The first
+entry written to the WAL MUST have raft index 0.
+
+WAL cuts its current wal file if its size exceeds 64MB (see segmentSizeBytes in
+wal.go). This will increment an internal sequence number and cause a new file
+to be created. If the last raft index saved was 0x20 and this is the first time
+cut has been called on this WAL, then the sequence will increment from 0x0 to
+0x1. The new file will be: 0000000000000001-0000000000000021.wal.
+If a second cut happens 0x10 entries later, the new file will be called:
+0000000000000002-0000000000000031.wal.
+
+At a later time a WAL can be opened at a particular snapshot. If there is no
+snapshot, an empty snapshot should be passed in.
+
+ w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
+ ...
+
+The snapshot must have been written to the WAL.
+
+Additional items cannot be Saved to this WAL until all of the items from the given
+snapshot to the end of the WAL are read first:
+
+ metadata, state, ents, err := w.ReadAll()
+
+This will give you the metadata, the last raftpb.HardState, and the slice of
+raftpb.Entry items in the log.
+
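+Editor's note: for read-only inspection, OpenForRead (defined in wal.go) opens
+the same files without requiring exclusive file locks; a sketch, reusing the
+snapshot from above:
+
+ w, err := wal.OpenForRead("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
+ ...
+ metadata, state, ents, err := w.ReadAll()
+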
+*/
+package wal
diff --git a/vendor/src/github.com/coreos/etcd/wal/encoder.go b/vendor/src/github.com/coreos/etcd/wal/encoder.go
new file mode 100644
index 0000000000..f5b73fe12b
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/wal/encoder.go
@@ -0,0 +1,89 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "bufio"
+ "encoding/binary"
+ "hash"
+ "io"
+ "sync"
+
+ "github.com/coreos/etcd/pkg/crc"
+ "github.com/coreos/etcd/wal/walpb"
+)
+
+type encoder struct {
+ mu sync.Mutex
+ bw *bufio.Writer
+
+ crc hash.Hash32
+ buf []byte
+ uint64buf []byte
+}
+
+func newEncoder(w io.Writer, prevCrc uint32) *encoder {
+ return &encoder{
+ bw: bufio.NewWriter(w),
+ crc: crc.New(prevCrc, crcTable),
+ // 1MB buffer
+ buf: make([]byte, 1024*1024),
+ uint64buf: make([]byte, 8),
+ }
+}
+
+func (e *encoder) encode(rec *walpb.Record) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ e.crc.Write(rec.Data)
+ rec.Crc = e.crc.Sum32()
+ var (
+ data []byte
+ err error
+ n int
+ )
+
+ if rec.Size() > len(e.buf) {
+ data, err = rec.Marshal()
+ if err != nil {
+ return err
+ }
+ } else {
+ n, err = rec.MarshalTo(e.buf)
+ if err != nil {
+ return err
+ }
+ data = e.buf[:n]
+ }
+ if err = writeInt64(e.bw, int64(len(data)), e.uint64buf); err != nil {
+ return err
+ }
+ _, err = e.bw.Write(data)
+ return err
+}
+
+func (e *encoder) flush() error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ return e.bw.Flush()
+}
+
+func writeInt64(w io.Writer, n int64, buf []byte) error {
+ // http://golang.org/src/encoding/binary/binary.go
+ binary.LittleEndian.PutUint64(buf, uint64(n))
+ _, err := w.Write(buf)
+ return err
+}
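+
+// Editor's note: on disk every record is framed as a little-endian int64
+// length followed by the marshaled walpb.Record; decoder.decode reads the
+// frame back with readInt64 and io.ReadFull. A sketch of writing one frame
+// (buf is a hypothetical bytes.Buffer):
+//
+//	e := newEncoder(&buf, 0)
+//	_ = e.encode(&walpb.Record{Type: crcType, Crc: 0})
+//	_ = e.flush() // buf now holds: 8-byte length | record bytes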
diff --git a/vendor/src/github.com/coreos/etcd/wal/metrics.go b/vendor/src/github.com/coreos/etcd/wal/metrics.go
new file mode 100644
index 0000000000..ed270fac63
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/wal/metrics.go
@@ -0,0 +1,38 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "wal",
+ Name: "fsync_durations_seconds",
+ Help: "The latency distributions of fsync called by wal.",
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+ lastIndexSaved = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "wal",
+ Name: "last_index_saved",
+ Help: "The index of the last entry saved by wal.",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(syncDurations)
+ prometheus.MustRegister(lastIndexSaved)
+}
diff --git a/vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go b/vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go
new file mode 100644
index 0000000000..513c6d17d9
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go
@@ -0,0 +1,45 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import "io"
+
+type multiReadCloser struct {
+ closers []io.Closer
+ reader io.Reader
+}
+
+func (mc *multiReadCloser) Close() error {
+ var err error
+ for i := range mc.closers {
+ err = mc.closers[i].Close()
+ }
+ return err
+}
+
+func (mc *multiReadCloser) Read(p []byte) (int, error) {
+ return mc.reader.Read(p)
+}
+
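+// MultiReadCloser returns an io.ReadCloser that reads sequentially from the
+// given ReadClosers and, on Close, closes all of them.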
+func MultiReadCloser(readClosers ...io.ReadCloser) io.ReadCloser {
+ cs := make([]io.Closer, len(readClosers))
+ rs := make([]io.Reader, len(readClosers))
+ for i := range readClosers {
+ cs[i] = readClosers[i]
+ rs[i] = readClosers[i]
+ }
+ r := io.MultiReader(rs...)
+ return &multiReadCloser{cs, r}
+}
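+
+// Editor's note: a minimal usage sketch (file names borrowed from the wal
+// naming scheme):
+//
+//	f1, _ := os.Open("0000000000000000-0000000000000000.wal")
+//	f2, _ := os.Open("0000000000000001-0000000000000021.wal")
+//	rc := MultiReadCloser(f1, f2)
+//	defer rc.Close() // closes both files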
diff --git a/vendor/src/github.com/coreos/etcd/wal/repair.go b/vendor/src/github.com/coreos/etcd/wal/repair.go
new file mode 100644
index 0000000000..bcc22ef081
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/wal/repair.go
@@ -0,0 +1,106 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "io"
+ "os"
+ "path"
+
+ "github.com/coreos/etcd/pkg/fileutil"
+ "github.com/coreos/etcd/wal/walpb"
+)
+
+// Repair tries to repair an io.ErrUnexpectedEOF in the last wal file by
+// truncating the torn record. It reports whether the repair succeeded.
+func Repair(dirpath string) bool {
+ f, err := openLast(dirpath)
+ if err != nil {
+ return false
+ }
+ defer f.Close()
+
+ n := 0
+ rec := &walpb.Record{}
+
+ decoder := newDecoder(f)
+ defer decoder.close()
+ for {
+ err := decoder.decode(rec)
+ switch err {
+ case nil:
+ n += 8 + rec.Size()
+ // update crc of the decoder when necessary
+ switch rec.Type {
+ case crcType:
+ crc := decoder.crc.Sum32()
+ // The current crc of the decoder must match the crc of the record.
+ // There is no need to match a zero crc, since the decoder is new in this case.
+ if crc != 0 && rec.Validate(crc) != nil {
+ return false
+ }
+ decoder.updateCRC(rec.Crc)
+ }
+ continue
+ case io.EOF:
+ return true
+ case io.ErrUnexpectedEOF:
+ plog.Noticef("repairing %v", f.Name())
+ bf, bferr := os.Create(f.Name() + ".broken")
+ if bferr != nil {
+ plog.Errorf("could not repair %v, failed to create backup file", f.Name())
+ return false
+ }
+ defer bf.Close()
+
+ if _, err = f.Seek(0, os.SEEK_SET); err != nil {
+ plog.Errorf("could not repair %v, failed to read file", f.Name())
+ return false
+ }
+
+ if _, err = io.Copy(bf, f); err != nil {
+ plog.Errorf("could not repair %v, failed to copy file", f.Name())
+ return false
+ }
+
+ if err = f.Truncate(int64(n)); err != nil {
+ plog.Errorf("could not repair %v, failed to truncate file", f.Name())
+ return false
+ }
+ if err = f.Sync(); err != nil {
+ plog.Errorf("could not repair %v, failed to sync file", f.Name())
+ return false
+ }
+ return true
+ default:
+ plog.Errorf("could not repair error (%v)", err)
+ return false
+ }
+ }
+}
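+
+// Editor's note: Repair is meant for the case where ReadAll fails with
+// io.ErrUnexpectedEOF on a torn final record. A sketch of the retry flow
+// (dirpath is hypothetical):
+//
+//	w, _ := Open(dirpath, walpb.Snapshot{})
+//	if _, _, _, err := w.ReadAll(); err == io.ErrUnexpectedEOF {
+//		w.Close()
+//		if !Repair(dirpath) {
+//			// the wal is unrecoverable
+//		}
+//	}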
+
+// openLast opens the last wal file for read and write.
+func openLast(dirpath string) (*os.File, error) {
+ names, err := fileutil.ReadDir(dirpath)
+ if err != nil {
+ return nil, err
+ }
+ names = checkWalNames(names)
+ if len(names) == 0 {
+ return nil, ErrFileNotFound
+ }
+ last := path.Join(dirpath, names[len(names)-1])
+ return os.OpenFile(last, os.O_RDWR, 0)
+}
diff --git a/vendor/src/github.com/coreos/etcd/wal/util.go b/vendor/src/github.com/coreos/etcd/wal/util.go
new file mode 100644
index 0000000000..9588b6ec08
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/wal/util.go
@@ -0,0 +1,93 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/coreos/etcd/pkg/fileutil"
+)
+
+var (
+ badWalName = errors.New("bad wal name")
+)
+
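+// Exist reports whether dirpath exists and contains at least one file.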
+func Exist(dirpath string) bool {
+ names, err := fileutil.ReadDir(dirpath)
+ if err != nil {
+ return false
+ }
+ return len(names) != 0
+}
+
+// searchIndex returns the last array index of names whose raft index section is
+// equal to or smaller than the given index.
+// The given names MUST be sorted.
+func searchIndex(names []string, index uint64) (int, bool) {
+ for i := len(names) - 1; i >= 0; i-- {
+ name := names[i]
+ _, curIndex, err := parseWalName(name)
+ if err != nil {
+ plog.Panicf("parse correct name should never fail: %v", err)
+ }
+ if index >= curIndex {
+ return i, true
+ }
+ }
+ return -1, false
+}
+
+// isValidSeq checks whether seq increases continuously.
+// names should have been sorted based on sequence number.
+func isValidSeq(names []string) bool {
+ var lastSeq uint64
+ for _, name := range names {
+ curSeq, _, err := parseWalName(name)
+ if err != nil {
+ plog.Panicf("parse correct name should never fail: %v", err)
+ }
+ if lastSeq != 0 && lastSeq != curSeq-1 {
+ return false
+ }
+ lastSeq = curSeq
+ }
+ return true
+}
+
+func checkWalNames(names []string) []string {
+ wnames := make([]string, 0)
+ for _, name := range names {
+ if _, _, err := parseWalName(name); err != nil {
+ plog.Warningf("ignored file %v in wal", name)
+ continue
+ }
+ wnames = append(wnames, name)
+ }
+ return wnames
+}
+
+func parseWalName(str string) (seq, index uint64, err error) {
+ if !strings.HasSuffix(str, ".wal") {
+ return 0, 0, badWalName
+ }
+ _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index)
+ return seq, index, err
+}
+
+func walName(seq, index uint64) string {
+ return fmt.Sprintf("%016x-%016x.wal", seq, index)
+}
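+
+// Editor's note: walName and parseWalName are inverses; a quick illustration:
+//
+//	name := walName(0x1, 0x21)          // "0000000000000001-0000000000000021.wal"
+//	seq, index, _ := parseWalName(name) // seq == 0x1, index == 0x21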
diff --git a/vendor/src/github.com/coreos/etcd/wal/wal.go b/vendor/src/github.com/coreos/etcd/wal/wal.go
new file mode 100644
index 0000000000..f9a58ca38b
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/wal/wal.go
@@ -0,0 +1,562 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "os"
+ "path"
+ "reflect"
+ "sync"
+ "time"
+
+ "github.com/coreos/etcd/pkg/fileutil"
+ "github.com/coreos/etcd/pkg/pbutil"
+ "github.com/coreos/etcd/raft"
+ "github.com/coreos/etcd/raft/raftpb"
+ "github.com/coreos/etcd/wal/walpb"
+
+ "github.com/coreos/pkg/capnslog"
+)
+
+const (
+ metadataType int64 = iota + 1
+ entryType
+ stateType
+ crcType
+ snapshotType
+
+ // the owner can make/remove files inside the directory
+ privateDirMode = 0700
+
+ // the expected size of each wal segment file.
+ // the actual size might be bigger than this.
+ segmentSizeBytes = 64 * 1000 * 1000 // 64MB
+)
+
+var (
+ plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "wal")
+
+ ErrMetadataConflict = errors.New("wal: conflicting metadata found")
+ ErrFileNotFound = errors.New("wal: file not found")
+ ErrCRCMismatch = errors.New("wal: crc mismatch")
+ ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
+ ErrSnapshotNotFound = errors.New("wal: snapshot not found")
+ crcTable = crc32.MakeTable(crc32.Castagnoli)
+)
+
+// WAL is a logical representation of the stable storage.
+// WAL is either in read mode or append mode but not both.
+// A newly created WAL is in append mode, and ready for appending records.
+// A just opened WAL is in read mode, and ready for reading records.
+// The WAL will be ready for appending after reading out all the previous records.
+type WAL struct {
+ dir string // the living directory of the underlying files
+ metadata []byte // metadata recorded at the head of each WAL
+ state raftpb.HardState // hardstate recorded at the head of WAL
+
+ start walpb.Snapshot // snapshot to start reading
+ decoder *decoder // decoder to decode records
+
+ mu sync.Mutex
+ f *os.File // underlying file opened for appending and syncing
+ seq uint64 // sequence of the wal file currently used for writes
+ enti uint64 // index of the last entry saved to the wal
+ encoder *encoder // encoder to encode records
+
+ locks []fileutil.Lock // the file locks the WAL is holding (ordered by increasing file name)
+}
+
+// Create creates a WAL ready for appending records. The given metadata is
+// recorded at the head of each WAL file, and can be retrieved with ReadAll.
+func Create(dirpath string, metadata []byte) (*WAL, error) {
+ if Exist(dirpath) {
+ return nil, os.ErrExist
+ }
+
+ if err := os.MkdirAll(dirpath, privateDirMode); err != nil {
+ return nil, err
+ }
+
+ p := path.Join(dirpath, walName(0, 0))
+ f, err := os.OpenFile(p, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
+ if err != nil {
+ return nil, err
+ }
+ l, err := fileutil.NewLock(f.Name())
+ if err != nil {
+ return nil, err
+ }
+ if err = l.Lock(); err != nil {
+ return nil, err
+ }
+
+ w := &WAL{
+ dir: dirpath,
+ metadata: metadata,
+ seq: 0,
+ f: f,
+ encoder: newEncoder(f, 0),
+ }
+ w.locks = append(w.locks, l)
+ if err := w.saveCrc(0); err != nil {
+ return nil, err
+ }
+ if err := w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
+ return nil, err
+ }
+ if err := w.SaveSnapshot(walpb.Snapshot{}); err != nil {
+ return nil, err
+ }
+ return w, nil
+}
+
+// Open opens the WAL at the given snap.
+// The snap SHOULD have been previously saved to the WAL, or the following
+// ReadAll will fail.
+// The returned WAL is ready to read and the first record will be the one after
+// the given snap. The WAL cannot be appended to before reading out all of its
+// previous records.
+func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) {
+ return openAtIndex(dirpath, snap, true)
+}
+
+// OpenForRead opens the wal files for reading only.
+// Writing to a read-only wal panics.
+func OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error) {
+ return openAtIndex(dirpath, snap, false)
+}
+
+func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) {
+ names, err := fileutil.ReadDir(dirpath)
+ if err != nil {
+ return nil, err
+ }
+ names = checkWalNames(names)
+ if len(names) == 0 {
+ return nil, ErrFileNotFound
+ }
+
+ nameIndex, ok := searchIndex(names, snap.Index)
+ if !ok || !isValidSeq(names[nameIndex:]) {
+ return nil, ErrFileNotFound
+ }
+
+ // open the wal files for reading
+ rcs := make([]io.ReadCloser, 0)
+ ls := make([]fileutil.Lock, 0)
+ for _, name := range names[nameIndex:] {
+ f, err := os.Open(path.Join(dirpath, name))
+ if err != nil {
+ return nil, err
+ }
+ l, err := fileutil.NewLock(f.Name())
+ if err != nil {
+ return nil, err
+ }
+ err = l.TryLock()
+ if err != nil {
+ if write {
+ return nil, err
+ }
+ }
+ rcs = append(rcs, f)
+ ls = append(ls, l)
+ }
+ rc := MultiReadCloser(rcs...)
+
+ // create a WAL ready for reading
+ w := &WAL{
+ dir: dirpath,
+ start: snap,
+ decoder: newDecoder(rc),
+ locks: ls,
+ }
+
+ if write {
+ // open the last wal file for appending
+ seq, _, err := parseWalName(names[len(names)-1])
+ if err != nil {
+ rc.Close()
+ return nil, err
+ }
+ last := path.Join(dirpath, names[len(names)-1])
+
+ f, err := os.OpenFile(last, os.O_WRONLY|os.O_APPEND, 0)
+ if err != nil {
+ rc.Close()
+ return nil, err
+ }
+ err = fileutil.Preallocate(f, segmentSizeBytes)
+ if err != nil {
+ rc.Close()
+ plog.Errorf("failed to allocate space when creating new wal file (%v)", err)
+ return nil, err
+ }
+
+ w.f = f
+ w.seq = seq
+ }
+
+ return w, nil
+}
+
+// ReadAll reads out records of the current WAL.
+// If opened in write mode, it must read out all records until EOF, or an error
+// will be returned.
+// If opened in read mode, it will try to read all records if possible.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If the loaded snap doesn't match the expected one, it will return
+// all the records along with the error ErrSnapshotMismatch.
+// TODO: detect not-last-snap error.
+// TODO: maybe loosen the matching check.
+// After ReadAll, the WAL will be ready for appending new records.
+func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ rec := &walpb.Record{}
+ decoder := w.decoder
+
+ var match bool
+ for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+ switch rec.Type {
+ case entryType:
+ e := mustUnmarshalEntry(rec.Data)
+ if e.Index > w.start.Index {
+ ents = append(ents[:e.Index-w.start.Index-1], e)
+ }
+ w.enti = e.Index
+ case stateType:
+ state = mustUnmarshalState(rec.Data)
+ case metadataType:
+ if metadata != nil && !reflect.DeepEqual(metadata, rec.Data) {
+ state.Reset()
+ return nil, state, nil, ErrMetadataConflict
+ }
+ metadata = rec.Data
+ case crcType:
+ crc := decoder.crc.Sum32()
+ // The current crc of the decoder must match the crc of the record.
+ // There is no need to match a zero crc, since the decoder is new in this case.
+ if crc != 0 && rec.Validate(crc) != nil {
+ state.Reset()
+ return nil, state, nil, ErrCRCMismatch
+ }
+ decoder.updateCRC(rec.Crc)
+ case snapshotType:
+ var snap walpb.Snapshot
+ pbutil.MustUnmarshal(&snap, rec.Data)
+ if snap.Index == w.start.Index {
+ if snap.Term != w.start.Term {
+ state.Reset()
+ return nil, state, nil, ErrSnapshotMismatch
+ }
+ match = true
+ }
+ default:
+ state.Reset()
+ return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
+ }
+ }
+
+ switch w.f {
+ case nil:
+ // We do not have to read out all entries in read mode.
+ // The last record may be a partially written one, so
+ // io.ErrUnexpectedEOF might be returned.
+ if err != io.EOF && err != io.ErrUnexpectedEOF {
+ state.Reset()
+ return nil, state, nil, err
+ }
+ default:
+ // We must read all of the entries if WAL is opened in write mode.
+ if err != io.EOF {
+ state.Reset()
+ return nil, state, nil, err
+ }
+ }
+
+ err = nil
+ if !match {
+ err = ErrSnapshotNotFound
+ }
+
+ // close decoder, disable reading
+ w.decoder.close()
+ w.start = walpb.Snapshot{}
+
+ w.metadata = metadata
+
+ if w.f != nil {
+ // create encoder (chain crc with the decoder), enable appending
+ w.encoder = newEncoder(w.f, w.decoder.lastCRC())
+ w.decoder = nil
+ lastIndexSaved.Set(float64(w.enti))
+ }
+
+ return metadata, state, ents, err
+}
+
+// cut closes the current file being written and creates a new one ready for
+// appending. cut first creates a temp wal file and writes the necessary
+// headers into it. Then cut atomically renames the temp wal file to a wal file.
+func (w *WAL) cut() error {
+ // close old wal file
+ if err := w.sync(); err != nil {
+ return err
+ }
+ if err := w.f.Close(); err != nil {
+ return err
+ }
+
+ fpath := path.Join(w.dir, walName(w.seq+1, w.enti+1))
+ ftpath := fpath + ".tmp"
+
+ // create a temp wal file with name sequence + 1, or truncate the existing one
+ ft, err := os.OpenFile(ftpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_TRUNC, 0600)
+ if err != nil {
+ return err
+ }
+
+ // update writer and save the previous crc
+ w.f = ft
+ prevCrc := w.encoder.crc.Sum32()
+ w.encoder = newEncoder(w.f, prevCrc)
+ if err = w.saveCrc(prevCrc); err != nil {
+ return err
+ }
+ if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
+ return err
+ }
+ if err = w.saveState(&w.state); err != nil {
+ return err
+ }
+ // close temp wal file
+ if err = w.sync(); err != nil {
+ return err
+ }
+ if err = w.f.Close(); err != nil {
+ return err
+ }
+
+ // atomically move temp wal file to wal file
+ if err = os.Rename(ftpath, fpath); err != nil {
+ return err
+ }
+
+ // open the wal file and update writer again
+ f, err := os.OpenFile(fpath, os.O_WRONLY|os.O_APPEND, 0600)
+ if err != nil {
+ return err
+ }
+ if err = fileutil.Preallocate(f, segmentSizeBytes); err != nil {
+ plog.Errorf("failed to allocate space when creating new wal file (%v)", err)
+ return err
+ }
+
+ w.f = f
+ prevCrc = w.encoder.crc.Sum32()
+ w.encoder = newEncoder(w.f, prevCrc)
+
+ // lock the new wal file
+ l, err := fileutil.NewLock(f.Name())
+ if err != nil {
+ return err
+ }
+
+ if err := l.Lock(); err != nil {
+ return err
+ }
+ w.locks = append(w.locks, l)
+
+ // increase the wal seq
+ w.seq++
+
+ plog.Infof("segmented wal file %v is created", fpath)
+ return nil
+}
+
+func (w *WAL) sync() error {
+ if w.encoder != nil {
+ if err := w.encoder.flush(); err != nil {
+ return err
+ }
+ }
+ start := time.Now()
+ err := fileutil.Fdatasync(w.f)
+ syncDurations.Observe(float64(time.Since(start)) / float64(time.Second))
+ return err
+}
+
+// ReleaseLockTo releases the locks whose index is smaller than the given
+// index, except the largest one among them.
+// For example, if the WAL is holding locks 1,2,3,4,5,6, ReleaseLockTo(4) will
+// release locks 1 and 2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4.
+func (w *WAL) ReleaseLockTo(index uint64) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ var smaller int
+ found := false
+
+ for i, l := range w.locks {
+ _, lockIndex, err := parseWalName(path.Base(l.Name()))
+ if err != nil {
+ return err
+ }
+ if lockIndex >= index {
+ smaller = i - 1
+ found = true
+ break
+ }
+ }
+
+ // If no lock index is greater than the release index, we can
+ // release all locks up to the last one (exclusive).
+ if !found && len(w.locks) != 0 {
+ smaller = len(w.locks) - 1
+ }
+
+ if smaller <= 0 {
+ return nil
+ }
+
+ for i := 0; i < smaller; i++ {
+ w.locks[i].Unlock()
+ w.locks[i].Destroy()
+ }
+ w.locks = w.locks[smaller:]
+
+ return nil
+}
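+
+// Editor's note: a sketch of the release semantics documented above, assuming
+// w currently holds locks for indexes 1 through 6:
+//
+//	_ = w.ReleaseLockTo(4) // releases locks 1 and 2, keeps 3,4,5,6
+//	_ = w.ReleaseLockTo(5) // then releases 3, keeps 4,5,6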
+
+func (w *WAL) Close() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.f != nil {
+ if err := w.sync(); err != nil {
+ return err
+ }
+ if err := w.f.Close(); err != nil {
+ return err
+ }
+ }
+ for _, l := range w.locks {
+ err := l.Unlock()
+ if err != nil {
+ plog.Errorf("failed to unlock during closing wal: %s", err)
+ }
+ err = l.Destroy()
+ if err != nil {
+ plog.Errorf("failed to destroy lock during closing wal: %s", err)
+ }
+ }
+ return nil
+}
+
+func (w *WAL) saveEntry(e *raftpb.Entry) error {
+ // TODO: add MustMarshalTo to reduce one allocation.
+ b := pbutil.MustMarshal(e)
+ rec := &walpb.Record{Type: entryType, Data: b}
+ if err := w.encoder.encode(rec); err != nil {
+ return err
+ }
+ w.enti = e.Index
+ lastIndexSaved.Set(float64(w.enti))
+ return nil
+}
+
+func (w *WAL) saveState(s *raftpb.HardState) error {
+ if raft.IsEmptyHardState(*s) {
+ return nil
+ }
+ w.state = *s
+ b := pbutil.MustMarshal(s)
+ rec := &walpb.Record{Type: stateType, Data: b}
+ return w.encoder.encode(rec)
+}
+
+func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ // shortcut: nothing to persist, so do not call sync
+ if raft.IsEmptyHardState(st) && len(ents) == 0 {
+ return nil
+ }
+
+ mustSync := mustSync(st, w.state, len(ents))
+
+ // TODO(xiangli): no more reference operator
+ for i := range ents {
+ if err := w.saveEntry(&ents[i]); err != nil {
+ return err
+ }
+ }
+ if err := w.saveState(&st); err != nil {
+ return err
+ }
+
+ fstat, err := w.f.Stat()
+ if err != nil {
+ return err
+ }
+ if fstat.Size() < segmentSizeBytes {
+ if mustSync {
+ return w.sync()
+ }
+ return nil
+ }
+ // TODO: add a test for this code path when refactoring the tests
+ return w.cut()
+}
+
+func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ b := pbutil.MustMarshal(&e)
+ rec := &walpb.Record{Type: snapshotType, Data: b}
+ if err := w.encoder.encode(rec); err != nil {
+ return err
+ }
+ // update enti only when snapshot is ahead of last index
+ if w.enti < e.Index {
+ w.enti = e.Index
+ }
+ lastIndexSaved.Set(float64(w.enti))
+ return w.sync()
+}
+
+func (w *WAL) saveCrc(prevCrc uint32) error {
+ return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
+}
+
+func mustSync(st, prevst raftpb.HardState, entsnum int) bool {
+ // Persistent state on all servers:
+ // (Updated on stable storage before responding to RPCs)
+ // currentTerm
+ // votedFor
+ // log entries[]
+ if entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term {
+ return true
+ }
+ return false
+}
diff --git a/vendor/src/github.com/coreos/etcd/wal/walpb/record.go b/vendor/src/github.com/coreos/etcd/wal/walpb/record.go
new file mode 100644
index 0000000000..bb53685697
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/wal/walpb/record.go
@@ -0,0 +1,29 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package walpb
+
+import "errors"
+
+var (
+ ErrCRCMismatch = errors.New("walpb: crc mismatch")
+)
+
+func (rec *Record) Validate(crc uint32) error {
+ if rec.Crc == crc {
+ return nil
+ }
+ rec.Reset()
+ return ErrCRCMismatch
+}
diff --git a/vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go b/vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go
new file mode 100644
index 0000000000..638bdc3b69
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go
@@ -0,0 +1,495 @@
+// Code generated by protoc-gen-gogo.
+// source: record.proto
+// DO NOT EDIT!
+
+/*
+ Package walpb is a generated protocol buffer package.
+
+ It is generated from these files:
+ record.proto
+
+ It has these top-level messages:
+ Record
+ Snapshot
+*/
+package walpb
+
+import (
+ "fmt"
+
+ proto "github.com/gogo/protobuf/proto"
+)
+
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Record struct {
+ Type int64 `protobuf:"varint,1,opt,name=type" json:"type"`
+ Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"`
+ Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Record) Reset() { *m = Record{} }
+func (m *Record) String() string { return proto.CompactTextString(m) }
+func (*Record) ProtoMessage() {}
+
+type Snapshot struct {
+ Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"`
+ Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*Record)(nil), "walpb.Record")
+ proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot")
+}
+func (m *Record) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Record) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintRecord(data, i, uint64(m.Type))
+ data[i] = 0x10
+ i++
+ i = encodeVarintRecord(data, i, uint64(m.Crc))
+ if m.Data != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintRecord(data, i, uint64(len(m.Data)))
+ i += copy(data[i:], m.Data)
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(data[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *Snapshot) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintRecord(data, i, uint64(m.Index))
+ data[i] = 0x10
+ i++
+ i = encodeVarintRecord(data, i, uint64(m.Term))
+ if m.XXX_unrecognized != nil {
+ i += copy(data[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func encodeFixed64Record(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Record(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintRecord(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *Record) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRecord(uint64(m.Type))
+ n += 1 + sovRecord(uint64(m.Crc))
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovRecord(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Snapshot) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRecord(uint64(m.Index))
+ n += 1 + sovRecord(uint64(m.Term))
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovRecord(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozRecord(x uint64) (n int) {
+ return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Record) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Record: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Type |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType)
+ }
+ m.Crc = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Crc |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRecord
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRecord(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRecord
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Snapshot) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Index |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Term |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRecord(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRecord
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipRecord(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthRecord
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipRecord(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow")
+)
diff --git a/vendor/src/github.com/coreos/etcd/wal/walpb/record.proto b/vendor/src/github.com/coreos/etcd/wal/walpb/record.proto
new file mode 100644
index 0000000000..b694cb2338
--- /dev/null
+++ b/vendor/src/github.com/coreos/etcd/wal/walpb/record.proto
@@ -0,0 +1,20 @@
+syntax = "proto2";
+package walpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message Record {
+ optional int64 type = 1 [(gogoproto.nullable) = false];
+ optional uint32 crc = 2 [(gogoproto.nullable) = false];
+ optional bytes data = 3;
+}
+
+message Snapshot {
+ optional uint64 index = 1 [(gogoproto.nullable) = false];
+ optional uint64 term = 2 [(gogoproto.nullable) = false];
+}
diff --git a/vendor/src/github.com/coreos/pkg/LICENSE b/vendor/src/github.com/coreos/pkg/LICENSE
new file mode 100644
index 0000000000..e06d208186
--- /dev/null
+++ b/vendor/src/github.com/coreos/pkg/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/README.md b/vendor/src/github.com/coreos/pkg/capnslog/README.md
new file mode 100644
index 0000000000..81efb1fb6a
--- /dev/null
+++ b/vendor/src/github.com/coreos/pkg/capnslog/README.md
@@ -0,0 +1,39 @@
+# capnslog, the CoreOS logging package
+
+There are far too many logging packages out there; they come with a variety of licenses, carry far too many features (colorization, all sorts of log frameworks), or are simply a pain to use (no `Fatalln()`?).
+capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
+
+### Design Principles
+
+##### `package main` is the place where logging gets turned on and routed
+
+A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
+
+##### All log options are runtime-configurable.
+
+It is still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but it does so explicitly.
+
+##### There is one log object per package. It is registered under its repository and package name.
+
+`main` activates logging for its repository and for any dependency repositories whose output it also wants in its log stream. `main` also dictates the level at which each subpackage logs.
+
+##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
+
+Splitting streams is probably not the job of your program, but rather of your log aggregation framework. If you must split output streams, again, `main` configures this, and you can write a very simple two-output struct that satisfies io.Writer.
+
+Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
+
+##### Log objects are an interface
+
+An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do fancier tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
+
+##### Log levels have specific meanings:
+
+ * Critical: Unrecoverable. Must fail.
+ * Error: Data has been lost, a request has failed for a bad reason, or a required resource is gone
+ * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
+ * Notice: Normal, but important (uncommon) log information.
+ * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
+ * Debug: Everything is still fine, but even common operations may be logged, and the notices become less helpful but far more numerous.
+ * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
+
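+### Example
+
+A minimal usage sketch (the repository and package names below are illustrative):
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+// One log object per package, registered under repo and package name.
+var log = capnslog.NewPackageLogger("github.com/example/project", "main")
+
+func main() {
+	// main turns logging on and routes it.
+	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
+	capnslog.SetGlobalLogLevel(capnslog.INFO)
+
+	log.Infof("listening on %s", ":8080")
+	log.Debug("suppressed at INFO")
+}
+```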
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/formatters.go b/vendor/src/github.com/coreos/pkg/capnslog/formatters.go
new file mode 100644
index 0000000000..99ec6f824b
--- /dev/null
+++ b/vendor/src/github.com/coreos/pkg/capnslog/formatters.go
@@ -0,0 +1,106 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+ "time"
+)
+
+type Formatter interface {
+ Format(pkg string, level LogLevel, depth int, entries ...interface{})
+ Flush()
+}
+
+func NewStringFormatter(w io.Writer) *StringFormatter {
+ return &StringFormatter{
+ w: bufio.NewWriter(w),
+ }
+}
+
+type StringFormatter struct {
+ w *bufio.Writer
+}
+
+func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
+ now := time.Now().UTC()
+ s.w.WriteString(now.Format(time.RFC3339))
+ s.w.WriteByte(' ')
+ writeEntries(s.w, pkg, l, i, entries...)
+ s.Flush()
+}
+
+func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
+ if pkg != "" {
+ w.WriteString(pkg + ": ")
+ }
+ str := fmt.Sprint(entries...)
+ endsInNL := strings.HasSuffix(str, "\n")
+ w.WriteString(str)
+ if !endsInNL {
+ w.WriteString("\n")
+ }
+}
+
+func (s *StringFormatter) Flush() {
+ s.w.Flush()
+}
+
+func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
+ return &PrettyFormatter{
+ w: bufio.NewWriter(w),
+ debug: debug,
+ }
+}
+
+type PrettyFormatter struct {
+ w *bufio.Writer
+ debug bool
+}
+
+func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
+ now := time.Now()
+ ts := now.Format("2006-01-02 15:04:05")
+ c.w.WriteString(ts)
+ ms := now.Nanosecond() / 1000
+ c.w.WriteString(fmt.Sprintf(".%06d", ms))
+ if c.debug {
+ _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ if line < 0 {
+ line = 0 // not a real line number
+ }
+ c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
+ }
+ c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
+ writeEntries(c.w, pkg, l, depth, entries...)
+ c.Flush()
+}
+
+func (c *PrettyFormatter) Flush() {
+ c.w.Flush()
+}
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go
new file mode 100644
index 0000000000..426603ef30
--- /dev/null
+++ b/vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go
@@ -0,0 +1,96 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var pid = os.Getpid()
+
+type GlogFormatter struct {
+ StringFormatter
+}
+
+func NewGlogFormatter(w io.Writer) *GlogFormatter {
+ g := &GlogFormatter{}
+ g.w = bufio.NewWriter(w)
+ return g
+}
+
+func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
+ g.w.Write(GlogHeader(level, depth+1))
+ g.StringFormatter.Format(pkg, level, depth+1, entries...)
+}
+
+func GlogHeader(level LogLevel, depth int) []byte {
+ // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+ now := time.Now().UTC()
+ _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ if line < 0 {
+ line = 0 // not a real line number
+ }
+ buf := &bytes.Buffer{}
+ buf.Grow(30)
+ _, month, day := now.Date()
+ hour, minute, second := now.Clock()
+ buf.WriteString(level.Char())
+ twoDigits(buf, int(month))
+ twoDigits(buf, day)
+ buf.WriteByte(' ')
+ twoDigits(buf, hour)
+ buf.WriteByte(':')
+ twoDigits(buf, minute)
+ buf.WriteByte(':')
+ twoDigits(buf, second)
+ buf.WriteByte('.')
+ buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
+ buf.WriteByte('Z')
+ buf.WriteByte(' ')
+ buf.WriteString(strconv.Itoa(pid))
+ buf.WriteByte(' ')
+ buf.WriteString(file)
+ buf.WriteByte(':')
+ buf.WriteString(strconv.Itoa(line))
+ buf.WriteByte(']')
+ buf.WriteByte(' ')
+ return buf.Bytes()
+}
+
+const digits = "0123456789"
+
+func twoDigits(b *bytes.Buffer, d int) {
+ c2 := digits[d%10]
+ d /= 10
+ c1 := digits[d%10]
+ b.WriteByte(c1)
+ b.WriteByte(c2)
+}
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/init.go b/vendor/src/github.com/coreos/pkg/capnslog/init.go
new file mode 100644
index 0000000000..44b8cd361b
--- /dev/null
+++ b/vendor/src/github.com/coreos/pkg/capnslog/init.go
@@ -0,0 +1,49 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+ "io"
+ "os"
+ "syscall"
+)
+
+// Here's where the opinionation comes in. We need some sensible defaults,
+// especially after taking over the log package. Your project (whatever it may
+// be) may see things differently. That's okay; there should be no defaults in
+// the main package that cannot be controlled or overridden programmatically,
+// otherwise it's a bug. The way to override them is to create your own
+// init_log.go file, much like this one.
+
+func init() {
+ initHijack()
+
+	// Go `log` package uses os.Stderr.
+ SetFormatter(NewDefaultFormatter(os.Stderr))
+ SetGlobalLogLevel(INFO)
+}
+
+func NewDefaultFormatter(out io.Writer) Formatter {
+ if syscall.Getppid() == 1 {
+ // We're running under init, which may be systemd.
+ f, err := NewJournaldFormatter()
+ if err == nil {
+ return f
+ }
+ }
+ return NewPrettyFormatter(out, false)
+}
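+
+// A sketch of overriding these defaults from your own main (the formatter and
+// level choices are illustrative; all names are from this package):
+//
+//	capnslog.SetFormatter(capnslog.NewStringFormatter(os.Stderr))
+//	capnslog.SetGlobalLogLevel(capnslog.DEBUG)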
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/src/github.com/coreos/pkg/capnslog/init_windows.go
new file mode 100644
index 0000000000..4553050653
--- /dev/null
+++ b/vendor/src/github.com/coreos/pkg/capnslog/init_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import "os"
+
+func init() {
+ initHijack()
+
+ // Go `log` package uses os.Stderr.
+ SetFormatter(NewPrettyFormatter(os.Stderr, false))
+ SetGlobalLogLevel(INFO)
+}
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go
new file mode 100644
index 0000000000..72e05207c5
--- /dev/null
+++ b/vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go
@@ -0,0 +1,68 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/coreos/go-systemd/journal"
+)
+
+func NewJournaldFormatter() (Formatter, error) {
+ if !journal.Enabled() {
+ return nil, errors.New("No systemd detected")
+ }
+ return &journaldFormatter{}, nil
+}
+
+type journaldFormatter struct{}
+
+func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+ var pri journal.Priority
+ switch l {
+ case CRITICAL:
+ pri = journal.PriCrit
+ case ERROR:
+ pri = journal.PriErr
+ case WARNING:
+ pri = journal.PriWarning
+ case NOTICE:
+ pri = journal.PriNotice
+ case INFO:
+ pri = journal.PriInfo
+ case DEBUG:
+ pri = journal.PriDebug
+ case TRACE:
+ pri = journal.PriDebug
+ default:
+ panic("Unhandled loglevel")
+ }
+ msg := fmt.Sprint(entries...)
+ tags := map[string]string{
+ "PACKAGE": pkg,
+ "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
+ }
+ err := journal.Send(msg, pri, tags)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ }
+}
+
+func (j *journaldFormatter) Flush() {}
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go
new file mode 100644
index 0000000000..970086b9f9
--- /dev/null
+++ b/vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go
@@ -0,0 +1,39 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "log"
+)
+
+func initHijack() {
+ pkg := NewPackageLogger("log", "")
+ w := packageWriter{pkg}
+ log.SetFlags(0)
+ log.SetPrefix("")
+ log.SetOutput(w)
+}
+
+type packageWriter struct {
+ pl *PackageLogger
+}
+
+func (p packageWriter) Write(b []byte) (int, error) {
+ if p.pl.level < INFO {
+ return 0, nil
+ }
+ p.pl.internalLog(calldepth+2, INFO, string(b))
+ return len(b), nil
+}
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/logmap.go b/vendor/src/github.com/coreos/pkg/capnslog/logmap.go
new file mode 100644
index 0000000000..8495448830
--- /dev/null
+++ b/vendor/src/github.com/coreos/pkg/capnslog/logmap.go
@@ -0,0 +1,240 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "errors"
+ "strings"
+ "sync"
+)
+
+// LogLevel is the set of all log levels.
+type LogLevel int8
+
+const (
+ // CRITICAL is the lowest log level; only errors which will end the program will be propagated.
+ CRITICAL LogLevel = iota - 1
+ // ERROR is for errors that are not fatal but lead to troubling behavior.
+ ERROR
+	// WARNING is for conditions which are not fatal and not quite errors, but are unusual. Often sourced from misconfigurations.
+ WARNING
+ // NOTICE is for normal but significant conditions.
+ NOTICE
+ // INFO is a log level for common, everyday log updates.
+ INFO
+ // DEBUG is the default hidden level for more verbose updates about internal processes.
+ DEBUG
+	// TRACE is for (potentially) call-by-call tracing of programs.
+ TRACE
+)
+
+// Char returns a single-character representation of the log level.
+func (l LogLevel) Char() string {
+ switch l {
+ case CRITICAL:
+ return "C"
+ case ERROR:
+ return "E"
+ case WARNING:
+ return "W"
+ case NOTICE:
+ return "N"
+ case INFO:
+ return "I"
+ case DEBUG:
+ return "D"
+ case TRACE:
+ return "T"
+ default:
+ panic("Unhandled loglevel")
+ }
+}
+
+// String returns a multi-character representation of the log level.
+func (l LogLevel) String() string {
+ switch l {
+ case CRITICAL:
+ return "CRITICAL"
+ case ERROR:
+ return "ERROR"
+ case WARNING:
+ return "WARNING"
+ case NOTICE:
+ return "NOTICE"
+ case INFO:
+ return "INFO"
+ case DEBUG:
+ return "DEBUG"
+ case TRACE:
+ return "TRACE"
+ default:
+ panic("Unhandled loglevel")
+ }
+}
+
+// Set updates the LogLevel using the given string value. It fulfills the flag.Value interface.
+func (l *LogLevel) Set(s string) error {
+ value, err := ParseLevel(s)
+ if err != nil {
+ return err
+ }
+
+ *l = value
+ return nil
+}
+
+// ParseLevel translates some potential loglevel strings into their corresponding levels.
+func ParseLevel(s string) (LogLevel, error) {
+ switch s {
+ case "CRITICAL", "C":
+ return CRITICAL, nil
+ case "ERROR", "0", "E":
+ return ERROR, nil
+ case "WARNING", "1", "W":
+ return WARNING, nil
+ case "NOTICE", "2", "N":
+ return NOTICE, nil
+ case "INFO", "3", "I":
+ return INFO, nil
+ case "DEBUG", "4", "D":
+ return DEBUG, nil
+ case "TRACE", "5", "T":
+ return TRACE, nil
+ }
+ return CRITICAL, errors.New("couldn't parse log level " + s)
+}
+
+type RepoLogger map[string]*PackageLogger
+
+type loggerStruct struct {
+ sync.Mutex
+ repoMap map[string]RepoLogger
+ formatter Formatter
+}
+
+// logger is the global logger
+var logger = new(loggerStruct)
+
+// SetGlobalLogLevel sets the log level for all packages in all repositories
+// registered with capnslog.
+func SetGlobalLogLevel(l LogLevel) {
+ logger.Lock()
+ defer logger.Unlock()
+ for _, r := range logger.repoMap {
+ r.setRepoLogLevelInternal(l)
+ }
+}
+
+// GetRepoLogger returns the handle to the repository's set of package loggers, or an error if no packages are registered for the repo.
+func GetRepoLogger(repo string) (RepoLogger, error) {
+ logger.Lock()
+ defer logger.Unlock()
+ r, ok := logger.repoMap[repo]
+ if !ok {
+ return nil, errors.New("no packages registered for repo " + repo)
+ }
+ return r, nil
+}
+
+// MustRepoLogger is like GetRepoLogger, but panics if the repository is not registered.
+func MustRepoLogger(repo string) RepoLogger {
+ r, err := GetRepoLogger(repo)
+ if err != nil {
+ panic(err)
+ }
+ return r
+}
+
+// SetRepoLogLevel sets the log level for all packages in the repository.
+func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
+ logger.Lock()
+ defer logger.Unlock()
+ r.setRepoLogLevelInternal(l)
+}
+
+func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
+ for _, v := range r {
+ v.level = l
+ }
+}
+
+// ParseLogLevelConfig parses a comma-separated string of "package=loglevel"
+// settings, in order, and returns a map of the results, for use in SetLogLevel.
+func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
+ setlist := strings.Split(conf, ",")
+ out := make(map[string]LogLevel)
+ for _, setstring := range setlist {
+ setting := strings.Split(setstring, "=")
+ if len(setting) != 2 {
+ return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
+ }
+ l, err := ParseLevel(setting[1])
+ if err != nil {
+ return nil, err
+ }
+ out[setting[0]] = l
+ }
+ return out, nil
+}
+
+// SetLogLevel takes a map of package names within a repository to their desired
+// loglevel, and sets the levels appropriately. Unknown packages are ignored.
+// "*" is a special package name that corresponds to all packages, and will be
+// processed first.
+func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
+ logger.Lock()
+ defer logger.Unlock()
+ if l, ok := m["*"]; ok {
+ r.setRepoLogLevelInternal(l)
+ }
+ for k, v := range m {
+ l, ok := r[k]
+ if !ok {
+ continue
+ }
+ l.level = v
+ }
+}
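+
+// A configuration sketch combining ParseLogLevelConfig and SetLogLevel (the
+// repo name and config string are illustrative):
+//
+//	repo := MustRepoLogger("github.com/example/project")
+//	cfg, err := repo.ParseLogLevelConfig("*=NOTICE,server=DEBUG")
+//	if err == nil {
+//		repo.SetLogLevel(cfg) // "*" is applied first, then per-package levels
+//	}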
+
+// SetFormatter sets the formatting function for all logs.
+func SetFormatter(f Formatter) {
+ logger.Lock()
+ defer logger.Unlock()
+ logger.formatter = f
+}
+
+// NewPackageLogger creates a package logger object.
+// This should be defined as a global var in your package, referencing your repo.
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
+ logger.Lock()
+ defer logger.Unlock()
+ if logger.repoMap == nil {
+ logger.repoMap = make(map[string]RepoLogger)
+ }
+ r, rok := logger.repoMap[repo]
+ if !rok {
+ logger.repoMap[repo] = make(RepoLogger)
+ r = logger.repoMap[repo]
+ }
+ p, pok := r[pkg]
+ if !pok {
+ r[pkg] = &PackageLogger{
+ pkg: pkg,
+ level: INFO,
+ }
+ p = r[pkg]
+ }
+ return
+}
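+
+// As the doc comment above says, the usual pattern is a package-level var
+// (the repo and package names are illustrative):
+//
+//	var plog = capnslog.NewPackageLogger("github.com/example/project", "server")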
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go
new file mode 100644
index 0000000000..32d2f16a98
--- /dev/null
+++ b/vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go
@@ -0,0 +1,158 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "fmt"
+ "os"
+)
+
+type PackageLogger struct {
+ pkg string
+ level LogLevel
+}
+
+const calldepth = 2
+
+func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
+ if inLevel != CRITICAL && p.level < inLevel {
+ return
+ }
+ logger.Lock()
+ defer logger.Unlock()
+ if logger.formatter != nil {
+ logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
+ }
+}
+
+func (p *PackageLogger) LevelAt(l LogLevel) bool {
+ return p.level >= l
+}
+
+// Logf logs a formatted string at any level between ERROR and TRACE.
+func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
+ p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
+}
+
+// Log logs a message at any level between ERROR and TRACE.
+func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
+ p.internalLog(calldepth, l, fmt.Sprint(args...))
+}
+
+// log stdlib compatibility
+
+func (p *PackageLogger) Println(args ...interface{}) {
+ p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
+}
+
+func (p *PackageLogger) Printf(format string, args ...interface{}) {
+ p.internalLog(calldepth, INFO, fmt.Sprintf(format, args...))
+}
+
+func (p *PackageLogger) Print(args ...interface{}) {
+ p.internalLog(calldepth, INFO, fmt.Sprint(args...))
+}
+
+// Panic and fatal
+
+func (p *PackageLogger) Panicf(format string, args ...interface{}) {
+ s := fmt.Sprintf(format, args...)
+ p.internalLog(calldepth, CRITICAL, s)
+ panic(s)
+}
+
+func (p *PackageLogger) Panic(args ...interface{}) {
+ s := fmt.Sprint(args...)
+ p.internalLog(calldepth, CRITICAL, s)
+ panic(s)
+}
+
+func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
+ s := fmt.Sprintf(format, args...)
+ p.internalLog(calldepth, CRITICAL, s)
+ os.Exit(1)
+}
+
+func (p *PackageLogger) Fatal(args ...interface{}) {
+ s := fmt.Sprint(args...)
+ p.internalLog(calldepth, CRITICAL, s)
+ os.Exit(1)
+}
+
+// Error Functions
+
+func (p *PackageLogger) Errorf(format string, args ...interface{}) {
+ p.internalLog(calldepth, ERROR, fmt.Sprintf(format, args...))
+}
+
+func (p *PackageLogger) Error(entries ...interface{}) {
+ p.internalLog(calldepth, ERROR, entries...)
+}
+
+// Warning Functions
+
+func (p *PackageLogger) Warningf(format string, args ...interface{}) {
+ p.internalLog(calldepth, WARNING, fmt.Sprintf(format, args...))
+}
+
+func (p *PackageLogger) Warning(entries ...interface{}) {
+ p.internalLog(calldepth, WARNING, entries...)
+}
+
+// Notice Functions
+
+func (p *PackageLogger) Noticef(format string, args ...interface{}) {
+ p.internalLog(calldepth, NOTICE, fmt.Sprintf(format, args...))
+}
+
+func (p *PackageLogger) Notice(entries ...interface{}) {
+ p.internalLog(calldepth, NOTICE, entries...)
+}
+
+// Info Functions
+
+func (p *PackageLogger) Infof(format string, args ...interface{}) {
+ p.internalLog(calldepth, INFO, fmt.Sprintf(format, args...))
+}
+
+func (p *PackageLogger) Info(entries ...interface{}) {
+ p.internalLog(calldepth, INFO, entries...)
+}
+
+// Debug Functions
+
+func (p *PackageLogger) Debugf(format string, args ...interface{}) {
+ p.internalLog(calldepth, DEBUG, fmt.Sprintf(format, args...))
+}
+
+func (p *PackageLogger) Debug(entries ...interface{}) {
+ p.internalLog(calldepth, DEBUG, entries...)
+}
+
+// Trace Functions
+
+func (p *PackageLogger) Tracef(format string, args ...interface{}) {
+ p.internalLog(calldepth, TRACE, fmt.Sprintf(format, args...))
+}
+
+func (p *PackageLogger) Trace(entries ...interface{}) {
+ p.internalLog(calldepth, TRACE, entries...)
+}
+
+func (p *PackageLogger) Flush() {
+ logger.Lock()
+ defer logger.Unlock()
+ logger.formatter.Flush()
+}
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go
new file mode 100644
index 0000000000..4be5a1f2de
--- /dev/null
+++ b/vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go
@@ -0,0 +1,65 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+ "fmt"
+ "log/syslog"
+)
+
+func NewSyslogFormatter(w *syslog.Writer) Formatter {
+ return &syslogFormatter{w}
+}
+
+func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
+ w, err := syslog.New(syslog.LOG_DEBUG, tag)
+ if err != nil {
+ return nil, err
+ }
+ return NewSyslogFormatter(w), nil
+}
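+
+// A wiring sketch (the tag is illustrative); an error here usually means no
+// syslog daemon is reachable:
+//
+//	if f, err := NewDefaultSyslogFormatter("myapp"); err == nil {
+//		SetFormatter(f)
+//	}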
+
+type syslogFormatter struct {
+ w *syslog.Writer
+}
+
+func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+ for _, entry := range entries {
+ str := fmt.Sprint(entry)
+ switch l {
+ case CRITICAL:
+ s.w.Crit(str)
+ case ERROR:
+ s.w.Err(str)
+ case WARNING:
+ s.w.Warning(str)
+ case NOTICE:
+ s.w.Notice(str)
+ case INFO:
+ s.w.Info(str)
+ case DEBUG:
+ s.w.Debug(str)
+ case TRACE:
+ s.w.Debug(str)
+ default:
+ panic("Unhandled loglevel")
+ }
+ }
+}
+
+func (s *syslogFormatter) Flush() {
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/client.go b/vendor/src/github.com/docker/engine-api/client/client.go
index 8c8c6fd182..f3ad2cf30d 100644
--- a/vendor/src/github.com/docker/engine-api/client/client.go
+++ b/vendor/src/github.com/docker/engine-api/client/client.go
@@ -12,6 +12,9 @@ import (
"github.com/docker/go-connections/tlsconfig"
)
+// DefaultVersion is the version of the current stable API
+const DefaultVersion string = "1.23"
+
// Client is the API client that performs all operations
// against a docker server.
type Client struct {
@@ -59,13 +62,22 @@ func NewEnvClient() (*Client, error) {
if host == "" {
host = DefaultDockerHost
}
- return NewClient(host, os.Getenv("DOCKER_API_VERSION"), client, nil)
+
+ version := os.Getenv("DOCKER_API_VERSION")
+ if version == "" {
+ version = DefaultVersion
+ }
+
+ return NewClient(host, version, client, nil)
}
// NewClient initializes a new API client for the given host and API version.
-// It won't send any version information if the version number is empty.
// It uses the given http client as transport.
// It also initializes the custom http headers to add to each request.
+//
+// It won't send any version information if the version number is empty. It is
+// highly recommended that you set a version or your client may break if the
+// server is upgraded.
func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
proto, addr, basePath, err := ParseHost(host)
if err != nil {
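+	// Construction sketch per the recommendation above (the socket path is the
+	// stock Linux default; pinning "1.23" matches DefaultVersion):
+	//
+	//	cli, err := client.NewClient("unix:///var/run/docker.sock", "1.23", nil, nil)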
diff --git a/vendor/src/github.com/docker/engine-api/client/errors.go b/vendor/src/github.com/docker/engine-api/client/errors.go
index bd14935c98..be9f595ef8 100644
--- a/vendor/src/github.com/docker/engine-api/client/errors.go
+++ b/vendor/src/github.com/docker/engine-api/client/errors.go
@@ -120,3 +120,54 @@ func IsErrUnauthorized(err error) bool {
_, ok := err.(unauthorizedError)
return ok
}
+
+// nodeNotFoundError implements an error returned when a node is not found.
+type nodeNotFoundError struct {
+ nodeID string
+}
+
+// Error returns a string representation of a nodeNotFoundError
+func (e nodeNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such node: %s", e.nodeID)
+}
+
+// IsErrNodeNotFound returns true if the error indicates that the node was not found.
+func IsErrNodeNotFound(err error) bool {
+ _, ok := err.(nodeNotFoundError)
+ return ok
+}
+
+// serviceNotFoundError implements an error returned when a service is not found.
+type serviceNotFoundError struct {
+ serviceID string
+}
+
+// Error returns a string representation of a serviceNotFoundError
+func (e serviceNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such service: %s", e.serviceID)
+}
+
+// IsErrServiceNotFound returns true if the error indicates that the service was not found.
+func IsErrServiceNotFound(err error) bool {
+ _, ok := err.(serviceNotFoundError)
+ return ok
+}
+
+// taskNotFoundError implements an error returned when a task is not found.
+type taskNotFoundError struct {
+ taskID string
+}
+
+// Error returns a string representation of a taskNotFoundError
+func (e taskNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such task: %s", e.taskID)
+}
+
+// IsErrTaskNotFound returns true if the error indicates that the task was not found.
+func IsErrTaskNotFound(err error) bool {
+ _, ok := err.(taskNotFoundError)
+ return ok
+}
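+
+// A caller-side sketch (ctx, cli, and the node ID are illustrative):
+//
+//	node, err := cli.NodeInspect(ctx, "24ifsmvkjbyhk")
+//	if client.IsErrNodeNotFound(err) {
+//		// the ID did not resolve to a node
+//	}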
diff --git a/vendor/src/github.com/docker/engine-api/client/interface.go b/vendor/src/github.com/docker/engine-api/client/interface.go
index a125916fed..d64619899e 100644
--- a/vendor/src/github.com/docker/engine-api/client/interface.go
+++ b/vendor/src/github.com/docker/engine-api/client/interface.go
@@ -11,6 +11,7 @@ import (
"github.com/docker/engine-api/types/filters"
"github.com/docker/engine-api/types/network"
"github.com/docker/engine-api/types/registry"
+ "github.com/docker/engine-api/types/swarm"
)
// APIClient is an interface that clients that talk with a docker server must implement.
@@ -19,6 +20,22 @@ type APIClient interface {
CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
CheckpointDelete(ctx context.Context, container string, checkpointID string) error
CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error)
+ SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
+ SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
+ SwarmLeave(ctx context.Context, force bool) error
+ SwarmInspect(ctx context.Context) (swarm.Swarm, error)
+ SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec) error
+ NodeInspect(ctx context.Context, nodeID string) (swarm.Node, error)
+ NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
+ NodeRemove(ctx context.Context, nodeID string) error
+ NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
+ ServiceCreate(ctx context.Context, service swarm.ServiceSpec) (types.ServiceCreateResponse, error)
+ ServiceInspect(ctx context.Context, serviceID string) (swarm.Service, error)
+ ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
+ ServiceRemove(ctx context.Context, serviceID string) error
+ ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec) error
+ TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
+ TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error)
ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error)
diff --git a/vendor/src/github.com/docker/engine-api/client/node_inspect.go b/vendor/src/github.com/docker/engine-api/client/node_inspect.go
new file mode 100644
index 0000000000..91cbfc0bea
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/node_inspect.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeInspect returns the node information.
+func (cli *Client) NodeInspect(ctx context.Context, nodeID string) (swarm.Node, error) {
+ serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Node{}, nodeNotFoundError{nodeID}
+ }
+ return swarm.Node{}, err
+ }
+
+ var response swarm.Node
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/node_list.go b/vendor/src/github.com/docker/engine-api/client/node_list.go
new file mode 100644
index 0000000000..57cf14827d
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/node_list.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeList returns the list of nodes.
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
+ query := url.Values{}
+
+ if options.Filter.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filter)
+
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/nodes", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var nodes []swarm.Node
+ err = json.NewDecoder(resp.body).Decode(&nodes)
+ ensureReaderClosed(resp)
+ return nodes, err
+}
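+
+// A filtered-list sketch (the "role" filter key is an assumption about what
+// the daemon accepts, not something defined in this file):
+//
+//	f := filters.NewArgs()
+//	f.Add("role", "manager")
+//	nodes, err := cli.NodeList(ctx, types.NodeListOptions{Filter: f})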
diff --git a/vendor/src/github.com/docker/engine-api/client/node_remove.go b/vendor/src/github.com/docker/engine-api/client/node_remove.go
new file mode 100644
index 0000000000..a22ee93f4b
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/node_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// NodeRemove removes a Node.
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string) error {
+ resp, err := cli.delete(ctx, "/nodes/"+nodeID, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/node_update.go b/vendor/src/github.com/docker/engine-api/client/node_update.go
new file mode 100644
index 0000000000..4722211517
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/node_update.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeUpdate updates a Node.
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
+ ensureReaderClosed(resp)
+ return err
+}
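+
+// An update sketch showing the read-modify-write flow: the version passed
+// back must be the one read, or the server rejects the update (the Drain
+// constant is assumed from the vendored swarm types):
+//
+//	node, err := cli.NodeInspect(ctx, nodeID)
+//	if err == nil {
+//		node.Spec.Availability = swarm.NodeAvailabilityDrain
+//		err = cli.NodeUpdate(ctx, node.ID, node.Version, node.Spec)
+//	}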
diff --git a/vendor/src/github.com/docker/engine-api/client/request.go b/vendor/src/github.com/docker/engine-api/client/request.go
index 67147eec89..854901559b 100644
--- a/vendor/src/github.com/docker/engine-api/client/request.go
+++ b/vendor/src/github.com/docker/engine-api/client/request.go
@@ -12,6 +12,7 @@ import (
"github.com/docker/engine-api/client/transport/cancellable"
"github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/versions"
"golang.org/x/net/context"
)
@@ -133,7 +134,8 @@ func (cli *Client) sendClientRequest(ctx context.Context, method, path string, q
}
var errorMessage string
- if resp.Header.Get("Content-Type") == "application/json" {
+ if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) &&
+ resp.Header.Get("Content-Type") == "application/json" {
var errorResponse types.ErrorResponse
if err := json.Unmarshal(body, &errorResponse); err != nil {
return serverResp, fmt.Errorf("Error reading JSON: %v", err)
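Illustrative note on the guard above: JSON error bodies are only expected from API versions newer than 1.23 (an empty client version means "latest"), so older daemons fall through to the plain-text error path. A minimal sketch of the comparison, using only the versions helper imported in this hunk:

    package main

    import (
    	"fmt"

    	"github.com/docker/engine-api/types/versions"
    )

    func main() {
    	for _, v := range []string{"", "1.22", "1.23", "1.24"} {
    		// Mirrors the condition in sendClientRequest: unset version
    		// or anything above 1.23 opts in to JSON error responses.
    		jsonErrors := v == "" || versions.GreaterThan(v, "1.23")
    		fmt.Printf("client version %q -> parse JSON errors: %v\n", v, jsonErrors)
    	}
    }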
diff --git a/vendor/src/github.com/docker/engine-api/client/service_create.go b/vendor/src/github.com/docker/engine-api/client/service_create.go
new file mode 100644
index 0000000000..f87851e43f
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/service_create.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceCreate creates a new Service.
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec) (types.ServiceCreateResponse, error) {
+ var response types.ServiceCreateResponse
+ resp, err := cli.post(ctx, "/services/create", nil, service, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/service_inspect.go b/vendor/src/github.com/docker/engine-api/client/service_inspect.go
new file mode 100644
index 0000000000..3dbb8cd560
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/service_inspect.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceInspect returns the service information.
+func (cli *Client) ServiceInspect(ctx context.Context, serviceID string) (swarm.Service, error) {
+ serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Service{}, serviceNotFoundError{serviceID}
+ }
+ return swarm.Service{}, err
+ }
+
+ var response swarm.Service
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/service_list.go b/vendor/src/github.com/docker/engine-api/client/service_list.go
new file mode 100644
index 0000000000..b48964aa0f
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/service_list.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceList returns the list of services.
+func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
+ query := url.Values{}
+
+ if options.Filter.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filter)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/services", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var services []swarm.Service
+ err = json.NewDecoder(resp.body).Decode(&services)
+ ensureReaderClosed(resp)
+ return services, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/service_remove.go b/vendor/src/github.com/docker/engine-api/client/service_remove.go
new file mode 100644
index 0000000000..a9331f92c2
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/service_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ServiceRemove kills and removes a service.
+func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
+ resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/service_update.go b/vendor/src/github.com/docker/engine-api/client/service_update.go
new file mode 100644
index 0000000000..a3f22fafac
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/service_update.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceUpdate updates a Service.
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec) error {
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, nil)
+ ensureReaderClosed(resp)
+ return err
+}
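Usage sketch for the service endpoints (illustrative, not part of the patch): create a replicated service, then update it by echoing the version read back via inspect, the same optimistic-concurrency pattern as the node client. Image names and the replica count are arbitrary:

    package main

    import (
    	"fmt"

    	"github.com/docker/engine-api/client"
    	"github.com/docker/engine-api/types/swarm"
    	"golang.org/x/net/context"
    )

    func main() {
    	cli, err := client.NewEnvClient()
    	if err != nil {
    		panic(err)
    	}
    	ctx := context.Background()

    	replicas := uint64(3)
    	spec := swarm.ServiceSpec{
    		Annotations: swarm.Annotations{Name: "web"},
    		TaskTemplate: swarm.TaskSpec{
    			ContainerSpec: swarm.ContainerSpec{Image: "nginx:alpine"},
    		},
    		Mode: swarm.ServiceMode{Replicated: &swarm.ReplicatedService{Replicas: &replicas}},
    	}

    	created, err := cli.ServiceCreate(ctx, spec)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("created service", created.ID)

    	// Updates must echo the current version, read back via inspect.
    	svc, err := cli.ServiceInspect(ctx, created.ID)
    	if err != nil {
    		panic(err)
    	}
    	svc.Spec.TaskTemplate.ContainerSpec.Image = "nginx:latest"
    	if err := cli.ServiceUpdate(ctx, svc.ID, svc.Version, svc.Spec); err != nil {
    		panic(err)
    	}
    }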
diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_init.go b/vendor/src/github.com/docker/engine-api/client/swarm_init.go
new file mode 100644
index 0000000000..68f0a744a2
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/swarm_init.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmInit initializes the Swarm.
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
+ serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil)
+ if err != nil {
+ return "", err
+ }
+
+ var response string
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_inspect.go b/vendor/src/github.com/docker/engine-api/client/swarm_inspect.go
new file mode 100644
index 0000000000..d67c7c010b
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/swarm_inspect.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmInspect inspects the Swarm.
+func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) {
+ serverResp, err := cli.get(ctx, "/swarm", nil, nil)
+ if err != nil {
+ return swarm.Swarm{}, err
+ }
+
+ var response swarm.Swarm
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_join.go b/vendor/src/github.com/docker/engine-api/client/swarm_join.go
new file mode 100644
index 0000000000..a9b14e0c48
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/swarm_join.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmJoin joins the Swarm.
+func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
+ resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_leave.go b/vendor/src/github.com/docker/engine-api/client/swarm_leave.go
new file mode 100644
index 0000000000..a4df732174
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/swarm_leave.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// SwarmLeave leaves the Swarm.
+func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
+ query := url.Values{}
+ if force {
+ query.Set("force", "1")
+ }
+ resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_update.go b/vendor/src/github.com/docker/engine-api/client/swarm_update.go
new file mode 100644
index 0000000000..568474af7d
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/swarm_update.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmUpdate updates the Swarm.
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec) error {
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
+ ensureReaderClosed(resp)
+ return err
+}
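Usage sketch for the swarm endpoints (illustrative, not part of the patch): SwarmInit decodes a plain JSON string, which appears to be the ID of the node that became the first manager; SwarmUpdate takes the version from SwarmInspect like the other update calls. Addresses and values are arbitrary:

    package main

    import (
    	"fmt"

    	"github.com/docker/engine-api/client"
    	"github.com/docker/engine-api/types/swarm"
    	"golang.org/x/net/context"
    )

    func main() {
    	cli, err := client.NewEnvClient()
    	if err != nil {
    		panic(err)
    	}
    	ctx := context.Background()

    	nodeID, err := cli.SwarmInit(ctx, swarm.InitRequest{ListenAddr: "0.0.0.0:2377"})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("manager node:", nodeID)

    	sw, err := cli.SwarmInspect(ctx)
    	if err != nil {
    		panic(err)
    	}
    	// Bump task history retention; the update echoes the current version.
    	sw.Spec.Orchestration.TaskHistoryRetentionLimit = 10
    	if err := cli.SwarmUpdate(ctx, sw.Version, sw.Spec); err != nil {
    		panic(err)
    	}
    }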
diff --git a/vendor/src/github.com/docker/engine-api/client/task_inspect.go b/vendor/src/github.com/docker/engine-api/client/task_inspect.go
new file mode 100644
index 0000000000..3cac8882ef
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/task_inspect.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/engine-api/types/swarm"
+
+ "golang.org/x/net/context"
+)
+
+// TaskInspectWithRaw returns the task information and its raw representation.
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
+ serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Task{}, nil, taskNotFoundError{taskID}
+ }
+ return swarm.Task{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Task{}, nil, err
+ }
+
+ var response swarm.Task
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/task_list.go b/vendor/src/github.com/docker/engine-api/client/task_list.go
new file mode 100644
index 0000000000..4604513caf
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/task_list.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// TaskList returns the list of tasks.
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
+ query := url.Values{}
+
+ if options.Filter.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filter)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/tasks", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var tasks []swarm.Task
+ err = json.NewDecoder(resp.body).Decode(&tasks)
+ ensureReaderClosed(resp)
+ return tasks, err
+}
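Usage sketch for TaskList with a filter (illustrative, not part of the patch; the filter name "desired-state" is an assumption here, since supported names depend on the daemon's task filter set):

    package main

    import (
    	"fmt"

    	"github.com/docker/engine-api/client"
    	"github.com/docker/engine-api/types"
    	"github.com/docker/engine-api/types/filters"
    	"golang.org/x/net/context"
    )

    func main() {
    	cli, err := client.NewEnvClient()
    	if err != nil {
    		panic(err)
    	}

    	// Only tasks that currently want to be running (filter name assumed).
    	f := filters.NewArgs()
    	f.Add("desired-state", "running")

    	tasks, err := cli.TaskList(context.Background(), types.TaskListOptions{Filter: f})
    	if err != nil {
    		panic(err)
    	}
    	for _, t := range tasks {
    		fmt.Println(t.ID, t.ServiceID, t.Status.State)
    	}
    }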
diff --git a/vendor/src/github.com/docker/engine-api/types/client.go b/vendor/src/github.com/docker/engine-api/types/client.go
index 05d6cfaeba..b7993b74f0 100644
--- a/vendor/src/github.com/docker/engine-api/types/client.go
+++ b/vendor/src/github.com/docker/engine-api/types/client.go
@@ -240,3 +240,25 @@ type VersionResponse struct {
func (v VersionResponse) ServerOK() bool {
return v.Server != nil
}
+
+// NodeListOptions holds parameters to list nodes with.
+type NodeListOptions struct {
+ Filter filters.Args
+}
+
+// ServiceCreateResponse contains the information returned to a client
+// on the creation of a new service.
+type ServiceCreateResponse struct {
+ // ID is the ID of the created service.
+ ID string
+}
+
+// ServiceListOptions holds parameters to list services with.
+type ServiceListOptions struct {
+ Filter filters.Args
+}
+
+// TaskListOptions holds parameters to list tasks with.
+type TaskListOptions struct {
+ Filter filters.Args
+}
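A small sketch (not part of the patch) of how the Filter field in these option structs ends up on the wire: each list endpoint above serializes it with filters.ToParam into the "filters" query parameter:

    package main

    import (
    	"fmt"

    	"github.com/docker/engine-api/types/filters"
    )

    func main() {
    	f := filters.NewArgs()
    	f.Add("name", "web")
    	s, err := filters.ToParam(f)
    	if err != nil {
    		panic(err)
    	}
    	// Prints something like {"name":{"web":true}} (exact shape depends
    	// on the filters package revision).
    	fmt.Println(s)
    }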
diff --git a/vendor/src/github.com/docker/engine-api/types/network/network.go b/vendor/src/github.com/docker/engine-api/types/network/network.go
index bce60f5eec..47080b652e 100644
--- a/vendor/src/github.com/docker/engine-api/types/network/network.go
+++ b/vendor/src/github.com/docker/engine-api/types/network/network.go
@@ -23,8 +23,9 @@ type IPAMConfig struct {
// EndpointIPAMConfig represents IPAM configurations for the endpoint
type EndpointIPAMConfig struct {
- IPv4Address string `json:",omitempty"`
- IPv6Address string `json:",omitempty"`
+ IPv4Address string `json:",omitempty"`
+ IPv6Address string `json:",omitempty"`
+ LinkLocalIPs []string `json:",omitempty"`
}
// EndpointSettings stores the network endpoint details
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/common.go b/vendor/src/github.com/docker/engine-api/types/swarm/common.go
new file mode 100644
index 0000000000..b87f545369
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/common.go
@@ -0,0 +1,21 @@
+package swarm
+
+import "time"
+
+// Version represents the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// Meta is the base object inherited by most of the other ones.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/container.go b/vendor/src/github.com/docker/engine-api/types/swarm/container.go
new file mode 100644
index 0000000000..ec6587664b
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/container.go
@@ -0,0 +1,67 @@
+package swarm
+
+import "time"
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+ Image string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Command []string `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Env []string `json:",omitempty"`
+ Dir string `json:",omitempty"`
+ User string `json:",omitempty"`
+ Mounts []Mount `json:",omitempty"`
+ StopGracePeriod *time.Duration `json:",omitempty"`
+}
+
+// MountType represents the type of a mount.
+type MountType string
+
+const (
+ // MountTypeBind BIND
+ MountTypeBind MountType = "bind"
+ // MountTypeVolume VOLUME
+ MountTypeVolume MountType = "volume"
+)
+
+// Mount represents a mount (volume).
+type Mount struct {
+ Type MountType `json:",omitempty"`
+ Source string `json:",omitempty"`
+ Target string `json:",omitempty"`
+ Writable bool `json:",omitempty"`
+
+ BindOptions *BindOptions `json:",omitempty"`
+ VolumeOptions *VolumeOptions `json:",omitempty"`
+}
+
+// MountPropagation represents the propagation of a mount.
+type MountPropagation string
+
+const (
+ // MountPropagationRPrivate RPRIVATE
+ MountPropagationRPrivate MountPropagation = "rprivate"
+ // MountPropagationPrivate PRIVATE
+ MountPropagationPrivate MountPropagation = "private"
+ // MountPropagationRShared RSHARED
+ MountPropagationRShared MountPropagation = "rshared"
+ // MountPropagationShared SHARED
+ MountPropagationShared MountPropagation = "shared"
+ // MountPropagationRSlave RSLAVE
+ MountPropagationRSlave MountPropagation = "rslave"
+ // MountPropagationSlave SLAVE
+ MountPropagationSlave MountPropagation = "slave"
+)
+
+// BindOptions define options specific to mounts of type "bind".
+type BindOptions struct {
+ Propagation MountPropagation `json:",omitempty"`
+}
+
+// VolumeOptions represents the options for a mount of type volume.
+type VolumeOptions struct {
+ Populate bool `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ DriverConfig *Driver `json:",omitempty"`
+}
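Illustrative literal (not from the patch) showing how the container and mount types above compose; all field values are arbitrary:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/docker/engine-api/types/swarm"
    )

    func main() {
    	grace := 10 * time.Second
    	spec := swarm.ContainerSpec{
    		Image:           "redis:3",
    		Env:             []string{"MODE=cache"},
    		StopGracePeriod: &grace, // pointer so "unset" differs from zero
    		Mounts: []swarm.Mount{{
    			Type:     swarm.MountTypeVolume,
    			Source:   "redis-data",
    			Target:   "/data",
    			Writable: true,
    			VolumeOptions: &swarm.VolumeOptions{
    				Labels: map[string]string{"backup": "daily"},
    			},
    		}},
    	}
    	fmt.Printf("%+v\n", spec)
    }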
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/network.go b/vendor/src/github.com/docker/engine-api/types/swarm/network.go
new file mode 100644
index 0000000000..3715a114ed
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/network.go
@@ -0,0 +1,99 @@
+package swarm
+
+// Endpoint represents an endpoint.
+type Endpoint struct {
+ Spec EndpointSpec `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+ VirtualIPs []EndpointVirtualIP `json:",omitempty"`
+}
+
+// EndpointSpec represents the spec of an endpoint.
+type EndpointSpec struct {
+ Mode ResolutionMode `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+}
+
+// ResolutionMode represents a resolution mode.
+type ResolutionMode string
+
+const (
+ // ResolutionModeVIP VIP
+ ResolutionModeVIP ResolutionMode = "vip"
+ // ResolutionModeDNSRR DNSRR
+ ResolutionModeDNSRR ResolutionMode = "dnsrr"
+)
+
+// PortConfig represents the config of a port.
+type PortConfig struct {
+ Name string `json:",omitempty"`
+ Protocol PortConfigProtocol `json:",omitempty"`
+ TargetPort uint32 `json:",omitempty"`
+ PublishedPort uint32 `json:",omitempty"`
+}
+
+// PortConfigProtocol represents the protocol of a port.
+type PortConfigProtocol string
+
+const (
+ // TODO(stevvooe): These should be used generally, not just for PortConfig.
+
+ // PortConfigProtocolTCP TCP
+ PortConfigProtocolTCP PortConfigProtocol = "tcp"
+ // PortConfigProtocolUDP UDP
+ PortConfigProtocolUDP PortConfigProtocol = "udp"
+)
+
+// EndpointVirtualIP represents the virtual IP of an endpoint on a given network.
+type EndpointVirtualIP struct {
+ NetworkID string `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// Network represents a network.
+type Network struct {
+ ID string
+ Meta
+ Spec NetworkSpec `json:",omitempty"`
+ DriverState Driver `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+}
+
+// NetworkSpec represents the spec of a network.
+type NetworkSpec struct {
+ Annotations
+ DriverConfiguration *Driver `json:",omitempty"`
+ IPv6Enabled bool `json:",omitempty"`
+ Internal bool `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+}
+
+// NetworkAttachmentConfig represents the configuration of a network attachment.
+type NetworkAttachmentConfig struct {
+ Target string `json:",omitempty"`
+ Aliases []string `json:",omitempty"`
+}
+
+// NetworkAttachment represents a network attachment.
+type NetworkAttachment struct {
+ Network Network `json:",omitempty"`
+ Addresses []string `json:",omitempty"`
+}
+
+// IPAMOptions represents IPAM options.
+type IPAMOptions struct {
+ Driver Driver `json:",omitempty"`
+ Configs []IPAMConfig `json:",omitempty"`
+}
+
+// IPAMConfig represents IPAM configuration.
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ Range string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+}
+
+// Driver represents a driver (network/volume).
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
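Sketch of an endpoint spec built from these types (values illustrative, not part of the patch): a VIP-resolved service publishing container port 80 on swarm port 8080:

    package main

    import (
    	"fmt"

    	"github.com/docker/engine-api/types/swarm"
    )

    func main() {
    	ep := swarm.EndpointSpec{
    		Mode: swarm.ResolutionModeVIP,
    		Ports: []swarm.PortConfig{{
    			Name:          "http",
    			Protocol:      swarm.PortConfigProtocolTCP,
    			TargetPort:    80,   // port the container listens on
    			PublishedPort: 8080, // port exposed on every node
    		}},
    	}
    	fmt.Printf("%+v\n", ep)
    }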
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/node.go b/vendor/src/github.com/docker/engine-api/types/swarm/node.go
new file mode 100644
index 0000000000..8421f67a2f
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/node.go
@@ -0,0 +1,118 @@
+package swarm
+
+// Node represents a node.
+type Node struct {
+ ID string
+ Meta
+
+ Spec NodeSpec `json:",omitempty"`
+ Description NodeDescription `json:",omitempty"`
+ Status NodeStatus `json:",omitempty"`
+ ManagerStatus *ManagerStatus `json:",omitempty"`
+}
+
+// NodeSpec represents the spec of a node.
+type NodeSpec struct {
+ Annotations
+ Role NodeRole `json:",omitempty"`
+ Membership NodeMembership `json:",omitempty"`
+ Availability NodeAvailability `json:",omitempty"`
+}
+
+// NodeRole represents the role of a node.
+type NodeRole string
+
+const (
+ // NodeRoleWorker WORKER
+ NodeRoleWorker NodeRole = "worker"
+ // NodeRoleManager MANAGER
+ NodeRoleManager NodeRole = "manager"
+)
+
+// NodeMembership represents the membership of a node.
+type NodeMembership string
+
+const (
+ // NodeMembershipPending PENDING
+ NodeMembershipPending NodeMembership = "pending"
+ // NodeMembershipAccepted ACCEPTED
+ NodeMembershipAccepted NodeMembership = "accepted"
+)
+
+// NodeAvailability represents the availability of a node.
+type NodeAvailability string
+
+const (
+ // NodeAvailabilityActive ACTIVE
+ NodeAvailabilityActive NodeAvailability = "active"
+ // NodeAvailabilityPause PAUSE
+ NodeAvailabilityPause NodeAvailability = "pause"
+ // NodeAvailabilityDrain DRAIN
+ NodeAvailabilityDrain NodeAvailability = "drain"
+)
+
+// NodeDescription represents the description of a node.
+type NodeDescription struct {
+ Hostname string `json:",omitempty"`
+ Platform Platform `json:",omitempty"`
+ Resources Resources `json:",omitempty"`
+ Engine EngineDescription `json:",omitempty"`
+}
+
+// Platform represents the platform (Arch/OS).
+type Platform struct {
+ Architecture string `json:",omitempty"`
+ OS string `json:",omitempty"`
+}
+
+// EngineDescription represents the description of an engine.
+type EngineDescription struct {
+ EngineVersion string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Plugins []PluginDescription `json:",omitempty"`
+}
+
+// PluginDescription represents the description of an engine plugin.
+type PluginDescription struct {
+ Type string `json:",omitempty"`
+ Name string `json:",omitempty"`
+}
+
+// NodeStatus represents the status of a node.
+type NodeStatus struct {
+ State NodeState `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// Reachability represents the reachability of a node.
+type Reachability string
+
+const (
+ // ReachabilityUnknown UNKNOWN
+ ReachabilityUnknown Reachability = "unknown"
+ // ReachabilityUnreachable UNREACHABLE
+ ReachabilityUnreachable Reachability = "unreachable"
+ // ReachabilityReachable REACHABLE
+ ReachabilityReachable Reachability = "reachable"
+)
+
+// ManagerStatus represents the status of a manager.
+type ManagerStatus struct {
+ Leader bool `json:",omitempty"`
+ Reachability Reachability `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// NodeState represents the state of a node.
+type NodeState string
+
+const (
+ // NodeStateUnknown UNKNOWN
+ NodeStateUnknown NodeState = "unknown"
+ // NodeStateDown DOWN
+ NodeStateDown NodeState = "down"
+ // NodeStateReady READY
+ NodeStateReady NodeState = "ready"
+ // NodeStateDisconnected DISCONNECTED
+ NodeStateDisconnected NodeState = "disconnected"
+)
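A sketch combining the enums above into a NodeSpec (values illustrative): per the availability names, draining an accepted manager should stop new tasks from landing on it:

    package main

    import (
    	"fmt"

    	"github.com/docker/engine-api/types/swarm"
    )

    func main() {
    	spec := swarm.NodeSpec{
    		Role:         swarm.NodeRoleManager,
    		Membership:   swarm.NodeMembershipAccepted,
    		Availability: swarm.NodeAvailabilityDrain,
    	}
    	fmt.Printf("%+v\n", spec)
    }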
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/service.go b/vendor/src/github.com/docker/engine-api/types/swarm/service.go
new file mode 100644
index 0000000000..c7952c9395
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/service.go
@@ -0,0 +1,44 @@
+package swarm
+
+import "time"
+
+// Service represents a service.
+type Service struct {
+ ID string
+ Meta
+ Spec ServiceSpec `json:",omitempty"`
+ Endpoint Endpoint `json:",omitempty"`
+}
+
+// ServiceSpec represents the spec of a service.
+type ServiceSpec struct {
+ Annotations
+
+ // TaskTemplate defines how the service should construct new tasks when
+ // orchestrating this service.
+ TaskTemplate TaskSpec `json:",omitempty"`
+ Mode ServiceMode `json:",omitempty"`
+ UpdateConfig *UpdateConfig `json:",omitempty"`
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+ EndpointSpec *EndpointSpec `json:",omitempty"`
+}
+
+// ServiceMode represents the mode of a service.
+type ServiceMode struct {
+ Replicated *ReplicatedService `json:",omitempty"`
+ Global *GlobalService `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+ Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+ Parallelism uint64 `json:",omitempty"`
+ Delay time.Duration `json:",omitempty"`
+}
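Sketch of the two service modes and a rolling-update config (values illustrative, not part of the patch): Replicated pins a task count through a pointer so an explicit zero stays distinguishable from unset, while Global is an empty marker struct, conventionally meaning one task per node:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/docker/engine-api/types/swarm"
    )

    func main() {
    	replicas := uint64(5)
    	replicated := swarm.ServiceMode{Replicated: &swarm.ReplicatedService{Replicas: &replicas}}
    	global := swarm.ServiceMode{Global: &swarm.GlobalService{}}

    	// Roll updates two tasks at a time with a pause between batches.
    	update := swarm.UpdateConfig{Parallelism: 2, Delay: 10 * time.Second}

    	fmt.Printf("%+v %+v %+v\n", replicated, global, update)
    }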
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go b/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go
new file mode 100644
index 0000000000..052b6b3a7f
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go
@@ -0,0 +1,107 @@
+package swarm
+
+import "time"
+
+// Swarm represents a swarm.
+type Swarm struct {
+ ID string
+ Meta
+ Spec Spec
+}
+
+// Spec represents the spec of a swarm.
+type Spec struct {
+ Annotations
+
+ AcceptancePolicy AcceptancePolicy `json:",omitempty"`
+ Orchestration OrchestrationConfig `json:",omitempty"`
+ Raft RaftConfig `json:",omitempty"`
+ Dispatcher DispatcherConfig `json:",omitempty"`
+ CAConfig CAConfig `json:",omitempty"`
+}
+
+// AcceptancePolicy represents the list of policies.
+type AcceptancePolicy struct {
+ Policies []Policy `json:",omitempty"`
+}
+
+// Policy represents a role, autoaccept and secret.
+type Policy struct {
+ Role NodeRole
+ Autoaccept bool
+ Secret string `json:",omitempty"`
+}
+
+// OrchestrationConfig represents orchestration configuration.
+type OrchestrationConfig struct {
+ TaskHistoryRetentionLimit int64 `json:",omitempty"`
+}
+
+// RaftConfig represents raft configuration.
+type RaftConfig struct {
+ SnapshotInterval uint64 `json:",omitempty"`
+ KeepOldSnapshots uint64 `json:",omitempty"`
+ LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+ HeartbeatTick uint32 `json:",omitempty"`
+ ElectionTick uint32 `json:",omitempty"`
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+ HeartbeatPeriod uint64 `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+ NodeCertExpiry time.Duration `json:",omitempty"`
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+ ListenAddr string
+ ForceNewCluster bool
+ Spec Spec
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+ ListenAddr string
+ RemoteAddrs []string
+ Secret string // secret presented for acceptance into the swarm
+ CACertHash string
+ Manager bool
+}
+
+// LocalNodeState represents the state of the local node.
+type LocalNodeState string
+
+const (
+ // LocalNodeStateInactive INACTIVE
+ LocalNodeStateInactive LocalNodeState = "inactive"
+ // LocalNodeStatePending PENDING
+ LocalNodeStatePending LocalNodeState = "pending"
+ // LocalNodeStateActive ACTIVE
+ LocalNodeStateActive LocalNodeState = "active"
+ // LocalNodeStateError ERROR
+ LocalNodeStateError LocalNodeState = "error"
+)
+
+// Info represents generic information about the swarm.
+type Info struct {
+ NodeID string
+
+ LocalNodeState LocalNodeState
+ ControlAvailable bool
+ Error string
+
+ RemoteManagers []Peer
+ Nodes int
+ Managers int
+ CACertHash string
+}
+
+// Peer represents a peer.
+type Peer struct {
+ NodeID string
+ Addr string
+}
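Sketch of an init request using the acceptance-policy types above (illustrative values): workers auto-accepted, managers requiring manual acceptance:

    package main

    import (
    	"fmt"

    	"github.com/docker/engine-api/types/swarm"
    )

    func main() {
    	req := swarm.InitRequest{
    		ListenAddr: "0.0.0.0:2377",
    		Spec: swarm.Spec{
    			AcceptancePolicy: swarm.AcceptancePolicy{
    				Policies: []swarm.Policy{
    					{Role: swarm.NodeRoleWorker, Autoaccept: true},
    					{Role: swarm.NodeRoleManager, Autoaccept: false},
    				},
    			},
    		},
    	}
    	fmt.Printf("%+v\n", req)
    }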
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/task.go b/vendor/src/github.com/docker/engine-api/types/swarm/task.go
new file mode 100644
index 0000000000..ca5fcdef6f
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/task.go
@@ -0,0 +1,110 @@
+package swarm
+
+import "time"
+
+// TaskState represents the state of a task.
+type TaskState string
+
+const (
+ // TaskStateNew NEW
+ TaskStateNew TaskState = "new"
+ // TaskStateAllocated ALLOCATED
+ TaskStateAllocated TaskState = "allocated"
+ // TaskStatePending PENDING
+ TaskStatePending TaskState = "pending"
+ // TaskStateAssigned ASSIGNED
+ TaskStateAssigned TaskState = "assigned"
+ // TaskStateAccepted ACCEPTED
+ TaskStateAccepted TaskState = "accepted"
+ // TaskStatePreparing PREPARING
+ TaskStatePreparing TaskState = "preparing"
+ // TaskStateReady READY
+ TaskStateReady TaskState = "ready"
+ // TaskStateStarting STARTING
+ TaskStateStarting TaskState = "starting"
+ // TaskStateRunning RUNNING
+ TaskStateRunning TaskState = "running"
+ // TaskStateComplete COMPLETE
+ TaskStateComplete TaskState = "complete"
+ // TaskStateShutdown SHUTDOWN
+ TaskStateShutdown TaskState = "shutdown"
+ // TaskStateFailed FAILED
+ TaskStateFailed TaskState = "failed"
+ // TaskStateRejected REJECTED
+ TaskStateRejected TaskState = "rejected"
+)
+
+// Task represents a task.
+type Task struct {
+ ID string
+ Meta
+
+ Spec TaskSpec `json:",omitempty"`
+ ServiceID string `json:",omitempty"`
+ Slot int `json:",omitempty"`
+ NodeID string `json:",omitempty"`
+ Status TaskStatus `json:",omitempty"`
+ DesiredState TaskState `json:",omitempty"`
+ NetworksAttachments []NetworkAttachment `json:",omitempty"`
+}
+
+// TaskSpec represents the spec of a task.
+type TaskSpec struct {
+ ContainerSpec ContainerSpec `json:",omitempty"`
+ Resources *ResourceRequirements `json:",omitempty"`
+ RestartPolicy *RestartPolicy `json:",omitempty"`
+ Placement *Placement `json:",omitempty"`
+}
+
+// Resources represents resources (CPU/Memory).
+type Resources struct {
+ NanoCPUs int64 `json:",omitempty"`
+ MemoryBytes int64 `json:",omitempty"`
+}
+
+// ResourceRequirements represents resource requirements.
+type ResourceRequirements struct {
+ Limits *Resources `json:",omitempty"`
+ Reservations *Resources `json:",omitempty"`
+}
+
+// Placement represents orchestration parameters.
+type Placement struct {
+ Constraints []string `json:",omitempty"`
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+ Condition RestartPolicyCondition `json:",omitempty"`
+ Delay *time.Duration `json:",omitempty"`
+ MaxAttempts *uint64 `json:",omitempty"`
+ Window *time.Duration `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
+type RestartPolicyCondition string
+
+const (
+ // RestartPolicyConditionNone NONE
+ RestartPolicyConditionNone RestartPolicyCondition = "none"
+ // RestartPolicyConditionOnFailure ON_FAILURE
+ RestartPolicyConditionOnFailure RestartPolicyCondition = "on_failure"
+ // RestartPolicyConditionAny ANY
+ RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+ Timestamp time.Time `json:",omitempty"`
+ State TaskState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Err string `json:",omitempty"`
+ ContainerStatus ContainerStatus `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+ ContainerID string `json:",omitempty"`
+ PID int `json:",omitempty"`
+ ExitCode int `json:",omitempty"`
+}
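Sketch composing the task-level types above (values illustrative, not part of the patch): a restart policy with bounded attempts plus CPU/memory limits. Going by the field name, NanoCPUs counts billionths of a CPU:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/docker/engine-api/types/swarm"
    )

    func main() {
    	delay := 5 * time.Second
    	attempts := uint64(3)
    	spec := swarm.TaskSpec{
    		ContainerSpec: swarm.ContainerSpec{Image: "busybox"},
    		RestartPolicy: &swarm.RestartPolicy{
    			Condition:   swarm.RestartPolicyConditionOnFailure,
    			Delay:       &delay,
    			MaxAttempts: &attempts,
    		},
    		Resources: &swarm.ResourceRequirements{
    			// Half a CPU and 64 MiB of memory.
    			Limits: &swarm.Resources{NanoCPUs: 500000000, MemoryBytes: 64 << 20},
    		},
    	}
    	fmt.Printf("%+v\n", spec)
    }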
diff --git a/vendor/src/github.com/docker/engine-api/types/types.go b/vendor/src/github.com/docker/engine-api/types/types.go
index fe453405aa..b91e3c10c6 100644
--- a/vendor/src/github.com/docker/engine-api/types/types.go
+++ b/vendor/src/github.com/docker/engine-api/types/types.go
@@ -7,6 +7,7 @@ import (
"github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/network"
"github.com/docker/engine-api/types/registry"
+ "github.com/docker/engine-api/types/swarm"
"github.com/docker/go-connections/nat"
)
@@ -254,6 +255,7 @@ type Info struct {
SecurityOptions []string
Runtimes map[string]Runtime
DefaultRuntime string
+ Swarm swarm.Info
}
// PluginsInfo is a temp struct holding Plugins name
@@ -323,7 +325,7 @@ type ContainerNode struct {
Addr string
Name string
Cpus int
- Memory int
+ Memory int64
Labels map[string]string
}
diff --git a/vendor/src/github.com/docker/go-connections/nat/nat.go b/vendor/src/github.com/docker/go-connections/nat/nat.go
index 3d469165ab..bca3c2c99a 100644
--- a/vendor/src/github.com/docker/go-connections/nat/nat.go
+++ b/vendor/src/github.com/docker/go-connections/nat/nat.go
@@ -132,92 +132,112 @@ func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding,
exposedPorts = make(map[Port]struct{}, len(ports))
bindings = make(map[Port][]PortBinding)
)
-
for _, rawPort := range ports {
- proto := "tcp"
-
- if i := strings.LastIndex(rawPort, "/"); i != -1 {
- proto = rawPort[i+1:]
- rawPort = rawPort[:i]
- }
- if !strings.Contains(rawPort, ":") {
- rawPort = fmt.Sprintf("::%s", rawPort)
- } else if len(strings.Split(rawPort, ":")) == 2 {
- rawPort = fmt.Sprintf(":%s", rawPort)
- }
-
- parts, err := PartParser(portSpecTemplate, rawPort)
+ portMappings, err := ParsePortSpec(rawPort)
if err != nil {
return nil, nil, err
}
- var (
- containerPort = parts["containerPort"]
- rawIP = parts["ip"]
- hostPort = parts["hostPort"]
- )
-
- if rawIP != "" && net.ParseIP(rawIP) == nil {
- return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP)
- }
- if containerPort == "" {
- return nil, nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
+ for _, portMapping := range portMappings {
+ port := portMapping.Port
+ if _, exists := exposedPorts[port]; !exists {
+ exposedPorts[port] = struct{}{}
+ }
+ bslice, exists := bindings[port]
+ if !exists {
+ bslice = []PortBinding{}
+ }
+ bindings[port] = append(bslice, portMapping.Binding)
}
+ }
+ return exposedPorts, bindings, nil
+}
+
+// PortMapping is a data object mapping a Port to a PortBinding
+type PortMapping struct {
+ Port Port
+ Binding PortBinding
+}
+
+// ParsePortSpec parses a port specification string into a slice of PortMappings
+func ParsePortSpec(rawPort string) ([]PortMapping, error) {
+ proto := "tcp"
+
+ if i := strings.LastIndex(rawPort, "/"); i != -1 {
+ proto = rawPort[i+1:]
+ rawPort = rawPort[:i]
+ }
+ if !strings.Contains(rawPort, ":") {
+ rawPort = fmt.Sprintf("::%s", rawPort)
+ } else if len(strings.Split(rawPort, ":")) == 2 {
+ rawPort = fmt.Sprintf(":%s", rawPort)
+ }
- startPort, endPort, err := ParsePortRange(containerPort)
+ parts, err := PartParser(portSpecTemplate, rawPort)
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ containerPort = parts["containerPort"]
+ rawIP = parts["ip"]
+ hostPort = parts["hostPort"]
+ )
+
+ if rawIP != "" && net.ParseIP(rawIP) == nil {
+ return nil, fmt.Errorf("Invalid ip address: %s", rawIP)
+ }
+ if containerPort == "" {
+ return nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
+ }
+
+ startPort, endPort, err := ParsePortRange(containerPort)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
+ }
+
+ var startHostPort, endHostPort uint64 = 0, 0
+ if len(hostPort) > 0 {
+ startHostPort, endHostPort, err = ParsePortRange(hostPort)
if err != nil {
- return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
+ return nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
}
+ }
- var startHostPort, endHostPort uint64 = 0, 0
- if len(hostPort) > 0 {
- startHostPort, endHostPort, err = ParsePortRange(hostPort)
- if err != nil {
- return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
- }
+ if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) {
+ // Allow host port range iff containerPort is not a range.
+ // In this case, use the host port range as the dynamic
+ // host port range to allocate into.
+ if endPort != startPort {
+ return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
}
+ }
- if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) {
- // Allow host port range iff containerPort is not a range.
- // In this case, use the host port range as the dynamic
- // host port range to allocate into.
- if endPort != startPort {
- return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
- }
- }
+ if !validateProto(strings.ToLower(proto)) {
+ return nil, fmt.Errorf("Invalid proto: %s", proto)
+ }
- if !validateProto(strings.ToLower(proto)) {
- return nil, nil, fmt.Errorf("Invalid proto: %s", proto)
+ ports := []PortMapping{}
+ for i := uint64(0); i <= (endPort - startPort); i++ {
+ containerPort = strconv.FormatUint(startPort+i, 10)
+ if len(hostPort) > 0 {
+ hostPort = strconv.FormatUint(startHostPort+i, 10)
+ }
+ // Set hostPort to a range only if there is a single container port
+ // and a dynamic host port.
+ if startPort == endPort && startHostPort != endHostPort {
+ hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10))
+ }
+ port, err := NewPort(strings.ToLower(proto), containerPort)
+ if err != nil {
+ return nil, err
}
- for i := uint64(0); i <= (endPort - startPort); i++ {
- containerPort = strconv.FormatUint(startPort+i, 10)
- if len(hostPort) > 0 {
- hostPort = strconv.FormatUint(startHostPort+i, 10)
- }
- // Set hostPort to a range only if there is a single container port
- // and a dynamic host port.
- if startPort == endPort && startHostPort != endHostPort {
- hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10))
- }
- port, err := NewPort(strings.ToLower(proto), containerPort)
- if err != nil {
- return nil, nil, err
- }
- if _, exists := exposedPorts[port]; !exists {
- exposedPorts[port] = struct{}{}
- }
-
- binding := PortBinding{
- HostIP: rawIP,
- HostPort: hostPort,
- }
- bslice, exists := bindings[port]
- if !exists {
- bslice = []PortBinding{}
- }
- bindings[port] = append(bslice, binding)
+ binding := PortBinding{
+ HostIP: rawIP,
+ HostPort: hostPort,
}
+ ports = append(ports, PortMapping{Port: port, Binding: binding})
}
- return exposedPorts, bindings, nil
+ return ports, nil
}
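Usage sketch for the extracted ParsePortSpec (not part of the patch): a single spec can expand to several mappings when the container port is a range:

    package main

    import (
    	"fmt"

    	"github.com/docker/go-connections/nat"
    )

    func main() {
    	// "8080:80/tcp" maps host port 8080 to container port 80.
    	mappings, err := nat.ParsePortSpec("8080:80/tcp")
    	if err != nil {
    		panic(err)
    	}
    	for _, m := range mappings {
    		fmt.Printf("%s -> %s:%s\n", m.Port, m.Binding.HostIP, m.Binding.HostPort)
    	}

    	// A container port range expands to one mapping per port.
    	mappings, _ = nat.ParsePortSpec("5000-5002:5000-5002/udp")
    	fmt.Println(len(mappings), "mappings") // 3 mappings
    }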
diff --git a/vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go
index 3395e40229..99846ffddb 100644
--- a/vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go
+++ b/vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go
@@ -79,11 +79,3 @@ func (a dummyAddr) Network() string {
func (a dummyAddr) String() string {
return string(a)
}
-
-// timeoutError is used when there is a timeout with a connection
-// this implements the net.Error interface
-type timeoutError struct{}
-
-func (e *timeoutError) Error() string { return "i/o timeout" }
-func (e *timeoutError) Timeout() bool { return true }
-func (e *timeoutError) Temporary() bool { return true }
diff --git a/vendor/src/github.com/docker/go-connections/tlsconfig/config.go b/vendor/src/github.com/docker/go-connections/tlsconfig/config.go
index 9378c358e6..1ba04395e2 100644
--- a/vendor/src/github.com/docker/go-connections/tlsconfig/config.go
+++ b/vendor/src/github.com/docker/go-connections/tlsconfig/config.go
@@ -72,12 +72,7 @@ func certPool(caFile string) (*x509.CertPool, error) {
if !certPool.AppendCertsFromPEM(pem) {
return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
}
- s := certPool.Subjects()
- subjects := make([]string, len(s))
- for i, subject := range s {
- subjects[i] = string(subject)
- }
- logrus.Debugf("Trusting certs with subjects: %v", subjects)
+ logrus.Debugf("Trusting %d certs", len(certPool.Subjects()))
return certPool, nil
}
diff --git a/vendor/src/github.com/docker/go-events/retry.go b/vendor/src/github.com/docker/go-events/retry.go
index 501deeb55f..4ddb3ac6a7 100644
--- a/vendor/src/github.com/docker/go-events/retry.go
+++ b/vendor/src/github.com/docker/go-events/retry.go
@@ -1,7 +1,9 @@
package events
import (
+ "math/rand"
"sync"
+ "sync/atomic"
"time"
"github.com/Sirupsen/logrus"
@@ -35,7 +37,6 @@ func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink {
// or the sink is closed.
func (rs *RetryingSink) Write(event Event) error {
logger := logrus.WithField("event", event)
- var timer *time.Timer
retry:
select {
@@ -45,16 +46,13 @@ retry:
}
if backoff := rs.strategy.Proceed(event); backoff > 0 {
- if timer == nil {
- timer = time.NewTimer(backoff)
- defer timer.Stop()
- } else {
- timer.Reset(backoff)
- }
-
select {
- case <-timer.C:
- goto retry
+ case <-time.After(backoff):
+ // TODO(stevvooe): This branch holds up the next try. Before, we
+ // would simply break to the "retry" label and then possibly wait
+ // again. However, this requires all retry strategies to have a
+ // large probability of probing the sink for success, rather than
+ // just backing off and sending the request.
case <-rs.closed:
return ErrSinkClosed
}
@@ -111,9 +109,6 @@ type RetryStrategy interface {
Success(event Event)
}
-// TODO(stevvooe): We are using circuit breaker here. May want to provide
-// bounded exponential backoff, as well.
-
// Breaker implements a circuit breaker retry strategy.
//
// The current implementation never drops events.
@@ -166,3 +161,89 @@ func (b *Breaker) Failure(event Event, err error) bool {
b.last = time.Now().UTC()
return false // never drop events.
}
+
+var (
+ // DefaultExponentialBackoffConfig provides a default configuration for
+ // exponential backoff.
+ DefaultExponentialBackoffConfig = ExponentialBackoffConfig{
+ Base: time.Second,
+ Factor: time.Second,
+ Max: 20 * time.Second,
+ }
+)
+
+// ExponentialBackoffConfig configures backoff parameters.
+//
+// Note that these parameters operate on the upper bound for choosing a random
+// value. For example, at Base=1s, a random value in [0,1s) will be chosen for
+// the backoff value.
+type ExponentialBackoffConfig struct {
+ // Base is the minimum bound for backing off after failure.
+ Base time.Duration
+
+ // Factor sets the amount of time by which the backoff grows with each
+ // failure.
+ Factor time.Duration
+
+ // Max is the absolute maximum bound for a single backoff.
+ Max time.Duration
+}
+
+// ExponentialBackoff implements random backoff with exponentially increasing
+// bounds as the number of consecutive failures increases.
+type ExponentialBackoff struct {
+ config ExponentialBackoffConfig
+ failures uint64 // consecutive failure counter.
+}
+
+// NewExponentialBackoff returns an exponential backoff strategy with the
+// desired config. If config is nil, the default is returned.
+func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff {
+ return &ExponentialBackoff{
+ config: config,
+ }
+}
+
+// Proceed returns the next randomly bound exponential backoff time.
+func (b *ExponentialBackoff) Proceed(event Event) time.Duration {
+ return b.backoff(atomic.LoadUint64(&b.failures))
+}
+
+// Success resets the failures counter.
+func (b *ExponentialBackoff) Success(event Event) {
+ atomic.StoreUint64(&b.failures, 0)
+}
+
+// Failure increments the failure counter.
+func (b *ExponentialBackoff) Failure(event Event, err error) bool {
+ atomic.AddUint64(&b.failures, 1)
+ return false
+}
+
+// backoff calculates the amount of time to wait based on the number of
+// consecutive failures.
+func (b *ExponentialBackoff) backoff(failures uint64) time.Duration {
+ if failures <= 0 {
+ // proceed normally when there are no failures.
+ return 0
+ }
+
+ factor := b.config.Factor
+ if factor <= 0 {
+ factor = DefaultExponentialBackoffConfig.Factor
+ }
+
+ backoff := b.config.Base + factor*time.Duration(1<<(failures-1))
+
+ max := b.config.Max
+ if max <= 0 {
+ max = DefaultExponentialBackoffConfig.Max
+ }
+
+ if backoff > max || backoff < 0 {
+ backoff = max
+ }
+
+ // Choose a uniformly distributed value from [0, backoff).
+ return time.Duration(rand.Int63n(int64(backoff)))
+}
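Usage sketch for the new strategy (not part of the patch): each Failure raises the upper bound, Base + Factor*2^(n-1) capped at Max, and Proceed draws a uniformly random wait below that bound. Event is an empty interface in this package, so nil stands in for a real event here:

    package main

    import (
    	"fmt"

    	"github.com/docker/go-events"
    )

    func main() {
    	strategy := events.NewExponentialBackoff(events.DefaultExponentialBackoffConfig)
    	for i := 0; i < 5; i++ {
    		strategy.Failure(nil, nil)
    		// Bounds with the defaults: 2s, 3s, 5s, 9s, 17s (capped at 20s).
    		fmt.Println("failure", i+1, "-> wait", strategy.Proceed(nil))
    	}
    	strategy.Success(nil) // resets the consecutive-failure counter
    }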
diff --git a/vendor/src/github.com/docker/libnetwork/Dockerfile.build b/vendor/src/github.com/docker/libnetwork/Dockerfile.build
index 035f852c03..cabcefe0ef 100644
--- a/vendor/src/github.com/docker/libnetwork/Dockerfile.build
+++ b/vendor/src/github.com/docker/libnetwork/Dockerfile.build
@@ -1,4 +1,4 @@
-FROM golang:1.5.3
+FROM golang:1.5.4
RUN apt-get update && apt-get -y install iptables
RUN go get github.com/tools/godep \
diff --git a/vendor/src/github.com/docker/libnetwork/Makefile b/vendor/src/github.com/docker/libnetwork/Makefile
index 6edbf8da12..899ca4b9be 100644
--- a/vendor/src/github.com/docker/libnetwork/Makefile
+++ b/vendor/src/github.com/docker/libnetwork/Makefile
@@ -47,7 +47,7 @@ check: ${build_image}.created
check-code:
@echo "Checking code... "
- test -z "$$(golint ./... | tee /dev/stderr)"
+ test -z "$$(golint ./... | grep -v .pb.go: | tee /dev/stderr)"
go vet ./...
@echo "Done checking code"
diff --git a/vendor/src/github.com/docker/libnetwork/ROADMAP.md b/vendor/src/github.com/docker/libnetwork/ROADMAP.md
index 9cb3174ec5..f2d8a42be4 100644
--- a/vendor/src/github.com/docker/libnetwork/ROADMAP.md
+++ b/vendor/src/github.com/docker/libnetwork/ROADMAP.md
@@ -4,7 +4,7 @@ This document defines the high-level goals of the libnetwork project. See [Proje
## Long-term Goal
-libnetwork project will follow Docker and Linux philosophy of delivering small, highly modular and composable tools that works well independently.
+libnetwork project will follow Docker and Linux philosophy of delivering small, highly modular and composable tools that work well independently.
libnetwork aims to satisfy that composable need for Networking in Containers.
## Short-term Goals
diff --git a/vendor/src/github.com/docker/libnetwork/agent.go b/vendor/src/github.com/docker/libnetwork/agent.go
index ca54d8c923..7276beefe5 100644
--- a/vendor/src/github.com/docker/libnetwork/agent.go
+++ b/vendor/src/github.com/docker/libnetwork/agent.go
@@ -1,10 +1,14 @@
package libnetwork
+//go:generate protoc -I.:Godeps/_workspace/src/github.com/gogo/protobuf --gogo_out=import_path=github.com/docker/libnetwork,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. agent.proto
+
import (
+ "encoding/hex"
"fmt"
"net"
"os"
- "strings"
+ "sort"
+ "strconv"
"github.com/Sirupsen/logrus"
"github.com/docker/go-events"
@@ -12,8 +16,18 @@ import (
"github.com/docker/libnetwork/discoverapi"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/networkdb"
+ "github.com/docker/libnetwork/types"
+ "github.com/gogo/protobuf/proto"
)
+// ByTime implements sort.Interface for []*types.EncryptionKey based on
+// the LamportTime field.
+type ByTime []*types.EncryptionKey
+
+func (b ByTime) Len() int { return len(b) }
+func (b ByTime) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b ByTime) Less(i, j int) bool { return b[i].LamportTime < b[j].LamportTime }
+
type agent struct {
networkDB *networkdb.NetworkDB
bindAddr string
@@ -55,15 +69,155 @@ func resolveAddr(addrOrInterface string) (string, error) {
return addrOrInterface, nil
}
- // If not a valid IP address, it should be a valid interface
- return getBindAddr(addrOrInterface)
+ addr, err := net.ResolveIPAddr("ip", addrOrInterface)
+ if err != nil {
+ // If not a valid IP address, it should be a valid interface
+ return getBindAddr(addrOrInterface)
+ }
+ return addr.String(), nil
+}
+
+func (c *controller) handleKeyChange(keys []*types.EncryptionKey) error {
+ drvEnc := discoverapi.DriverEncryptionUpdate{}
+
+ // Find the new key and add it to the key ring
+ a := c.agent
+ for _, key := range keys {
+ same := false
+ for _, cKey := range c.keys {
+ if same = cKey.LamportTime == key.LamportTime; same {
+ break
+ }
+ }
+ if !same {
+ c.keys = append(c.keys, key)
+ if key.Subsystem == "networking:gossip" {
+ a.networkDB.SetKey(key.Key)
+ }
+ if key.Subsystem == "networking:gossip" /*"networking:ipsec"*/ {
+ drvEnc.Key = hex.EncodeToString(key.Key)
+ drvEnc.Tag = strconv.FormatUint(key.LamportTime, 10)
+ }
+ break
+ }
+ }
+ // Find the deleted key. If the deleted key was the primary key,
+ // a new primary key should be set before removing it from the keyring.
+ deleted := []byte{}
+ for i, cKey := range c.keys {
+ same := false
+ for _, key := range keys {
+ if same = key.LamportTime == cKey.LamportTime; same {
+ break
+ }
+ }
+ if !same {
+ if cKey.Subsystem == "networking:gossip" {
+ deleted = cKey.Key
+ }
+ if cKey.Subsystem == "networking:gossip" /*"networking:ipsec"*/ {
+ drvEnc.Prune = hex.EncodeToString(cKey.Key)
+ drvEnc.PruneTag = strconv.FormatUint(cKey.LamportTime, 10)
+ }
+ c.keys = append(c.keys[:i], c.keys[i+1:]...)
+ break
+ }
+ }
+
+ sort.Sort(ByTime(c.keys))
+ for _, key := range c.keys {
+ if key.Subsystem == "networking:gossip" {
+ a.networkDB.SetPrimaryKey(key.Key)
+ break
+ }
+ }
+ for _, key := range c.keys {
+ if key.Subsystem == "networking:gossip" /*"networking:ipsec"*/ {
+ drvEnc.Primary = hex.EncodeToString(key.Key)
+ drvEnc.PrimaryTag = strconv.FormatUint(key.LamportTime, 10)
+ break
+ }
+ }
+ if len(deleted) > 0 {
+ a.networkDB.RemoveKey(deleted)
+ }
+
+ c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
+ err := driver.DiscoverNew(discoverapi.EncryptionKeysUpdate, drvEnc)
+ if err != nil {
+ logrus.Warnf("Failed to update datapath keys in driver %s: %v", name, err)
+ }
+ return false
+ })
+
+ return nil
+}
+
+func (c *controller) agentSetup() error {
+ clusterProvider := c.cfg.Daemon.ClusterProvider
+
+ bindAddr, _, _ := net.SplitHostPort(clusterProvider.GetListenAddress())
+ remote := clusterProvider.GetRemoteAddress()
+ remoteAddr, _, _ := net.SplitHostPort(remote)
+
+ // Determine the BindAddress from RemoteAddress or through best-effort routing
+ if !isValidClusteringIP(bindAddr) {
+ if !isValidClusteringIP(remoteAddr) {
+ remote = "8.8.8.8:53"
+ }
+ conn, err := net.Dial("udp", remote)
+ if err == nil {
+ bindHostPort := conn.LocalAddr().String()
+ bindAddr, _, _ = net.SplitHostPort(bindHostPort)
+ conn.Close()
+ }
+ }
+
+ if bindAddr != "" && c.agent == nil {
+ if err := c.agentInit(bindAddr); err != nil {
+ logrus.Errorf("Error in agentInit : %v", err)
+ } else {
+ c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
+ if capability.DataScope == datastore.GlobalScope {
+ c.agentDriverNotify(driver)
+ }
+ return false
+ })
+
+ if c.agent != nil {
+ close(c.agentInitDone)
+ }
+ }
+ }
+ if remoteAddr != "" {
+ if err := c.agentJoin(remoteAddr); err != nil {
+ logrus.Errorf("Error in agentJoin : %v", err)
+ }
+ }
+ return nil
}
func (c *controller) agentInit(bindAddrOrInterface string) error {
- if !c.cfg.Daemon.IsAgent {
+ if !c.isAgent() {
return nil
}
+ drvEnc := discoverapi.DriverEncryptionConfig{}
+
+ // sort the keys by lamport time
+ sort.Sort(ByTime(c.keys))
+
+ gossipkey := [][]byte{}
+ for _, key := range c.keys {
+ if key.Subsystem == "networking:gossip" {
+ gossipkey = append(gossipkey, key.Key)
+ }
+ if key.Subsystem == "networking:gossip" /*"networking:ipsec"*/ {
+ drvEnc.Keys = append(drvEnc.Keys, hex.EncodeToString(key.Key))
+ drvEnc.Tags = append(drvEnc.Tags, strconv.FormatUint(key.LamportTime, 10))
+ }
+ }
+
bindAddr, err := resolveAddr(bindAddrOrInterface)
if err != nil {
return err
@@ -73,6 +227,7 @@ func (c *controller) agentInit(bindAddrOrInterface string) error {
nDB, err := networkdb.New(&networkdb.Config{
BindAddr: bindAddr,
NodeName: hostname,
+ Keys: gossipkey,
})
if err != nil {
@@ -89,15 +244,24 @@ func (c *controller) agentInit(bindAddrOrInterface string) error {
}
go c.handleTableEvents(ch, c.handleEpTableEvent)
+
+ c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
+ err := driver.DiscoverNew(discoverapi.EncryptionKeysConfig, drvEnc)
+ if err != nil {
+ logrus.Warnf("Failed to set datapath keys in driver %s: %v", name, err)
+ }
+ return false
+ })
+
return nil
}
-func (c *controller) agentJoin(remotes []string) error {
+func (c *controller) agentJoin(remote string) error {
if c.agent == nil {
return nil
}
- return c.agent.networkDB.Join(remotes)
+ return c.agent.networkDB.Join([]string{remote})
}
func (c *controller) agentDriverNotify(d driverapi.Driver) {
@@ -109,6 +273,22 @@ func (c *controller) agentDriverNotify(d driverapi.Driver) {
Address: c.agent.bindAddr,
Self: true,
})
+
+ drvEnc := discoverapi.DriverEncryptionConfig{}
+ for _, key := range c.keys {
+ if key.Subsystem == "networking:gossip" /*"networking:ipsec"*/ {
+ drvEnc.Keys = append(drvEnc.Keys, hex.EncodeToString(key.Key))
+ drvEnc.Tags = append(drvEnc.Tags, strconv.FormatUint(key.LamportTime, 10))
+ }
+ }
+ c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
+ err := driver.DiscoverNew(discoverapi.EncryptionKeysConfig, drvEnc)
+ if err != nil {
+ logrus.Warnf("Failed to set datapath keys in driver %s: %v", name, err)
+ }
+ return false
+ })
+
}
func (c *controller) agentClose() {
@@ -124,6 +304,7 @@ func (c *controller) agentClose() {
c.agent.epTblCancel()
c.agent.networkDB.Close()
+ c.agent = nil
}
func (n *network) isClusterEligible() bool {
@@ -165,12 +346,32 @@ func (ep *endpoint) addToCluster() error {
c := n.getController()
if !ep.isAnonymous() && ep.Iface().Address() != nil {
- if err := c.addServiceBinding(ep.svcName, ep.svcID, n.ID(), ep.ID(), ep.Iface().Address().IP); err != nil {
+ var ingressPorts []*PortConfig
+ if ep.svcID != "" {
+ // Gossip ingress ports only in ingress network.
+ if n.ingress {
+ ingressPorts = ep.ingressPorts
+ }
+
+ if err := c.addServiceBinding(ep.svcName, ep.svcID, n.ID(), ep.ID(), ep.virtualIP, ingressPorts, ep.Iface().Address().IP); err != nil {
+ return err
+ }
+ }
+
+ buf, err := proto.Marshal(&EndpointRecord{
+ Name: ep.Name(),
+ ServiceName: ep.svcName,
+ ServiceID: ep.svcID,
+ VirtualIP: ep.virtualIP.String(),
+ IngressPorts: ingressPorts,
+ EndpointIP: ep.Iface().Address().IP.String(),
+ })
+
+ if err != nil {
return err
}
- if err := c.agent.networkDB.CreateEntry("endpoint_table", n.ID(), ep.ID(), []byte(fmt.Sprintf("%s,%s,%s,%s", ep.Name(), ep.svcName,
- ep.svcID, ep.Iface().Address().IP))); err != nil {
+ if err := c.agent.networkDB.CreateEntry("endpoint_table", n.ID(), ep.ID(), buf); err != nil {
return err
}
}
@@ -192,8 +393,13 @@ func (ep *endpoint) deleteFromCluster() error {
c := n.getController()
if !ep.isAnonymous() {
- if ep.Iface().Address() != nil {
- if err := c.rmServiceBinding(ep.svcName, ep.svcID, n.ID(), ep.ID(), ep.Iface().Address().IP); err != nil {
+ if ep.svcID != "" && ep.Iface().Address() != nil {
+ var ingressPorts []*PortConfig
+ if n.ingress {
+ ingressPorts = ep.ingressPorts
+ }
+
+ if err := c.rmServiceBinding(ep.svcName, ep.svcID, n.ID(), ep.ID(), ep.virtualIP, ingressPorts, ep.Iface().Address().IP); err != nil {
return err
}
}
@@ -310,20 +516,21 @@ func (c *controller) handleEpTableEvent(ev events.Event) {
var (
nid string
eid string
- value string
+ value []byte
isAdd bool
+ epRec EndpointRecord
)
switch event := ev.(type) {
case networkdb.CreateEvent:
nid = event.NetworkID
eid = event.Key
- value = string(event.Value)
+ value = event.Value
isAdd = true
case networkdb.DeleteEvent:
nid = event.NetworkID
eid = event.Key
- value = string(event.Value)
+ value = event.Value
case networkdb.UpdateEvent:
logrus.Errorf("Unexpected update service table event = %#v", event)
}
@@ -335,16 +542,18 @@ func (c *controller) handleEpTableEvent(ev events.Event) {
}
n := nw.(*network)
- vals := strings.Split(value, ",")
- if len(vals) < 4 {
- logrus.Errorf("Incorrect service table value = %s", value)
+ err = proto.Unmarshal(value, &epRec)
+ if err != nil {
+ logrus.Errorf("Failed to unmarshal service table value: %v", err)
return
}
- name := vals[0]
- svcName := vals[1]
- svcID := vals[2]
- ip := net.ParseIP(vals[3])
+ name := epRec.Name
+ svcName := epRec.ServiceName
+ svcID := epRec.ServiceID
+ vip := net.ParseIP(epRec.VirtualIP)
+ ip := net.ParseIP(epRec.EndpointIP)
+ ingressPorts := epRec.IngressPorts
if name == "" || ip == nil {
logrus.Errorf("Invalid endpoint name/ip received while handling service table event %s", value)
@@ -352,16 +561,20 @@ func (c *controller) handleEpTableEvent(ev events.Event) {
}
if isAdd {
- if err := c.addServiceBinding(svcName, svcID, nid, eid, ip); err != nil {
- logrus.Errorf("Failed adding service binding for value %s: %v", value, err)
- return
+ if svcID != "" {
+ if err := c.addServiceBinding(svcName, svcID, nid, eid, vip, ingressPorts, ip); err != nil {
+ logrus.Errorf("Failed adding service binding for value %s: %v", value, err)
+ return
+ }
}
n.addSvcRecords(name, ip, nil, true)
} else {
- if err := c.rmServiceBinding(svcName, svcID, nid, eid, ip); err != nil {
- logrus.Errorf("Failed adding service binding for value %s: %v", value, err)
- return
+ if svcID != "" {
+ if err := c.rmServiceBinding(svcName, svcID, nid, eid, vip, ingressPorts, ip); err != nil {
+ logrus.Errorf("Failed adding service binding for value %s: %v", value, err)
+ return
+ }
}
n.deleteSvcRecords(name, ip, nil, true)
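Sketch of the ByTime ordering introduced at the top of this file (not part of the patch), which the key-rotation logic relies on to pick the lowest-Lamport-time gossip key as primary; the sort.Interface methods are reproduced only so the snippet compiles standalone:

    package main

    import (
    	"fmt"
    	"sort"

    	"github.com/docker/libnetwork/types"
    )

    // ByTime mirrors the sort.Interface defined in agent.go.
    type ByTime []*types.EncryptionKey

    func (b ByTime) Len() int           { return len(b) }
    func (b ByTime) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
    func (b ByTime) Less(i, j int) bool { return b[i].LamportTime < b[j].LamportTime }

    func main() {
    	keys := []*types.EncryptionKey{
    		{Subsystem: "networking:gossip", LamportTime: 3},
    		{Subsystem: "networking:gossip", LamportTime: 1},
    		{Subsystem: "networking:gossip", LamportTime: 2},
    	}
    	sort.Sort(ByTime(keys))
    	for _, k := range keys {
    		fmt.Println(k.LamportTime) // 1, 2, 3
    	}
    }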
diff --git a/vendor/src/github.com/docker/libnetwork/agent.pb.go b/vendor/src/github.com/docker/libnetwork/agent.pb.go
new file mode 100644
index 0000000000..19b30422ad
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/agent.pb.go
@@ -0,0 +1,893 @@
+// Code generated by protoc-gen-gogo.
+// source: agent.proto
+// DO NOT EDIT!
+
+/*
+ Package libnetwork is a generated protocol buffer package.
+
+ It is generated from these files:
+ agent.proto
+
+ It has these top-level messages:
+ EndpointRecord
+ PortConfig
+*/
+package libnetwork
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+const _ = proto.GoGoProtoPackageIsVersion1
+
+type PortConfig_Protocol int32
+
+const (
+ ProtocolTCP PortConfig_Protocol = 0
+ ProtocolUDP PortConfig_Protocol = 1
+)
+
+var PortConfig_Protocol_name = map[int32]string{
+ 0: "TCP",
+ 1: "UDP",
+}
+var PortConfig_Protocol_value = map[string]int32{
+ "TCP": 0,
+ "UDP": 1,
+}
+
+func (x PortConfig_Protocol) String() string {
+ return proto.EnumName(PortConfig_Protocol_name, int32(x))
+}
+func (PortConfig_Protocol) EnumDescriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 0} }
+
+// EndpointRecord specifies all the endpoint-specific information that
+// needs to be gossiped to nodes participating in the network.
+type EndpointRecord struct {
+ // Name of the endpoint
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Service name of the service to which this endpoint belongs.
+ ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+ // Service ID of the service to which this endpoint belongs.
+ ServiceID string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ // Virtual IP of the service to which this endpoint belongs.
+ VirtualIP string `protobuf:"bytes,4,opt,name=virtual_ip,json=virtualIp,proto3" json:"virtual_ip,omitempty"`
+ // IP assigned to this endpoint.
+ EndpointIP string `protobuf:"bytes,5,opt,name=endpoint_ip,json=endpointIp,proto3" json:"endpoint_ip,omitempty"`
+ // IngressPorts exposed by the service to which this endpoint belongs.
+ IngressPorts []*PortConfig `protobuf:"bytes,6,rep,name=ingress_ports,json=ingressPorts" json:"ingress_ports,omitempty"`
+}
+
+func (m *EndpointRecord) Reset() { *m = EndpointRecord{} }
+func (*EndpointRecord) ProtoMessage() {}
+func (*EndpointRecord) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0} }
+
+func (m *EndpointRecord) GetIngressPorts() []*PortConfig {
+ if m != nil {
+ return m.IngressPorts
+ }
+ return nil
+}
+
+// PortConfig specifies an exposed port which can be
+// addressed using the given name. It can later be queried
+// using a service discovery API or a DNS SRV query. The node
+// port specifies a port that can be used to address this
+// service from outside the cluster by sending a connection
+// request to that port on any node in the cluster.
+type PortConfig struct {
+ // Name for the port. If provided the port information can
+ // be queried using the name as in a DNS SRV query.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Protocol for the port which is exposed.
+ Protocol PortConfig_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=libnetwork.PortConfig_Protocol" json:"protocol,omitempty"`
+ // The port which the application is exposing and is bound to.
+ TargetPort uint32 `protobuf:"varint,3,opt,name=target_port,json=targetPort,proto3" json:"target_port,omitempty"`
+ // PublishedPort specifies the port on which the service is
+ // exposed on all nodes on the cluster. If not specified an
+ // arbitrary port in the node port range is allocated by the
+ // system. If specified it should be within the node port
+ // range and it should be available.
+ PublishedPort uint32 `protobuf:"varint,4,opt,name=published_port,json=publishedPort,proto3" json:"published_port,omitempty"`
+}
+
+func (m *PortConfig) Reset() { *m = PortConfig{} }
+func (*PortConfig) ProtoMessage() {}
+func (*PortConfig) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1} }
+
+func init() {
+ proto.RegisterType((*EndpointRecord)(nil), "libnetwork.EndpointRecord")
+ proto.RegisterType((*PortConfig)(nil), "libnetwork.PortConfig")
+ proto.RegisterEnum("libnetwork.PortConfig_Protocol", PortConfig_Protocol_name, PortConfig_Protocol_value)
+}
+func (this *EndpointRecord) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 10)
+ s = append(s, "&libnetwork.EndpointRecord{")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ s = append(s, "ServiceName: "+fmt.Sprintf("%#v", this.ServiceName)+",\n")
+ s = append(s, "ServiceID: "+fmt.Sprintf("%#v", this.ServiceID)+",\n")
+ s = append(s, "VirtualIP: "+fmt.Sprintf("%#v", this.VirtualIP)+",\n")
+ s = append(s, "EndpointIP: "+fmt.Sprintf("%#v", this.EndpointIP)+",\n")
+ if this.IngressPorts != nil {
+ s = append(s, "IngressPorts: "+fmt.Sprintf("%#v", this.IngressPorts)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *PortConfig) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&libnetwork.PortConfig{")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ s = append(s, "Protocol: "+fmt.Sprintf("%#v", this.Protocol)+",\n")
+ s = append(s, "TargetPort: "+fmt.Sprintf("%#v", this.TargetPort)+",\n")
+ s = append(s, "PublishedPort: "+fmt.Sprintf("%#v", this.PublishedPort)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringAgent(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringAgent(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+func (m *EndpointRecord) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EndpointRecord) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintAgent(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ }
+ if len(m.ServiceName) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintAgent(data, i, uint64(len(m.ServiceName)))
+ i += copy(data[i:], m.ServiceName)
+ }
+ if len(m.ServiceID) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintAgent(data, i, uint64(len(m.ServiceID)))
+ i += copy(data[i:], m.ServiceID)
+ }
+ if len(m.VirtualIP) > 0 {
+ data[i] = 0x22
+ i++
+ i = encodeVarintAgent(data, i, uint64(len(m.VirtualIP)))
+ i += copy(data[i:], m.VirtualIP)
+ }
+ if len(m.EndpointIP) > 0 {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintAgent(data, i, uint64(len(m.EndpointIP)))
+ i += copy(data[i:], m.EndpointIP)
+ }
+ if len(m.IngressPorts) > 0 {
+ for _, msg := range m.IngressPorts {
+ data[i] = 0x32
+ i++
+ i = encodeVarintAgent(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PortConfig) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PortConfig) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintAgent(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ }
+ if m.Protocol != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintAgent(data, i, uint64(m.Protocol))
+ }
+ if m.TargetPort != 0 {
+ data[i] = 0x18
+ i++
+ i = encodeVarintAgent(data, i, uint64(m.TargetPort))
+ }
+ if m.PublishedPort != 0 {
+ data[i] = 0x20
+ i++
+ i = encodeVarintAgent(data, i, uint64(m.PublishedPort))
+ }
+ return i, nil
+}
+
+func encodeFixed64Agent(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Agent(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintAgent(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *EndpointRecord) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ l = len(m.ServiceName)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ l = len(m.ServiceID)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ l = len(m.VirtualIP)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ l = len(m.EndpointIP)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if len(m.IngressPorts) > 0 {
+ for _, e := range m.IngressPorts {
+ l = e.Size()
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PortConfig) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if m.Protocol != 0 {
+ n += 1 + sovAgent(uint64(m.Protocol))
+ }
+ if m.TargetPort != 0 {
+ n += 1 + sovAgent(uint64(m.TargetPort))
+ }
+ if m.PublishedPort != 0 {
+ n += 1 + sovAgent(uint64(m.PublishedPort))
+ }
+ return n
+}
+
+func sovAgent(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozAgent(x uint64) (n int) {
+ return sovAgent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *EndpointRecord) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EndpointRecord{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`,
+ `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`,
+ `VirtualIP:` + fmt.Sprintf("%v", this.VirtualIP) + `,`,
+ `EndpointIP:` + fmt.Sprintf("%v", this.EndpointIP) + `,`,
+ `IngressPorts:` + strings.Replace(fmt.Sprintf("%v", this.IngressPorts), "PortConfig", "PortConfig", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PortConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PortConfig{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
+ `TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`,
+ `PublishedPort:` + fmt.Sprintf("%v", this.PublishedPort) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringAgent(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *EndpointRecord) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointRecord: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointRecord: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VirtualIP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VirtualIP = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EndpointIP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EndpointIP = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IngressPorts", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IngressPorts = append(m.IngressPorts, &PortConfig{})
+ if err := m.IngressPorts[len(m.IngressPorts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAgent(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PortConfig) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PortConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PortConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ m.Protocol = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Protocol |= (PortConfig_Protocol(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType)
+ }
+ m.TargetPort = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.TargetPort |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PublishedPort", wireType)
+ }
+ m.PublishedPort = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.PublishedPort |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAgent(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipAgent(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthAgent
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipAgent(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthAgent = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowAgent = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorAgent = []byte{
+ // 384 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xda, 0x40,
+ 0x18, 0xc6, 0x31, 0xb8, 0x08, 0xbf, 0xc6, 0x2e, 0x3a, 0x55, 0x95, 0xc5, 0x60, 0x28, 0x52, 0x25,
+ 0x86, 0xca, 0x48, 0x74, 0x64, 0x03, 0x3a, 0x78, 0xa9, 0x2c, 0xf7, 0xcf, 0x8a, 0x0c, 0xbe, 0xba,
+ 0xa7, 0xba, 0x3e, 0xeb, 0x7c, 0xd0, 0xb5, 0x63, 0x94, 0x2d, 0x1f, 0x20, 0x53, 0xbe, 0x4c, 0xc6,
+ 0x8c, 0x99, 0xa2, 0xc0, 0x9a, 0x25, 0x1f, 0x21, 0x77, 0x67, 0x1b, 0x14, 0x89, 0xe1, 0x95, 0x4e,
+ 0xbf, 0xe7, 0xf7, 0x9e, 0x5e, 0x3d, 0x60, 0x46, 0x09, 0xce, 0xb8, 0x97, 0x33, 0xca, 0x29, 0x82,
+ 0x94, 0xac, 0x33, 0xcc, 0xff, 0x51, 0xf6, 0xa7, 0xff, 0x2e, 0xa1, 0x09, 0x55, 0x78, 0x22, 0x5f,
+ 0xa5, 0x31, 0xba, 0x6a, 0x82, 0xfd, 0x25, 0x8b, 0x73, 0x4a, 0x32, 0x1e, 0xe2, 0x0d, 0x65, 0x31,
+ 0x42, 0xa0, 0x67, 0xd1, 0x5f, 0xec, 0x68, 0x43, 0x6d, 0x6c, 0x84, 0xea, 0x8d, 0x3e, 0x40, 0xb7,
+ 0xc0, 0x6c, 0x47, 0x36, 0x78, 0xa5, 0xb2, 0xa6, 0xca, 0xcc, 0x8a, 0x7d, 0x95, 0xca, 0x27, 0x80,
+ 0x5a, 0x21, 0xb1, 0xd3, 0x92, 0xc2, 0xdc, 0x3a, 0x3c, 0x0c, 0x8c, 0x6f, 0x25, 0xf5, 0x97, 0xa1,
+ 0x51, 0x09, 0x7e, 0x2c, 0xed, 0x1d, 0x61, 0x7c, 0x1b, 0xa5, 0x2b, 0x92, 0x3b, 0xfa, 0xc9, 0xfe,
+ 0x59, 0x52, 0x3f, 0x08, 0x8d, 0x4a, 0xf0, 0x73, 0x34, 0x01, 0x13, 0x57, 0x47, 0x4a, 0xfd, 0x8d,
+ 0xd2, 0x6d, 0xa1, 0x43, 0x7d, 0xbb, 0xf0, 0xa1, 0x56, 0xc4, 0xc2, 0x0c, 0x2c, 0x92, 0x25, 0x0c,
+ 0x17, 0xc5, 0x2a, 0xa7, 0x8c, 0x17, 0x4e, 0x7b, 0xd8, 0x1a, 0x9b, 0xd3, 0xf7, 0xde, 0xa9, 0x10,
+ 0x2f, 0x10, 0xc1, 0x82, 0x66, 0xbf, 0x48, 0x12, 0x76, 0x2b, 0x59, 0xa2, 0x62, 0xf4, 0xa4, 0x01,
+ 0x9c, 0xc2, 0xb3, 0x7d, 0xcc, 0xa0, 0xa3, 0xfa, 0xdb, 0xd0, 0x54, 0x75, 0x61, 0x4f, 0x07, 0xe7,
+ 0xbf, 0xf6, 0x82, 0x4a, 0x0b, 0x8f, 0x0b, 0x68, 0x00, 0x26, 0x8f, 0x58, 0x82, 0xb9, 0xba, 0x4d,
+ 0x55, 0x65, 0x85, 0x50, 0x22, 0xb9, 0x89, 0x3e, 0x82, 0x9d, 0x6f, 0xd7, 0x29, 0x29, 0x7e, 0xe3,
+ 0xb8, 0x74, 0x74, 0xe5, 0x58, 0x47, 0x2a, 0xb5, 0xd1, 0x12, 0x3a, 0xf5, 0xef, 0xc8, 0x81, 0xd6,
+ 0xf7, 0x45, 0xd0, 0x6b, 0xf4, 0xdf, 0x5e, 0x5e, 0x0f, 0xcd, 0x1a, 0x0b, 0x24, 0x93, 0x1f, 0xcb,
+ 0xa0, 0xa7, 0xbd, 0x4e, 0x04, 0xea, 0xeb, 0x17, 0x37, 0x6e, 0x63, 0xee, 0xdc, 0xef, 0xdd, 0xc6,
+ 0xf3, 0xde, 0xd5, 0xfe, 0x1f, 0x5c, 0xed, 0x56, 0xcc, 0x9d, 0x98, 0x47, 0x31, 0xeb, 0xb6, 0xba,
+ 0xf8, 0xf3, 0x4b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x6d, 0x44, 0x68, 0x53, 0x02, 0x00, 0x00,
+}
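The generated encodeVarintAgent/sovAgent helpers implement standard protobuf varints: seven payload bits per byte, with the high bit set on every byte except the last. A standalone copy of the two helpers, showing that 300 sizes and encodes to two bytes (0xac, 0x02):

package main

import "fmt"

// sov mirrors the generated sovAgent: the number of bytes needed to
// encode x as a protobuf varint (7 payload bits per byte).
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// encodeVarint mirrors encodeVarintAgent: write v at data[offset:],
// returning the offset just past the last byte written.
func encodeVarint(data []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		data[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	data[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, sov(300))
	encodeVarint(buf, 0, 300)
	fmt.Printf("%#v\n", buf) // []byte{0xac, 0x2}: low 7 bits 0x2c|0x80, then 300>>7 = 2
}
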
diff --git a/vendor/src/github.com/docker/libnetwork/agent.proto b/vendor/src/github.com/docker/libnetwork/agent.proto
new file mode 100644
index 0000000000..5d2b096755
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/agent.proto
@@ -0,0 +1,66 @@
+syntax = "proto3";
+
+import "gogoproto/gogo.proto";
+
+package libnetwork;
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.stringer_all) = true;
+option (gogoproto.gostring_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.goproto_stringer_all) = false;
+
+// EndpointRecord specifies all the endpoint-specific information that
+// needs to be gossiped to nodes participating in the network.
+message EndpointRecord {
+ // Name of the endpoint
+ string name = 1;
+
+ // Service name of the service to which this endpoint belongs.
+ string service_name = 2;
+
+ // Service ID of the service to which this endpoint belongs.
+ string service_id = 3 [(gogoproto.customname) = "ServiceID"];
+
+ // Virtual IP of the service to which this endpoint belongs.
+ string virtual_ip = 4 [(gogoproto.customname) = "VirtualIP"];
+
+ // IP assigned to this endpoint.
+ string endpoint_ip = 5 [(gogoproto.customname) = "EndpointIP"];
+
+ // IngressPorts exposed by the service to which this endpoint belongs.
+ repeated PortConfig ingress_ports = 6;
+}
+
+// PortConfig specifies an exposed port which can be
+// addressed using the given name. It can later be queried
+// using a service discovery API or a DNS SRV query. The node
+// port specifies a port that can be used to address this
+// service from outside the cluster by sending a connection
+// request to that port on any node in the cluster.
+message PortConfig {
+ enum Protocol {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ TCP = 0 [(gogoproto.enumvalue_customname) = "ProtocolTCP"];
+ UDP = 1 [(gogoproto.enumvalue_customname) = "ProtocolUDP"];
+ }
+
+ // Name for the port. If provided the port information can
+ // be queried using the name as in a DNS SRV query.
+ string name = 1;
+
+ // Protocol for the port which is exposed.
+ Protocol protocol = 2;
+
+ // The port which the application is exposing and is bound to.
+ uint32 target_port = 3;
+
+ // PublishedPort specifies the port on which the service is
+ // exposed on all nodes on the cluster. If not specified an
+ // arbitrary port in the node port range is allocated by the
+ // system. If specified it should be within the node port
+ // range and it should be available.
+ uint32 published_port = 4;
+}
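For reference, here is how one of these messages can be built from Go and printed through the generated String() method (available because stringer_all is true and goproto_stringer_all is false above). The import path and field values are assumptions for illustration:

package main

import (
	"fmt"

	"github.com/docker/libnetwork" // assumed import path for the generated types
)

func main() {
	rec := &libnetwork.EndpointRecord{
		Name:        "web.1.task", // hypothetical names and addresses
		ServiceName: "web",
		ServiceID:   "svc1",
		VirtualIP:   "10.255.0.5",
		EndpointIP:  "10.255.0.7",
		IngressPorts: []*libnetwork.PortConfig{{
			Name:          "http",
			Protocol:      libnetwork.ProtocolTCP, // enumvalue_customname strips the enum prefix
			TargetPort:    80,
			PublishedPort: 30080,
		}},
	}
	fmt.Println(rec) // dispatches to the generated String()
}
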
diff --git a/vendor/src/github.com/docker/libnetwork/circle.yml b/vendor/src/github.com/docker/libnetwork/circle.yml
index a454d21201..a5db509c67 100644
--- a/vendor/src/github.com/docker/libnetwork/circle.yml
+++ b/vendor/src/github.com/docker/libnetwork/circle.yml
@@ -5,7 +5,6 @@ machine:
dependencies:
override:
- sudo apt-get update; sudo apt-get install -y iptables zookeeperd
- - go get golang.org/x/tools/cmd/vet
- go get golang.org/x/tools/cmd/goimports
- go get golang.org/x/tools/cmd/cover
- go get github.com/tools/godep
diff --git a/vendor/src/github.com/docker/libnetwork/cluster/provider.go b/vendor/src/github.com/docker/libnetwork/cluster/provider.go
new file mode 100644
index 0000000000..3b91a41ff8
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/cluster/provider.go
@@ -0,0 +1,10 @@
+package cluster
+
+// Provider provides clustering config details
+type Provider interface {
+ IsManager() bool
+ IsAgent() bool
+ GetListenAddress() string
+ GetRemoteAddress() string
+ ListenClusterEvents() <-chan struct{}
+}
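Anything satisfying this small interface can drive libnetwork's clustering behavior; in this merge the daemon's cluster component is the real implementation. A hypothetical, minimal stand-in (useful for tests) might look like:

package main

import (
	"fmt"

	"github.com/docker/libnetwork/cluster"
)

// staticProvider is a hypothetical cluster.Provider with fixed answers
// and a caller-controlled event channel.
type staticProvider struct {
	events chan struct{}
}

func (p *staticProvider) IsManager() bool                      { return true }
func (p *staticProvider) IsAgent() bool                        { return true }
func (p *staticProvider) GetListenAddress() string             { return "10.0.0.1:2377" } // hypothetical
func (p *staticProvider) GetRemoteAddress() string             { return "" }
func (p *staticProvider) ListenClusterEvents() <-chan struct{} { return p.events }

var _ cluster.Provider = (*staticProvider)(nil) // compile-time interface check

func main() {
	p := &staticProvider{events: make(chan struct{}, 1)}
	p.events <- struct{}{} // signal that cluster config is available
	<-p.ListenClusterEvents()
	fmt.Println("event consumed; agent setup would start here")
}
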
diff --git a/vendor/src/github.com/docker/libnetwork/config/config.go b/vendor/src/github.com/docker/libnetwork/config/config.go
index 62d9993a90..2bae6f459f 100644
--- a/vendor/src/github.com/docker/libnetwork/config/config.go
+++ b/vendor/src/github.com/docker/libnetwork/config/config.go
@@ -8,6 +8,7 @@ import (
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/pkg/tlsconfig"
"github.com/docker/libkv/store"
+ "github.com/docker/libnetwork/cluster"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/netlabel"
)
@@ -21,15 +22,14 @@ type Config struct {
// DaemonCfg represents libnetwork core configuration
type DaemonCfg struct {
- Debug bool
- IsAgent bool
- DataDir string
- DefaultNetwork string
- DefaultDriver string
- Bind string
- Neighbors []string
- Labels []string
- DriverCfg map[string]interface{}
+ Debug bool
+ DataDir string
+ DefaultNetwork string
+ DefaultDriver string
+ Labels []string
+ DriverCfg map[string]interface{}
+ ClusterProvider cluster.Provider
+ DisableProvider chan struct{}
}
// ClusterCfg represents cluster configuration
@@ -69,7 +69,8 @@ func ParseConfig(tomlCfgFile string) (*Config, error) {
func ParseConfigOptions(cfgOptions ...Option) *Config {
cfg := &Config{
Daemon: DaemonCfg{
- DriverCfg: make(map[string]interface{}),
+ DriverCfg: make(map[string]interface{}),
+ DisableProvider: make(chan struct{}, 10),
},
Scopes: make(map[string]*datastore.ScopeCfg),
}
@@ -84,27 +85,6 @@ func ParseConfigOptions(cfgOptions ...Option) *Config {
// to the controller
type Option func(c *Config)
-// OptionBind function returns an option setter for setting a bind interface or address
-func OptionBind(bind string) Option {
- return func(c *Config) {
- c.Daemon.Bind = bind
- }
-}
-
-// OptionAgent function returns an option setter for setting agent mode
-func OptionAgent() Option {
- return func(c *Config) {
- c.Daemon.IsAgent = true
- }
-}
-
-// OptionNeighbors function returns an option setter for setting a list of neighbors to join.
-func OptionNeighbors(neighbors []string) Option {
- return func(c *Config) {
- c.Daemon.Neighbors = neighbors
- }
-}
-
// OptionDefaultNetwork function returns an option setter for a default network
func OptionDefaultNetwork(dn string) Option {
return func(c *Config) {
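DaemonCfg is populated through functional options like OptionDefaultNetwork above. A simplified re-creation of the pattern (types trimmed to the essentials, not the real config package) showing how ParseConfigOptions applies setters over defaults:

package main

import "fmt"

// Config and Option sketch libnetwork's functional-option pattern:
// each Option mutates the Config in place.
type Config struct {
	DefaultNetwork string
	Labels         []string
}

type Option func(c *Config)

func OptionDefaultNetwork(dn string) Option {
	return func(c *Config) { c.DefaultNetwork = dn }
}

// ParseConfigOptions builds a default Config, then applies each setter.
func ParseConfigOptions(opts ...Option) *Config {
	c := &Config{}
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	cfg := ParseConfigOptions(OptionDefaultNetwork("bridge"))
	fmt.Println(cfg.DefaultNetwork) // bridge
}
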
diff --git a/vendor/src/github.com/docker/libnetwork/controller.go b/vendor/src/github.com/docker/libnetwork/controller.go
index fa14b1cf1f..551e888b40 100644
--- a/vendor/src/github.com/docker/libnetwork/controller.go
+++ b/vendor/src/github.com/docker/libnetwork/controller.go
@@ -54,6 +54,7 @@ import (
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/pkg/plugins"
"github.com/docker/docker/pkg/stringid"
+ "github.com/docker/libnetwork/cluster"
"github.com/docker/libnetwork/config"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/discoverapi"
@@ -69,7 +70,7 @@ import (
// NetworkController provides the interface for controller instance which manages
// networks.
type NetworkController interface {
- // ID provides an unique identity for the controller
+ // ID provides a unique identity for the controller
ID() string
// Config method returns the bootup configuration for the controller
@@ -90,7 +91,7 @@ type NetworkController interface {
// NetworkByID returns the Network which has the passed id. If not found, the error ErrNoSuchNetwork is returned.
NetworkByID(id string) (Network, error)
- // NewSandbox cretes a new network sandbox for the passed container id
+ // NewSandbox creates a new network sandbox for the passed container id
NewSandbox(containerID string, options ...SandboxOption) (Sandbox, error)
// Sandboxes returns the list of Sandbox(s) managed by this controller.
@@ -110,6 +111,15 @@ type NetworkController interface {
// ReloadConfiguration updates the controller configuration
ReloadConfiguration(cfgOptions ...config.Option) error
+
+ // SetClusterProvider sets cluster provider
+ SetClusterProvider(provider cluster.Provider)
+
+ // AgentInitWait waits for agent initialization to complete in the libnetwork controller
+ AgentInitWait()
+
+ // SetKeys configures the encryption key for gossip and overlay data path
+ SetKeys(keys []*types.EncryptionKey) error
}
// NetworkWalker is a client provided function which will be used to walk the Networks.
@@ -123,21 +133,25 @@ type SandboxWalker func(sb Sandbox) bool
type sandboxTable map[string]*sandbox
type controller struct {
- id string
- drvRegistry *drvregistry.DrvRegistry
- sandboxes sandboxTable
- cfg *config.Config
- stores []datastore.DataStore
- discovery hostdiscovery.HostDiscovery
- extKeyListener net.Listener
- watchCh chan *endpoint
- unWatchCh chan *endpoint
- svcRecords map[string]svcInfo
- nmap map[string]*netWatch
- serviceBindings map[string]*service
- defOsSbox osl.Sandbox
- sboxOnce sync.Once
- agent *agent
+ id string
+ drvRegistry *drvregistry.DrvRegistry
+ sandboxes sandboxTable
+ cfg *config.Config
+ stores []datastore.DataStore
+ discovery hostdiscovery.HostDiscovery
+ extKeyListener net.Listener
+ watchCh chan *endpoint
+ unWatchCh chan *endpoint
+ svcRecords map[string]svcInfo
+ nmap map[string]*netWatch
+ serviceBindings map[string]*service
+ defOsSbox osl.Sandbox
+ ingressSandbox *sandbox
+ sboxOnce sync.Once
+ agent *agent
+ agentInitDone chan struct{}
+ keys []*types.EncryptionKey
+ clusterConfigAvailable bool
sync.Mutex
}
@@ -154,14 +168,7 @@ func New(cfgOptions ...config.Option) (NetworkController, error) {
sandboxes: sandboxTable{},
svcRecords: make(map[string]svcInfo),
serviceBindings: make(map[string]*service),
- }
-
- if err := c.agentInit(c.cfg.Daemon.Bind); err != nil {
- return nil, err
- }
-
- if err := c.agentJoin(c.cfg.Daemon.Neighbors); err != nil {
- return nil, err
+ agentInitDone: make(chan struct{}),
}
if err := c.initStores(); err != nil {
@@ -196,6 +203,14 @@ func New(cfgOptions ...config.Option) (NetworkController, error) {
}
}
+ // Reserve pools first before doing cleanup. If the pools are not
+ // populated properly, the endpoint/network and sandbox cleanups
+ // below will not be able to release IP subnets and addresses back
+ // into the pools, because the pools won't exist.
+ c.reservePools()
+
+ // Cleanup resources
c.sandboxCleanup()
c.cleanupLocalEndpoints()
c.networkCleanup()
@@ -207,6 +222,84 @@ func New(cfgOptions ...config.Option) (NetworkController, error) {
return c, nil
}
+func (c *controller) SetClusterProvider(provider cluster.Provider) {
+ c.Lock()
+ defer c.Unlock()
+ c.cfg.Daemon.ClusterProvider = provider
+ if provider != nil {
+ go c.clusterAgentInit()
+ } else {
+ c.cfg.Daemon.DisableProvider <- struct{}{}
+ }
+}
+
+func isValidClusteringIP(addr string) bool {
+ return addr != "" && !net.ParseIP(addr).IsLoopback() && !net.ParseIP(addr).IsUnspecified()
+}
+
+// The libnetwork side of the agent depends on the keys. On the first
+// receipt of keys, set up the agent. For subsequent key sets, handle
+// the key change.
+func (c *controller) SetKeys(keys []*types.EncryptionKey) error {
+ c.Lock()
+ existingKeys := c.keys
+ clusterConfigAvailable := c.clusterConfigAvailable
+ agent := c.agent
+ c.Unlock()
+ if len(existingKeys) == 0 {
+ c.Lock()
+ c.keys = keys
+ c.Unlock()
+ if agent != nil {
+ return fmt.Errorf("libnetwork agent setup without keys")
+ }
+ if clusterConfigAvailable {
+ return c.agentSetup()
+ }
+ log.Debugf("received encryption keys before cluster config")
+ return nil
+ }
+ if agent == nil {
+ c.Lock()
+ c.keys = keys
+ c.Unlock()
+ return nil
+ }
+ return c.handleKeyChange(keys)
+}
+
+func (c *controller) clusterAgentInit() {
+ clusterProvider := c.cfg.Daemon.ClusterProvider
+ for {
+ select {
+ case <-clusterProvider.ListenClusterEvents():
+ if !c.isDistributedControl() {
+ c.Lock()
+ c.clusterConfigAvailable = true
+ keys := c.keys
+ c.Unlock()
+ // agent initialization needs encryption keys and the bind/remote IPs,
+ // which come from the daemon cluster events
+ if len(keys) > 0 {
+ c.agentSetup()
+ }
+ }
+ case <-c.cfg.Daemon.DisableProvider:
+ c.Lock()
+ c.clusterConfigAvailable = false
+ c.agentInitDone = make(chan struct{})
+ c.Unlock()
+ c.agentClose()
+ return
+ }
+ }
+}
+
+// AgentInitWait waits for agent initialization to be completed in the
+// controller.
+func (c *controller) AgentInitWait() {
+ <-c.agentInitDone
+}
+
func (c *controller) makeDriverConfig(ntype string) map[string]interface{} {
if c.cfg == nil {
return nil
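The intended daemon-side sequence for the methods added above is: SetClusterProvider when the node joins or leaves a swarm, SetKeys when encryption keys arrive, and AgentInitWait to block until the agent is up. A standalone sketch of that handshake with the plumbing reduced to a single channel (names mirror the controller API; the real logic is in the hunk above):

package main

import "fmt"

type miniController struct {
	agentInitDone chan struct{}
}

// SetKeys stands in for the real method: in libnetwork, the first key
// delivery triggers agentSetup(), which eventually closes agentInitDone.
func (c *miniController) SetKeys(keys [][]byte) {
	close(c.agentInitDone)
}

// AgentInitWait blocks until agent initialization has completed.
func (c *miniController) AgentInitWait() { <-c.agentInitDone }

func main() {
	c := &miniController{agentInitDone: make(chan struct{})}
	go c.SetKeys([][]byte{[]byte("key1")}) // keys pushed by the swarm manager
	c.AgentInitWait()                      // daemon blocks here until the agent is ready
	fmt.Println("agent initialized")
}
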
@@ -246,28 +339,6 @@ func (c *controller) makeDriverConfig(ntype string) map[string]interface{} {
var procReloadConfig = make(chan (bool), 1)
-func (c *controller) processAgentConfig(cfg *config.Config) (bool, error) {
- if c.cfg.Daemon.IsAgent == cfg.Daemon.IsAgent {
- // Agent configuration not changed
- return false, nil
- }
-
- c.Lock()
- c.cfg = cfg
- c.Unlock()
-
- if err := c.agentInit(c.cfg.Daemon.Bind); err != nil {
- return false, err
- }
-
- if err := c.agentJoin(c.cfg.Daemon.Neighbors); err != nil {
- c.agentClose()
- return false, err
- }
-
- return true, nil
-}
-
func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error {
procReloadConfig <- true
defer func() { <-procReloadConfig }()
@@ -277,15 +348,6 @@ func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error {
update := false
cfg := config.ParseConfigOptions(cfgOptions...)
- isAgentConfig, err := c.processAgentConfig(cfg)
- if err != nil {
- return err
- }
-
- if isAgentConfig {
- return nil
- }
-
for s := range c.cfg.Scopes {
if _, ok := cfg.Scopes[s]; !ok {
return types.ForbiddenErrorf("cannot accept new configuration because it removes an existing datastore client")
@@ -451,6 +513,24 @@ func (c *controller) Config() config.Config {
return *c.cfg
}
+func (c *controller) isManager() bool {
+ if c.cfg == nil || c.cfg.Daemon.ClusterProvider == nil {
+ return false
+ }
+ return c.cfg.Daemon.ClusterProvider.IsManager()
+}
+
+func (c *controller) isAgent() bool {
+ if c.cfg == nil || c.cfg.Daemon.ClusterProvider == nil {
+ return false
+ }
+ return c.cfg.Daemon.ClusterProvider.IsAgent()
+}
+
+func (c *controller) isDistributedControl() bool {
+ return !c.isManager() && !c.isAgent()
+}
+
func (c *controller) RegisterDriver(networkType string, driver driverapi.Driver, capability driverapi.Capability) error {
c.Lock()
hd := c.discovery
@@ -489,13 +569,27 @@ func (c *controller) NewNetwork(networkType, name string, id string, options ...
network.processOptions(options...)
+ _, cap, err := network.resolveDriver(networkType, true)
+ if err != nil {
+ return nil, err
+ }
+
+ if cap.DataScope == datastore.GlobalScope && !c.isDistributedControl() && !network.dynamic {
+ if c.isManager() {
+ // In a non-distributed-control environment, global-scoped non-dynamic networks are redirected to the manager
+ return nil, ManagerRedirectError(name)
+ }
+
+ return nil, types.ForbiddenErrorf("Cannot create a multi-host network from a worker node. Please create the network from a manager node.")
+ }
+
// Make sure we have a driver available for this network type
// before we allocate anything.
if _, err := network.driver(true); err != nil {
return nil, err
}
- err := network.ipamAllocate()
+ err = network.ipamAllocate()
if err != nil {
return nil, err
}
@@ -546,6 +640,52 @@ func (c *controller) NewNetwork(networkType, name string, id string, options ...
return network, nil
}
+func (c *controller) reservePools() {
+ networks, err := c.getNetworksForScope(datastore.LocalScope)
+ if err != nil {
+ log.Warnf("Could not retrieve networks from local store during ipam allocation for existing networks: %v", err)
+ return
+ }
+
+ for _, n := range networks {
+ if !doReplayPoolReserve(n) {
+ continue
+ }
+ // Construct pseudo configs for the auto IP case
+ autoIPv4 := (len(n.ipamV4Config) == 0 || (len(n.ipamV4Config) == 1 && n.ipamV4Config[0].PreferredPool == "")) && len(n.ipamV4Info) > 0
+ autoIPv6 := (len(n.ipamV6Config) == 0 || (len(n.ipamV6Config) == 1 && n.ipamV6Config[0].PreferredPool == "")) && len(n.ipamV6Info) > 0
+ if autoIPv4 {
+ n.ipamV4Config = []*IpamConf{{PreferredPool: n.ipamV4Info[0].Pool.String()}}
+ }
+ if n.enableIPv6 && autoIPv6 {
+ n.ipamV6Config = []*IpamConf{{PreferredPool: n.ipamV6Info[0].Pool.String()}}
+ }
+ // Account current network gateways
+ for i, c := range n.ipamV4Config {
+ if c.Gateway == "" && n.ipamV4Info[i].Gateway != nil {
+ c.Gateway = n.ipamV4Info[i].Gateway.IP.String()
+ }
+ }
+ for i, c := range n.ipamV6Config {
+ if c.Gateway == "" && n.ipamV6Info[i].Gateway != nil {
+ c.Gateway = n.ipamV6Info[i].Gateway.IP.String()
+ }
+ }
+ if err := n.ipamAllocate(); err != nil {
+ log.Warnf("Failed to allocate ipam pool(s) for network %q (%s): %v", n.Name(), n.ID(), err)
+ }
+ }
+}
+
+func doReplayPoolReserve(n *network) bool {
+ _, caps, err := n.getController().getIPAMDriver(n.ipamType)
+ if err != nil {
+ log.Warnf("Failed to retrieve ipam driver for network %q (%s): %v", n.Name(), n.ID(), err)
+ return false
+ }
+ return caps.RequiresRequestReplay
+}
+
func (c *controller) addNetwork(n *network) error {
d, err := n.driver(true)
if err != nil {
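The "auto IP case" test in reservePools treats a network as auto-allocated when it has no explicit IPAM config (or a single config with an empty PreferredPool) yet has allocated pool info. The predicate in isolation, with simplified stand-in types:

package main

import "fmt"

// Simplified stand-ins for libnetwork's IpamConf and IpamInfo.
type IpamConf struct{ PreferredPool string }
type IpamInfo struct{ Pool string }

// autoAllocated mirrors the condition computed for autoIPv4/autoIPv6 above.
func autoAllocated(cfg []*IpamConf, info []*IpamInfo) bool {
	return (len(cfg) == 0 || (len(cfg) == 1 && cfg[0].PreferredPool == "")) && len(info) > 0
}

func main() {
	info := []*IpamInfo{{Pool: "172.18.0.0/16"}}
	fmt.Println(autoAllocated(nil, info))                                         // true: no config, pool allocated
	fmt.Println(autoAllocated([]*IpamConf{{PreferredPool: "10.1.0.0/16"}}, info)) // false: explicit pool
}
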
@@ -623,9 +763,7 @@ func (c *controller) NetworkByID(id string) (Network, error) {
}
// NewSandbox creates a new sandbox for the passed container id
-func (c *controller) NewSandbox(containerID string, options ...SandboxOption) (Sandbox, error) {
- var err error
-
+func (c *controller) NewSandbox(containerID string, options ...SandboxOption) (sBox Sandbox, err error) {
if containerID == "" {
return nil, types.BadRequestErrorf("invalid container ID")
}
@@ -662,11 +800,32 @@ func (c *controller) NewSandbox(containerID string, options ...SandboxOption) (S
controller: c,
}
}
+ sBox = sb
heap.Init(&sb.endpoints)
sb.processOptions(options...)
+ c.Lock()
+ if sb.ingress && c.ingressSandbox != nil {
+ c.Unlock()
+ return nil, fmt.Errorf("ingress sandbox already present")
+ }
+
+ if sb.ingress {
+ c.ingressSandbox = sb
+ }
+ c.Unlock()
+ defer func() {
+ if err != nil {
+ c.Lock()
+ if sb.ingress {
+ c.ingressSandbox = nil
+ }
+ c.Unlock()
+ }
+ }()
+
if err = sb.setupResolutionFiles(); err != nil {
return nil, err
}
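The NewSandbox signature change to named returns (sBox Sandbox, err error) exists so the deferred closure above can observe the final err and release the ingress slot on any later failure. The idiom in miniature, with the controller state reduced to one flag:

package main

import (
	"errors"
	"fmt"
)

var ingressTaken bool // stand-in for c.ingressSandbox != nil

func newSandbox(ingress, fail bool) (err error) {
	if ingress {
		if ingressTaken {
			return errors.New("ingress sandbox already present")
		}
		ingressTaken = true
	}
	// Because err is a named return, this defer sees whatever error the
	// function ultimately returns and can undo the registration.
	defer func() {
		if err != nil && ingress {
			ingressTaken = false
		}
	}()
	if fail {
		return errors.New("setup failed")
	}
	return nil
}

func main() {
	fmt.Println(newSandbox(true, true), ingressTaken)  // setup failed false (rolled back)
	fmt.Println(newSandbox(true, false), ingressTaken) // <nil> true
}
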
diff --git a/vendor/src/github.com/docker/libnetwork/discoverapi/discoverapi.go b/vendor/src/github.com/docker/libnetwork/discoverapi/discoverapi.go
index eeacc3204e..080424a182 100644
--- a/vendor/src/github.com/docker/libnetwork/discoverapi/discoverapi.go
+++ b/vendor/src/github.com/docker/libnetwork/discoverapi/discoverapi.go
@@ -18,6 +18,10 @@ const (
NodeDiscovery = iota + 1
// DatastoreConfig represents an add/remove datastore event
DatastoreConfig
+ // EncryptionKeysConfig represents the initial key(s) for performing datapath encryption
+ EncryptionKeysConfig
+ // EncryptionKeysUpdate represents an update to the datapath encryption key(s)
+ EncryptionKeysUpdate
)
// NodeDiscoveryData represents the structure backing the node discovery data json string
@@ -33,3 +37,23 @@ type DatastoreConfigData struct {
Address string
Config interface{}
}
+
+// DriverEncryptionConfig contains the initial datapath encryption key(s)
+// Key in first position is the primary key, the one to be used in tx.
+// Original key and tag types are []byte and uint64
+type DriverEncryptionConfig struct {
+ Keys []string
+ Tags []string
+}
+
+// DriverEncryptionUpdate carries an update to the encryption key(s) as:
+// a new key and/or set a primary key and/or a removal of an existing key.
+// Original key and tag types are []byte and uint64
+type DriverEncryptionUpdate struct {
+ Key string
+ Tag string
+ Primary string
+ PrimaryTag string
+ Prune string
+ PruneTag string
+}
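A sketch of filling DriverEncryptionConfig from the original types noted in the comment ([]byte keys, uint64 tags). The direct string conversions here are an assumption for illustration; the actual encoding used by callers is not shown in this diff:

package main

import (
	"fmt"
	"strconv"

	"github.com/docker/libnetwork/discoverapi"
)

func main() {
	rawKeys := [][]byte{[]byte("primary-key"), []byte("fallback-key")} // hypothetical key material
	rawTags := []uint64{10, 11}

	// Key in first position is the primary key, per the comment above.
	cfg := discoverapi.DriverEncryptionConfig{}
	for i, k := range rawKeys {
		cfg.Keys = append(cfg.Keys, string(k))
		cfg.Tags = append(cfg.Tags, strconv.FormatUint(rawTags[i], 10))
	}
	fmt.Printf("%+v\n", cfg)
}
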
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
index baa38db5a4..4064e6272a 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
@@ -19,6 +19,7 @@ import (
"github.com/docker/libnetwork/iptables"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/netutils"
+ "github.com/docker/libnetwork/ns"
"github.com/docker/libnetwork/options"
"github.com/docker/libnetwork/osl"
"github.com/docker/libnetwork/portmapper"
@@ -119,6 +120,7 @@ type driver struct {
isolationChain *iptables.ChainInfo
networks map[string]*bridgeNetwork
store datastore.DataStore
+ nlh *netlink.Handle
sync.Mutex
}
@@ -615,8 +617,15 @@ func (d *driver) createNetwork(config *networkConfiguration) error {
}
}()
+ // Initialize handle when needed
+ d.Lock()
+ if d.nlh == nil {
+ d.nlh = ns.NlHandle()
+ }
+ d.Unlock()
+
// Create or retrieve the bridge L3 interface
- bridgeIface := newInterface(config)
+ bridgeIface := newInterface(d.nlh, config)
network.bridge = bridgeIface
// Verify the network configuration does not conflict with previously installed
@@ -758,7 +767,7 @@ func (d *driver) DeleteNetwork(nid string) error {
// We only delete the bridge when it's not the default bridge. This is to keep backward-compatible behavior.
if !config.DefaultBridge {
- if err := netlink.LinkDel(n.bridge.Link); err != nil {
+ if err := d.nlh.LinkDel(n.bridge.Link); err != nil {
logrus.Warnf("Failed to remove bridge interface %s on network %s delete: %v", config.BridgeName, nid, err)
}
}
@@ -772,12 +781,12 @@ func (d *driver) DeleteNetwork(nid string) error {
return d.storeDelete(config)
}
-func addToBridge(ifaceName, bridgeName string) error {
- link, err := netlink.LinkByName(ifaceName)
+func addToBridge(nlh *netlink.Handle, ifaceName, bridgeName string) error {
+ link, err := nlh.LinkByName(ifaceName)
if err != nil {
return fmt.Errorf("could not find interface %s: %v", ifaceName, err)
}
- if err = netlink.LinkSetMaster(link,
+ if err = nlh.LinkSetMaster(link,
&netlink.Bridge{LinkAttrs: netlink.LinkAttrs{Name: bridgeName}}); err != nil {
logrus.Debugf("Failed to add %s to bridge via netlink.Trying ioctl: %v", ifaceName, err)
iface, err := net.InterfaceByName(ifaceName)
@@ -795,8 +804,8 @@ func addToBridge(ifaceName, bridgeName string) error {
return nil
}
-func setHairpinMode(link netlink.Link, enable bool) error {
- err := netlink.LinkSetHairpin(link, enable)
+func setHairpinMode(nlh *netlink.Handle, link netlink.Link, enable bool) error {
+ err := nlh.LinkSetHairpin(link, enable)
if err != nil && err != syscall.EINVAL {
// If error is not EINVAL something else went wrong, bail out right away
return fmt.Errorf("unable to set hairpin mode on %s via netlink: %v",
@@ -887,13 +896,13 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
}()
// Generate a name for what will be the host side pipe interface
- hostIfName, err := netutils.GenerateIfaceName(vethPrefix, vethLen)
+ hostIfName, err := netutils.GenerateIfaceName(d.nlh, vethPrefix, vethLen)
if err != nil {
return err
}
// Generate a name for what will be the sandbox side pipe interface
- containerIfName, err := netutils.GenerateIfaceName(vethPrefix, vethLen)
+ containerIfName, err := netutils.GenerateIfaceName(d.nlh, vethPrefix, vethLen)
if err != nil {
return err
}
@@ -902,29 +911,29 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
veth := &netlink.Veth{
LinkAttrs: netlink.LinkAttrs{Name: hostIfName, TxQLen: 0},
PeerName: containerIfName}
- if err = netlink.LinkAdd(veth); err != nil {
+ if err = d.nlh.LinkAdd(veth); err != nil {
return types.InternalErrorf("failed to add the host (%s) <=> sandbox (%s) pair interfaces: %v", hostIfName, containerIfName, err)
}
// Get the host side pipe interface handler
- host, err := netlink.LinkByName(hostIfName)
+ host, err := d.nlh.LinkByName(hostIfName)
if err != nil {
return types.InternalErrorf("failed to find host side interface %s: %v", hostIfName, err)
}
defer func() {
if err != nil {
- netlink.LinkDel(host)
+ d.nlh.LinkDel(host)
}
}()
// Get the sandbox side pipe interface handler
- sbox, err := netlink.LinkByName(containerIfName)
+ sbox, err := d.nlh.LinkByName(containerIfName)
if err != nil {
return types.InternalErrorf("failed to find sandbox side interface %s: %v", containerIfName, err)
}
defer func() {
if err != nil {
- netlink.LinkDel(sbox)
+ d.nlh.LinkDel(sbox)
}
}()
@@ -934,23 +943,23 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
// Add bridge inherited attributes to pipe interfaces
if config.Mtu != 0 {
- err = netlink.LinkSetMTU(host, config.Mtu)
+ err = d.nlh.LinkSetMTU(host, config.Mtu)
if err != nil {
return types.InternalErrorf("failed to set MTU on host interface %s: %v", hostIfName, err)
}
- err = netlink.LinkSetMTU(sbox, config.Mtu)
+ err = d.nlh.LinkSetMTU(sbox, config.Mtu)
if err != nil {
return types.InternalErrorf("failed to set MTU on sandbox interface %s: %v", containerIfName, err)
}
}
// Attach host side pipe interface into the bridge
- if err = addToBridge(hostIfName, config.BridgeName); err != nil {
+ if err = addToBridge(d.nlh, hostIfName, config.BridgeName); err != nil {
return fmt.Errorf("adding interface %s to bridge %s failed: %v", hostIfName, config.BridgeName, err)
}
if !dconfig.EnableUserlandProxy {
- err = setHairpinMode(host, true)
+ err = setHairpinMode(d.nlh, host, true)
if err != nil {
return err
}
@@ -971,7 +980,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
}
// Up the host interface after finishing all netlink configuration
- if err = netlink.LinkSetUp(host); err != nil {
+ if err = d.nlh.LinkSetUp(host); err != nil {
return fmt.Errorf("could not set link up for host interface %s: %v", hostIfName, err)
}
@@ -1056,8 +1065,8 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
// Try removal of link. Discard error: it is a best effort.
// Also make sure defer does not see this error either.
- if link, err := netlink.LinkByName(ep.srcName); err == nil {
- netlink.LinkDel(link)
+ if link, err := d.nlh.LinkByName(ep.srcName); err == nil {
+ d.nlh.LinkDel(link)
}
return nil
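The bridge driver now funnels every netlink call through a cached *netlink.Handle (obtained from ns.NlHandle()) instead of the package-level functions, pinning all operations to one netlink socket and namespace. An equivalent standalone use of a handle; the docker0 lookup is only an example and assumes such a bridge exists on the host:

package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	// A handle owns its own netlink socket; Delete releases it.
	nlh, err := netlink.NewHandle()
	if err != nil {
		panic(err)
	}
	defer nlh.Delete()

	link, err := nlh.LinkByName("docker0") // hypothetical existing bridge
	if err != nil {
		fmt.Println("no docker0:", err)
		return
	}
	fmt.Println(link.Attrs().Name, link.Attrs().MTU)
}
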
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_store.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_store.go
index de9635289a..e10a429ed2 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_store.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_store.go
@@ -46,7 +46,7 @@ func (d *driver) populateNetworks() error {
for _, kvo := range kvol {
ncfg := kvo.(*networkConfiguration)
if err = d.createNetwork(ncfg); err != nil {
- logrus.Warnf("could not create bridge network for id %s bridge name %s while booting up from persistent state", ncfg.ID, ncfg.BridgeName)
+ logrus.Warnf("could not create bridge network for id %s bridge name %s while booting up from persistent state: %v", ncfg.ID, ncfg.BridgeName, err)
}
}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go
index 0e0d67aa69..bda369608f 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go
@@ -35,7 +35,7 @@ func (einc *ErrInvalidNetworkConfig) Error() string {
// Forbidden denotes the type of this error
func (einc *ErrInvalidNetworkConfig) Forbidden() {}
-// ErrInvalidContainerConfig error is returned when a endpoint create is attempted with an invalid configuration.
+// ErrInvalidContainerConfig error is returned when an endpoint create is attempted with an invalid configuration.
type ErrInvalidContainerConfig struct{}
func (eicc *ErrInvalidContainerConfig) Error() string {
@@ -45,7 +45,7 @@ func (eicc *ErrInvalidContainerConfig) Error() string {
// BadRequest denotes the type of this error
func (eicc *ErrInvalidContainerConfig) BadRequest() {}
-// ErrInvalidEndpointConfig error is returned when a endpoint create is attempted with an invalid endpoint configuration.
+// ErrInvalidEndpointConfig error is returned when an endpoint create is attempted with an invalid endpoint configuration.
type ErrInvalidEndpointConfig struct{}
func (eiec *ErrInvalidEndpointConfig) Error() string {
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go
index cdf68836fc..4a5dbfcbe5 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go
@@ -4,6 +4,7 @@ import (
"fmt"
"net"
+ "github.com/Sirupsen/logrus"
"github.com/vishvananda/netlink"
)
@@ -20,14 +21,16 @@ type bridgeInterface struct {
bridgeIPv6 *net.IPNet
gatewayIPv4 net.IP
gatewayIPv6 net.IP
+ nlh *netlink.Handle
}
// newInterface creates a new bridge interface structure. It attempts to find
// an already existing device identified by the configuration BridgeName field,
-// or the default bridge name when unspecified), but doesn't attempt to create
+// or the default bridge name when unspecified, but doesn't attempt to create
// one when missing
-func newInterface(config *networkConfiguration) *bridgeInterface {
- i := &bridgeInterface{}
+func newInterface(nlh *netlink.Handle, config *networkConfiguration) *bridgeInterface {
+ var err error
+ i := &bridgeInterface{nlh: nlh}
// Initialize the bridge name to the default if unspecified.
if config.BridgeName == "" {
@@ -35,7 +38,10 @@ func newInterface(config *networkConfiguration) *bridgeInterface {
}
// Attempt to find an existing bridge named with the specified name.
- i.Link, _ = netlink.LinkByName(config.BridgeName)
+ i.Link, err = nlh.LinkByName(config.BridgeName)
+ if err != nil {
+ logrus.Debugf("Did not find any interface with name %s: %v", config.BridgeName, err)
+ }
return i
}
@@ -47,14 +53,14 @@ func (i *bridgeInterface) exists() bool {
// addresses returns a single IPv4 address and all IPv6 addresses for the
// bridge interface.
func (i *bridgeInterface) addresses() (netlink.Addr, []netlink.Addr, error) {
- v4addr, err := netlink.AddrList(i.Link, netlink.FAMILY_V4)
+ v4addr, err := i.nlh.AddrList(i.Link, netlink.FAMILY_V4)
if err != nil {
- return netlink.Addr{}, nil, err
+ return netlink.Addr{}, nil, fmt.Errorf("Failed to retrieve V4 addresses: %v", err)
}
- v6addr, err := netlink.AddrList(i.Link, netlink.FAMILY_V6)
+ v6addr, err := i.nlh.AddrList(i.Link, netlink.FAMILY_V6)
if err != nil {
- return netlink.Addr{}, nil, err
+ return netlink.Addr{}, nil, fmt.Errorf("Failed to retrieve V6 addresses: %v", err)
}
if len(v4addr) == 0 {
@@ -72,7 +78,7 @@ func (i *bridgeInterface) programIPv6Address() error {
if findIPv6Address(nlAddr, nlAddressList) {
return nil
}
- if err := netlink.AddrAdd(i.Link, &nlAddr); err != nil {
+ if err := i.nlh.AddrAdd(i.Link, &nlAddr); err != nil {
return &IPv6AddrAddError{IP: i.bridgeIPv6, Err: err}
}
return nil
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go
index ddd9e452d0..0961bea55d 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go
@@ -35,14 +35,14 @@ func setupDevice(config *networkConfiguration, i *bridgeInterface) error {
setMac = kv.Kernel > 3 || (kv.Kernel == 3 && kv.Major >= 3)
}
- if err = netlink.LinkAdd(i.Link); err != nil {
+ if err = i.nlh.LinkAdd(i.Link); err != nil {
logrus.Debugf("Failed to create bridge %s via netlink. Trying ioctl", config.BridgeName)
return ioctlCreateBridge(config.BridgeName, setMac)
}
if setMac {
hwAddr := netutils.GenerateRandomMAC()
- if err = netlink.LinkSetHardwareAddr(i.Link, hwAddr); err != nil {
+ if err = i.nlh.LinkSetHardwareAddr(i.Link, hwAddr); err != nil {
return fmt.Errorf("failed to set bridge mac-address %s : %s", hwAddr, err.Error())
}
logrus.Debugf("Setting bridge mac address to %s", hwAddr)
@@ -52,15 +52,17 @@ func setupDevice(config *networkConfiguration, i *bridgeInterface) error {
// SetupDeviceUp ups the given bridge interface.
func setupDeviceUp(config *networkConfiguration, i *bridgeInterface) error {
- err := netlink.LinkSetUp(i.Link)
+ err := i.nlh.LinkSetUp(i.Link)
if err != nil {
- return err
+ return fmt.Errorf("Failed to set link up for %s: %v", config.BridgeName, err)
}
// Attempt to update the bridge interface to refresh the flags status,
// ignoring any failure to do so.
- if lnk, err := netlink.LinkByName(config.BridgeName); err == nil {
+ if lnk, err := i.nlh.LinkByName(config.BridgeName); err == nil {
i.Link = lnk
+ } else {
+ logrus.Warnf("Failed to retrieve link for interface (%s): %v", config.BridgeName, err)
}
return nil
}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go
index db913909c1..f11adc2cb1 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go
@@ -18,12 +18,12 @@ func setupBridgeIPv4(config *networkConfiguration, i *bridgeInterface) error {
if !types.CompareIPNet(addrv4.IPNet, config.AddressIPv4) {
if addrv4.IPNet != nil {
- if err := netlink.AddrDel(i.Link, &addrv4); err != nil {
+ if err := i.nlh.AddrDel(i.Link, &addrv4); err != nil {
return fmt.Errorf("failed to remove current ip address from bridge: %v", err)
}
}
log.Debugf("Assigning address to bridge interface %s: %s", config.BridgeName, config.AddressIPv4)
- if err := netlink.AddrAdd(i.Link, &netlink.Addr{IPNet: config.AddressIPv4}); err != nil {
+ if err := i.nlh.AddrAdd(i.Link, &netlink.Addr{IPNet: config.AddressIPv4}); err != nil {
return &IPv4AddrAddError{IP: config.AddressIPv4, Err: err}
}
}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go
index 3f07a0af8f..ee3d753ac1 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go
@@ -64,7 +64,7 @@ func setupBridgeIPv6(config *networkConfiguration, i *bridgeInterface) error {
// Setting route to global IPv6 subnet
logrus.Debugf("Adding route to IPv6 network %s via device %s", config.AddressIPv6.String(), config.BridgeName)
- err = netlink.RouteAdd(&netlink.Route{
+ err = i.nlh.RouteAdd(&netlink.Route{
Scope: netlink.SCOPE_UNIVERSE,
LinkIndex: i.Link.Attrs().Index,
Dst: config.AddressIPv6,
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go
index 4564d46ded..f6ef7ae552 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go
@@ -1,6 +1,8 @@
package bridge
import (
+ "fmt"
+
log "github.com/Sirupsen/logrus"
"github.com/docker/libnetwork/types"
"github.com/vishvananda/netlink"
@@ -10,7 +12,7 @@ func setupVerifyAndReconcile(config *networkConfiguration, i *bridgeInterface) e
// Fetch a single IPv4 and a slice of IPv6 addresses from the bridge.
addrv4, addrsv6, err := i.addresses()
if err != nil {
- return err
+ return fmt.Errorf("Failed to verify ip addresses: %v", err)
}
// Verify that the bridge does have an IPv4 address.
@@ -32,7 +34,7 @@ func setupVerifyAndReconcile(config *networkConfiguration, i *bridgeInterface) e
// Release any residual IPv6 address that might be there because of older daemon instances
for _, addrv6 := range addrsv6 {
if addrv6.IP.IsGlobalUnicast() && !types.CompareIPNet(addrv6.IPNet, i.bridgeIPv6) {
- if err := netlink.AddrDel(i.Link, &addrv6); err != nil {
+ if err := i.nlh.AddrDel(i.Link, &addrv6); err != nil {
log.Warnf("Failed to remove residual IPv6 address %s from bridge: %v", addrv6.IPNet, err)
}
}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_endpoint.go b/vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_endpoint.go
index c192408e2e..204c83f74b 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_endpoint.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_endpoint.go
@@ -6,9 +6,9 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/ns"
"github.com/docker/libnetwork/osl"
"github.com/docker/libnetwork/types"
- "github.com/vishvananda/netlink"
)
// CreateEndpoint assigns the mac, ip and endpoint id for the new container
@@ -70,8 +70,8 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
if ep == nil {
return fmt.Errorf("endpoint id %q not found", eid)
}
- if link, err := netlink.LinkByName(ep.srcName); err == nil {
- netlink.LinkDel(link)
+ if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil {
+ ns.NlHandle().LinkDel(link)
}
return nil
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_joinleave.go b/vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_joinleave.go
index 771f126d0b..b0be3d68d7 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_joinleave.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_joinleave.go
@@ -7,6 +7,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/netutils"
+ "github.com/docker/libnetwork/ns"
"github.com/docker/libnetwork/osl"
"github.com/docker/libnetwork/types"
)
@@ -34,7 +35,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
return fmt.Errorf("could not find endpoint with id %s", eid)
}
// generate a name for the iface that will be renamed to eth0 in the sbox
- containerIfName, err := netutils.GenerateIfaceName(vethPrefix, vethLen)
+ containerIfName, err := netutils.GenerateIfaceName(ns.NlHandle(), vethPrefix, vethLen)
if err != nil {
return fmt.Errorf("error generating an interface name: %v", err)
}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go b/vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go
index 60d3d965ca..97db98c56f 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go
@@ -6,6 +6,7 @@ import (
"strings"
"github.com/Sirupsen/logrus"
+ "github.com/docker/libnetwork/ns"
"github.com/vishvananda/netlink"
)
@@ -27,7 +28,7 @@ func createIPVlan(containerIfName, parent, ipvlanMode string) (string, error) {
return "", fmt.Errorf("the requested parent interface %s was not found on the Docker host", parent)
}
// Get the link for the master index (Example: the docker host eth iface)
- parentLink, err := netlink.LinkByName(parent)
+ parentLink, err := ns.NlHandle().LinkByName(parent)
if err != nil {
return "", fmt.Errorf("error occoured looking up the %s parent iface %s error: %s", ipvlanType, parent, err)
}
@@ -39,7 +40,7 @@ func createIPVlan(containerIfName, parent, ipvlanMode string) (string, error) {
},
Mode: mode,
}
- if err := netlink.LinkAdd(ipvlan); err != nil {
+ if err := ns.NlHandle().LinkAdd(ipvlan); err != nil {
// If a user creates a macvlan and ipvlan on same parent, only one slave iface can be active at a time.
return "", fmt.Errorf("failed to create the %s port: %v", ipvlanType, err)
}
@@ -61,7 +62,7 @@ func setIPVlanMode(mode string) (netlink.IPVlanMode, error) {
// parentExists check if the specified interface exists in the default namespace
func parentExists(ifaceStr string) bool {
- _, err := netlink.LinkByName(ifaceStr)
+ _, err := ns.NlHandle().LinkByName(ifaceStr)
if err != nil {
return false
}
@@ -81,7 +82,7 @@ func createVlanLink(parentName string) error {
return fmt.Errorf("vlan id must be between 1-4094, received: %d", vidInt)
}
// get the parent link to attach a vlan subinterface
- parentLink, err := netlink.LinkByName(parent)
+ parentLink, err := ns.NlHandle().LinkByName(parent)
if err != nil {
return fmt.Errorf("failed to find master interface %s on the Docker host: %v", parent, err)
}
@@ -93,11 +94,11 @@ func createVlanLink(parentName string) error {
VlanId: vidInt,
}
// create the subinterface
- if err := netlink.LinkAdd(vlanLink); err != nil {
+ if err := ns.NlHandle().LinkAdd(vlanLink); err != nil {
return fmt.Errorf("failed to create %s vlan link: %v", vlanLink.Name, err)
}
// Bring the new netlink iface up
- if err := netlink.LinkSetUp(vlanLink); err != nil {
+ if err := ns.NlHandle().LinkSetUp(vlanLink); err != nil {
return fmt.Errorf("failed to enable %s the ipvlan parent link %v", vlanLink.Name, err)
}
logrus.Debugf("Added a vlan tagged netlink subinterface: %s with a vlan id: %d", parentName, vidInt)
@@ -115,7 +116,7 @@ func delVlanLink(linkName string) error {
return err
}
// delete the vlan subinterface
- vlanLink, err := netlink.LinkByName(linkName)
+ vlanLink, err := ns.NlHandle().LinkByName(linkName)
if err != nil {
return fmt.Errorf("failed to find interface %s on the Docker host : %v", linkName, err)
}
@@ -124,7 +125,7 @@ func delVlanLink(linkName string) error {
return fmt.Errorf("interface %s does not appear to be a slave device: %v", linkName, err)
}
// delete the ipvlan slave device
- if err := netlink.LinkDel(vlanLink); err != nil {
+ if err := ns.NlHandle().LinkDel(vlanLink); err != nil {
return fmt.Errorf("failed to delete %s link: %v", linkName, err)
}
logrus.Debugf("Deleted a vlan tagged netlink subinterface: %s", linkName)
@@ -163,15 +164,15 @@ func createDummyLink(dummyName, truncNetID string) error {
Name: dummyName,
},
}
- if err := netlink.LinkAdd(parent); err != nil {
+ if err := ns.NlHandle().LinkAdd(parent); err != nil {
return err
}
- parentDummyLink, err := netlink.LinkByName(dummyName)
+ parentDummyLink, err := ns.NlHandle().LinkByName(dummyName)
if err != nil {
return fmt.Errorf("error occoured looking up the %s parent iface %s error: %s", ipvlanType, dummyName, err)
}
// bring the new netlink iface up
- if err := netlink.LinkSetUp(parentDummyLink); err != nil {
+ if err := ns.NlHandle().LinkSetUp(parentDummyLink); err != nil {
return fmt.Errorf("failed to enable %s the ipvlan parent link: %v", dummyName, err)
}
@@ -181,7 +182,7 @@ func createDummyLink(dummyName, truncNetID string) error {
// delDummyLink deletes the link type dummy used when -o parent is not passed
func delDummyLink(linkName string) error {
// delete the vlan subinterface
- dummyLink, err := netlink.LinkByName(linkName)
+ dummyLink, err := ns.NlHandle().LinkByName(linkName)
if err != nil {
return fmt.Errorf("failed to find link %s on the Docker host : %v", linkName, err)
}
@@ -190,7 +191,7 @@ func delDummyLink(linkName string) error {
return fmt.Errorf("link %s is not a parent dummy interface", linkName)
}
// delete the ipvlan dummy device
- if err := netlink.LinkDel(dummyLink); err != nil {
+ if err := ns.NlHandle().LinkDel(dummyLink); err != nil {
return fmt.Errorf("failed to delete the dummy %s link: %v", linkName, err)
}
logrus.Debugf("Deleted a dummy parent link: %s", linkName)
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go b/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go
index ee52ec54b1..3187a54562 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go
@@ -7,9 +7,9 @@ import (
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/netutils"
+ "github.com/docker/libnetwork/ns"
"github.com/docker/libnetwork/osl"
"github.com/docker/libnetwork/types"
- "github.com/vishvananda/netlink"
)
// CreateEndpoint assigns the mac, ip and endpoint id for the new container
@@ -74,8 +74,8 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
if ep == nil {
return fmt.Errorf("endpoint id %q not found", eid)
}
- if link, err := netlink.LinkByName(ep.srcName); err == nil {
- netlink.LinkDel(link)
+ if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil {
+ ns.NlHandle().LinkDel(link)
}
return nil
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_joinleave.go b/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_joinleave.go
index 04df3723c4..3656fdfe3f 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_joinleave.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_joinleave.go
@@ -7,6 +7,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/netutils"
+ "github.com/docker/libnetwork/ns"
"github.com/docker/libnetwork/osl"
)
@@ -22,7 +23,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
return fmt.Errorf("could not find endpoint with id %s", eid)
}
// generate a name for the iface that will be renamed to eth0 in the sbox
- containerIfName, err := netutils.GenerateIfaceName(vethPrefix, vethLen)
+ containerIfName, err := netutils.GenerateIfaceName(ns.NlHandle(), vethPrefix, vethLen)
if err != nil {
return fmt.Errorf("error generating an interface name: %s", err)
}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go b/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go
index 8a56e88589..a3e17f28e2 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go
@@ -6,6 +6,7 @@ import (
"strings"
"github.com/Sirupsen/logrus"
+ "github.com/docker/libnetwork/ns"
"github.com/vishvananda/netlink"
)
@@ -27,7 +28,7 @@ func createMacVlan(containerIfName, parent, macvlanMode string) (string, error)
return "", fmt.Errorf("the requested parent interface %s was not found on the Docker host", parent)
}
// Get the link for the master index (Example: the docker host eth iface)
- parentLink, err := netlink.LinkByName(parent)
+ parentLink, err := ns.NlHandle().LinkByName(parent)
if err != nil {
return "", fmt.Errorf("error occoured looking up the %s parent iface %s error: %s", macvlanType, parent, err)
}
@@ -39,7 +40,7 @@ func createMacVlan(containerIfName, parent, macvlanMode string) (string, error)
},
Mode: mode,
}
- if err := netlink.LinkAdd(macvlan); err != nil {
+ if err := ns.NlHandle().LinkAdd(macvlan); err != nil {
// If a user creates a macvlan and ipvlan on same parent, only one slave iface can be active at a time.
return "", fmt.Errorf("failed to create the %s port: %v", macvlanType, err)
}
@@ -65,7 +66,7 @@ func setMacVlanMode(mode string) (netlink.MacvlanMode, error) {
// parentExists check if the specified interface exists in the default namespace
func parentExists(ifaceStr string) bool {
- _, err := netlink.LinkByName(ifaceStr)
+ _, err := ns.NlHandle().LinkByName(ifaceStr)
if err != nil {
return false
}
@@ -85,7 +86,7 @@ func createVlanLink(parentName string) error {
return fmt.Errorf("vlan id must be between 1-4094, received: %d", vidInt)
}
// get the parent link to attach a vlan subinterface
- parentLink, err := netlink.LinkByName(parent)
+ parentLink, err := ns.NlHandle().LinkByName(parent)
if err != nil {
return fmt.Errorf("failed to find master interface %s on the Docker host: %v", parent, err)
}
@@ -97,11 +98,11 @@ func createVlanLink(parentName string) error {
VlanId: vidInt,
}
// create the subinterface
- if err := netlink.LinkAdd(vlanLink); err != nil {
+ if err := ns.NlHandle().LinkAdd(vlanLink); err != nil {
return fmt.Errorf("failed to create %s vlan link: %v", vlanLink.Name, err)
}
// Bring the new netlink iface up
- if err := netlink.LinkSetUp(vlanLink); err != nil {
+ if err := ns.NlHandle().LinkSetUp(vlanLink); err != nil {
return fmt.Errorf("failed to enable %s the macvlan parent link %v", vlanLink.Name, err)
}
logrus.Debugf("Added a vlan tagged netlink subinterface: %s with a vlan id: %d", parentName, vidInt)
@@ -119,7 +120,7 @@ func delVlanLink(linkName string) error {
return err
}
// delete the vlan subinterface
- vlanLink, err := netlink.LinkByName(linkName)
+ vlanLink, err := ns.NlHandle().LinkByName(linkName)
if err != nil {
return fmt.Errorf("failed to find interface %s on the Docker host : %v", linkName, err)
}
@@ -128,7 +129,7 @@ func delVlanLink(linkName string) error {
return fmt.Errorf("interface %s does not appear to be a slave device: %v", linkName, err)
}
// delete the macvlan slave device
- if err := netlink.LinkDel(vlanLink); err != nil {
+ if err := ns.NlHandle().LinkDel(vlanLink); err != nil {
return fmt.Errorf("failed to delete %s link: %v", linkName, err)
}
logrus.Debugf("Deleted a vlan tagged netlink subinterface: %s", linkName)
@@ -167,15 +168,15 @@ func createDummyLink(dummyName, truncNetID string) error {
Name: dummyName,
},
}
- if err := netlink.LinkAdd(parent); err != nil {
+ if err := ns.NlHandle().LinkAdd(parent); err != nil {
return err
}
- parentDummyLink, err := netlink.LinkByName(dummyName)
+ parentDummyLink, err := ns.NlHandle().LinkByName(dummyName)
if err != nil {
return fmt.Errorf("error occoured looking up the %s parent iface %s error: %s", macvlanType, dummyName, err)
}
// bring the new netlink iface up
- if err := netlink.LinkSetUp(parentDummyLink); err != nil {
+ if err := ns.NlHandle().LinkSetUp(parentDummyLink); err != nil {
return fmt.Errorf("failed to enable %s the macvlan parent link: %v", dummyName, err)
}
@@ -185,7 +186,7 @@ func createDummyLink(dummyName, truncNetID string) error {
// delDummyLink deletes the link type dummy used when -o parent is not passed
func delDummyLink(linkName string) error {
// delete the vlan subinterface
- dummyLink, err := netlink.LinkByName(linkName)
+ dummyLink, err := ns.NlHandle().LinkByName(linkName)
if err != nil {
return fmt.Errorf("failed to find link %s on the Docker host : %v", linkName, err)
}
@@ -194,7 +195,7 @@ func delDummyLink(linkName string) error {
return fmt.Errorf("link %s is not a parent dummy interface", linkName)
}
// delete the macvlan dummy device
- if err := netlink.LinkDel(dummyLink); err != nil {
+ if err := ns.NlHandle().LinkDel(dummyLink); err != nil {
return fmt.Errorf("failed to delete the dummy %s link: %v", linkName, err)
}
logrus.Debugf("Deleted a dummy parent link: %s", linkName)
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/encryption.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/encryption.go
new file mode 100644
index 0000000000..fc82ac3700
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/encryption.go
@@ -0,0 +1,578 @@
+package overlay
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "net"
+ "sync"
+ "syscall"
+
+ log "github.com/Sirupsen/logrus"
+ "github.com/docker/libnetwork/iptables"
+ "github.com/docker/libnetwork/types"
+ "github.com/vishvananda/netlink"
+ "strconv"
+)
+
+const (
+ mark = uint32(0xD0C4E3)
+ timeout = 30
+)
+
+const (
+ forward = iota + 1
+ reverse
+ bidir
+)
+
+type key struct {
+ value []byte
+ tag uint32
+}
+
+func (k *key) String() string {
+ return fmt.Sprintf("(key: %s, tag: 0x%x)", hex.EncodeToString(k.value)[0:5], k.tag)
+}
+
+type spi struct {
+ forward int
+ reverse int
+}
+
+func (s *spi) String() string {
+ return fmt.Sprintf("SPI(FWD: 0x%x, REV: 0x%x)", uint32(s.forward), uint32(s.reverse))
+}
+
+type encrMap struct {
+ nodes map[string][]*spi
+ sync.Mutex
+}
+
+func (e *encrMap) String() string {
+ e.Lock()
+ defer e.Unlock()
+ b := new(bytes.Buffer)
+ for k, v := range e.nodes {
+ b.WriteString("\n")
+ b.WriteString(k)
+ b.WriteString(":")
+ b.WriteString("[")
+ for _, s := range v {
+ b.WriteString(s.String())
+ b.WriteString(",")
+ }
+ b.WriteString("]")
+
+ }
+ return b.String()
+}
+
+func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal, add bool) error {
+ log.Infof("checkEncryption(%s, %v, %d, %t)", nid[0:7], rIP, vxlanID, isLocal)
+
+ n := d.network(nid)
+ if n == nil || !n.secure {
+ return nil
+ }
+
+ if len(d.keys) == 0 {
+ return types.ForbiddenErrorf("encryption key is not present")
+ }
+
+ lIP := types.GetMinimalIP(net.ParseIP(d.bindAddress))
+ nodes := map[string]net.IP{}
+
+ switch {
+ case isLocal:
+ if err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
+ if !lIP.Equal(pEntry.vtep) {
+ nodes[pEntry.vtep.String()] = types.GetMinimalIP(pEntry.vtep)
+ }
+ return false
+ }); err != nil {
+ log.Warnf("Failed to retrieve list of participating nodes in overlay network %s: %v", nid[0:5], err)
+ }
+ default:
+ if len(d.network(nid).endpoints) > 0 {
+ nodes[rIP.String()] = types.GetMinimalIP(rIP)
+ }
+ }
+
+ log.Debugf("List of nodes: %s", nodes)
+
+ if add {
+ for _, rIP := range nodes {
+ if err := setupEncryption(lIP, rIP, vxlanID, d.secMap, d.keys); err != nil {
+ log.Warnf("Failed to program network encryption between %s and %s: %v", lIP, rIP, err)
+ }
+ }
+ } else {
+ if len(nodes) == 0 {
+ if err := removeEncryption(lIP, rIP, d.secMap); err != nil {
+ log.Warnf("Failed to remove network encryption between %s and %s: %v", lIP, rIP, err)
+ }
+ }
+ }
+
+ return nil
+}
+
+func setupEncryption(localIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*key) error {
+ log.Infof("Programming encryption for vxlan %d between %s and %s", vni, localIP, remoteIP)
+ rIPs := remoteIP.String()
+
+ indices := make([]*spi, 0, len(keys))
+
+ err := programMangle(vni, true)
+ if err != nil {
+ log.Warn(err)
+ }
+
+ for i, k := range keys {
+ spis := &spi{buildSPI(localIP, remoteIP, k.tag), buildSPI(remoteIP, localIP, k.tag)}
+ dir := reverse
+ if i == 0 {
+ dir = bidir
+ }
+ fSA, rSA, err := programSA(localIP, remoteIP, spis, k, dir, true)
+ if err != nil {
+ log.Warn(err)
+ }
+ indices = append(indices, spis)
+ if i != 0 {
+ continue
+ }
+ err = programSP(fSA, rSA, true)
+ if err != nil {
+ log.Warn(err)
+ }
+ }
+
+ em.Lock()
+ em.nodes[rIPs] = indices
+ em.Unlock()
+
+ return nil
+}
+
+func removeEncryption(localIP, remoteIP net.IP, em *encrMap) error {
+ em.Lock()
+ indices, ok := em.nodes[remoteIP.String()]
+ em.Unlock()
+ if !ok {
+ return nil
+ }
+ for i, idxs := range indices {
+ dir := reverse
+ if i == 0 {
+ dir = bidir
+ }
+ fSA, rSA, err := programSA(localIP, remoteIP, idxs, nil, dir, false)
+ if err != nil {
+ log.Warn(err)
+ }
+ if i != 0 {
+ continue
+ }
+ err = programSP(fSA, rSA, false)
+ if err != nil {
+ log.Warn(err)
+ }
+ }
+ return nil
+}
+
+func programMangle(vni uint32, add bool) (err error) {
+ var (
+ p = strconv.FormatUint(uint64(vxlanPort), 10)
+ c = fmt.Sprintf("0>>22&0x3C@12&0xFFFFFF00=%d", int(vni)<<8)
+ m = strconv.FormatUint(uint64(mark), 10)
+ chain = "OUTPUT"
+ rule = []string{"-p", "udp", "--dport", p, "-m", "u32", "--u32", c, "-j", "MARK", "--set-mark", m}
+ a = "-A"
+ action = "install"
+ )
+
+ if add == iptables.Exists(iptables.Mangle, chain, rule...) {
+ return
+ }
+
+ if !add {
+ a = "-D"
+ action = "remove"
+ }
+
+ if err = iptables.RawCombinedOutput(append([]string{"-t", string(iptables.Mangle), a, chain}, rule...)...); err != nil {
+ log.Warnf("could not %s mangle rule: %v", action, err)
+ }
+
+ return
+}
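
The mangle rule above marks outgoing vxlan datagrams that carry a given VNI. The u32 expression first computes the IP header length (0>>22&0x3C), jumps past it with @, and then compares the 32-bit word at offset 12 of the UDP datagram: the 8-byte UDP header plus the 4-byte vxlan flags word place the 24-bit VNI there, followed by a reserved byte, hence the 0xFFFFFF00 mask and the vni<<8 comparison value. A small self-contained sketch of how the rule strings are assembled (VNI value illustrative):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	const vxlanPort = 4789
	const mark = uint32(0xD0C4E3)
	vni := uint32(42)

	p := strconv.FormatUint(uint64(vxlanPort), 10)
	// u32 match: skip the IP header (0>>22&0x3C), then compare the
	// word at offset 12 of the UDP datagram, where the vxlan VNI
	// sits in the top three bytes.
	c := fmt.Sprintf("0>>22&0x3C@12&0xFFFFFF00=%d", int(vni)<<8)
	m := strconv.FormatUint(uint64(mark), 10)

	rule := []string{"-p", "udp", "--dport", p, "-m", "u32", "--u32", c, "-j", "MARK", "--set-mark", m}
	fmt.Println(rule) // the arguments handed to iptables -t mangle -A OUTPUT ...
}
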
+
+func programSA(localIP, remoteIP net.IP, spi *spi, k *key, dir int, add bool) (fSA *netlink.XfrmState, rSA *netlink.XfrmState, err error) {
+ var (
+ crypt *netlink.XfrmStateAlgo
+ action = "Removing"
+ xfrmProgram = netlink.XfrmStateDel
+ )
+
+ if add {
+ action = "Adding"
+ xfrmProgram = netlink.XfrmStateAdd
+ crypt = &netlink.XfrmStateAlgo{Name: "cbc(aes)", Key: k.value}
+ }
+
+ if dir&reverse > 0 {
+ rSA = &netlink.XfrmState{
+ Src: remoteIP,
+ Dst: localIP,
+ Proto: netlink.XFRM_PROTO_ESP,
+ Spi: spi.reverse,
+ Mode: netlink.XFRM_MODE_TRANSPORT,
+ }
+ if add {
+ rSA.Crypt = crypt
+ }
+
+ exists, err := saExists(rSA)
+ if err != nil {
+ exists = !add
+ }
+
+ if add != exists {
+ log.Infof("%s: rSA{%s}", action, rSA)
+ if err := xfrmProgram(rSA); err != nil {
+ log.Warnf("Failed %s rSA{%s}: %v", action, rSA, err)
+ }
+ }
+ }
+
+ if dir&forward > 0 {
+ fSA = &netlink.XfrmState{
+ Src: localIP,
+ Dst: remoteIP,
+ Proto: netlink.XFRM_PROTO_ESP,
+ Spi: spi.forward,
+ Mode: netlink.XFRM_MODE_TRANSPORT,
+ }
+ if add {
+ fSA.Crypt = crypt
+ }
+
+ exists, err := saExists(fSA)
+ if err != nil {
+ exists = !add
+ }
+
+ if add != exists {
+ log.Infof("%s fSA{%s}", action, fSA)
+ if err := xfrmProgram(fSA); err != nil {
+ log.Warnf("Failed %s fSA{%s}: %v.", action, fSA, err)
+ }
+ }
+ }
+
+ return
+}
+
+func programSP(fSA *netlink.XfrmState, rSA *netlink.XfrmState, add bool) error {
+ action := "Removing"
+ xfrmProgram := netlink.XfrmPolicyDel
+ if add {
+ action = "Adding"
+ xfrmProgram = netlink.XfrmPolicyAdd
+ }
+
+ fullMask := net.CIDRMask(8*len(fSA.Src), 8*len(fSA.Src))
+
+ fPol := &netlink.XfrmPolicy{
+ Src: &net.IPNet{IP: fSA.Src, Mask: fullMask},
+ Dst: &net.IPNet{IP: fSA.Dst, Mask: fullMask},
+ Dir: netlink.XFRM_DIR_OUT,
+ Proto: 17,
+ DstPort: 4789,
+ Mark: &netlink.XfrmMark{
+ Value: mark,
+ },
+ Tmpls: []netlink.XfrmPolicyTmpl{
+ {
+ Src: fSA.Src,
+ Dst: fSA.Dst,
+ Proto: netlink.XFRM_PROTO_ESP,
+ Mode: netlink.XFRM_MODE_TRANSPORT,
+ Spi: fSA.Spi,
+ },
+ },
+ }
+
+ exists, err := spExists(fPol)
+ if err != nil {
+ exists = !add
+ }
+
+ if add != exists {
+ log.Infof("%s fSP{%s}", action, fPol)
+ if err := xfrmProgram(fPol); err != nil {
+ log.Warnf("%s fSP{%s}: %v", action, fPol, err)
+ }
+ }
+
+ return nil
+}
+
+func saExists(sa *netlink.XfrmState) (bool, error) {
+ _, err := netlink.XfrmStateGet(sa)
+ switch err {
+ case nil:
+ return true, nil
+ case syscall.ESRCH:
+ return false, nil
+ default:
+ err = fmt.Errorf("Error while checking for SA existence: %v", err)
+ log.Debug(err)
+ return false, err
+ }
+}
+
+func spExists(sp *netlink.XfrmPolicy) (bool, error) {
+ _, err := netlink.XfrmPolicyGet(sp)
+ switch err {
+ case nil:
+ return true, nil
+ case syscall.ENOENT:
+ return false, nil
+ default:
+ err = fmt.Errorf("Error while checking for SP existence: %v", err)
+ log.Debug(err)
+ return false, err
+ }
+}
+
+func buildSPI(src, dst net.IP, st uint32) int {
+ spi := int(st)
+ f := src[len(src)-4:]
+ t := dst[len(dst)-4:]
+ for i := 0; i < 4; i++ {
+ spi = spi ^ (int(f[i])^int(t[3-i]))<<uint32(8*i)
+ }
+ return spi
+}
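
buildSPI derives a security parameter index deterministically by XOR-folding the low four bytes of the two addresses into the key tag. Because the result depends only on (src, dst, tag), the two ends of a tunnel independently arrive at the same forward and reverse SPI pair with no negotiation. A small standalone check, reusing the function body verbatim; addresses and tag are illustrative:

package main

import (
	"fmt"
	"net"
)

func buildSPI(src, dst net.IP, st uint32) int {
	spi := int(st)
	f := src[len(src)-4:]
	t := dst[len(dst)-4:]
	for i := 0; i < 4; i++ {
		spi = spi ^ (int(f[i])^int(t[3-i]))<<uint32(8*i)
	}
	return spi
}

func main() {
	a := net.ParseIP("10.0.0.1")
	b := net.ParseIP("10.0.0.2")
	// Forward and reverse SPIs differ but are stable for a given key tag,
	// and each host computes the same pair for the same endpoints.
	fmt.Printf("fwd: 0x%x rev: 0x%x\n", buildSPI(a, b, 0x1234), buildSPI(b, a, 0x1234))
}
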
+
+func (d *driver) secMapWalk(f func(string, []*spi) ([]*spi, bool)) error {
+ d.secMap.Lock()
+ for node, indices := range d.secMap.nodes {
+ idxs, stop := f(node, indices)
+ if idxs != nil {
+ d.secMap.nodes[node] = idxs
+ }
+ if stop {
+ break
+ }
+ }
+ d.secMap.Unlock()
+ return nil
+}
+
+func (d *driver) setKeys(keys []*key) error {
+ if d.keys != nil {
+ return types.ForbiddenErrorf("initial keys are already present")
+ }
+ d.keys = keys
+ log.Infof("Initial encryption keys: %v", d.keys)
+ return nil
+}
+
+// updateKeys allows adding a new key, changing the primary key, and/or pruning an existing key.
+// The primary key is the key used in transmission and will go in first position in the list.
+func (d *driver) updateKeys(newKey, primary, pruneKey *key) error {
+ log.Infof("Updating Keys. New: %v, Primary: %v, Pruned: %v", newKey, primary, pruneKey)
+
+ log.Infof("Current: %v", d.keys)
+
+ var (
+ newIdx = -1
+ priIdx = -1
+ delIdx = -1
+ lIP = types.GetMinimalIP(net.ParseIP(d.bindAddress))
+ )
+
+ d.Lock()
+ // add new
+ if newKey != nil {
+ d.keys = append(d.keys, newKey)
+ newIdx += len(d.keys)
+ }
+ for i, k := range d.keys {
+ if primary != nil && k.tag == primary.tag {
+ priIdx = i
+ }
+ if pruneKey != nil && k.tag == pruneKey.tag {
+ delIdx = i
+ }
+ }
+ d.Unlock()
+
+ if (newKey != nil && newIdx == -1) ||
+ (primary != nil && priIdx == -1) ||
+ (pruneKey != nil && delIdx == -1) {
+ err := types.BadRequestErrorf("cannot find proper key indices while processing key update:"+
+ "(newIdx,priIdx,delIdx):(%d, %d, %d)", newIdx, priIdx, delIdx)
+ log.Warn(err)
+ return err
+ }
+
+ d.secMapWalk(func(rIPs string, spis []*spi) ([]*spi, bool) {
+ rIP := types.GetMinimalIP(net.ParseIP(rIPs))
+ return updateNodeKey(lIP, rIP, spis, d.keys, newIdx, priIdx, delIdx), false
+ })
+
+ d.Lock()
+ // swap primary
+ if priIdx != -1 {
+ swp := d.keys[0]
+ d.keys[0] = d.keys[priIdx]
+ d.keys[priIdx] = swp
+ }
+ // prune
+ if delIdx != -1 {
+ if delIdx == 0 {
+ delIdx = priIdx
+ }
+ d.keys = append(d.keys[:delIdx], d.keys[delIdx+1:]...)
+ }
+ d.Unlock()
+
+ log.Infof("Updated: %v", d.keys)
+
+ return nil
+}
+
+/********************************************************
+ * Steady state: rSA0, rSA1, fSA0, fSP0
+ * Rotation --> %rSA0, +rSA2, +fSA1, +fSP1/-fSP0, -fSA0,
+ * Half state: rSA0, rSA1, rSA2, fSA1, fSP1
+ * Steady state: rSA1, rSA2, fSA1, fSP1
+ *********************************************************/
+
+// SPIs and keys are sorted in such a way that the one in position 0 is the primary
+func updateNodeKey(lIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, priIdx, delIdx int) []*spi {
+ log.Infof("Updating keys for node: %s (%d,%d,%d)", rIP, newIdx, priIdx, delIdx)
+
+ spis := idxs
+ log.Infof("Current: %v", spis)
+
+ // add new
+ if newIdx != -1 {
+ spis = append(spis, &spi{
+ forward: buildSPI(lIP, rIP, curKeys[newIdx].tag),
+ reverse: buildSPI(rIP, lIP, curKeys[newIdx].tag),
+ })
+ }
+
+ if delIdx != -1 {
+ // %rSA0
+ rSA0 := &netlink.XfrmState{
+ Src: rIP,
+ Dst: lIP,
+ Proto: netlink.XFRM_PROTO_ESP,
+ Spi: spis[delIdx].reverse,
+ Mode: netlink.XFRM_MODE_TRANSPORT,
+ Crypt: &netlink.XfrmStateAlgo{Name: "cbc(aes)", Key: curKeys[delIdx].value},
+ Limits: netlink.XfrmStateLimits{TimeSoft: timeout},
+ }
+ log.Infof("Updating rSA0{%s}", rSA0)
+ if err := netlink.XfrmStateUpdate(rSA0); err != nil {
+ log.Warnf("Failed to update rSA0{%s}: %v", rSA0, err)
+ }
+ }
+
+ if newIdx > -1 {
+ // +RSA2
+ programSA(lIP, rIP, spis[newIdx], curKeys[newIdx], reverse, true)
+ }
+
+ if priIdx > 0 {
+ // +fSA1
+ fSA1, _, _ := programSA(lIP, rIP, spis[priIdx], curKeys[priIdx], forward, true)
+
+ // +fSP1, -fSP0
+ fullMask := net.CIDRMask(8*len(fSA1.Src), 8*len(fSA1.Src))
+ fSP1 := &netlink.XfrmPolicy{
+ Src: &net.IPNet{IP: fSA1.Src, Mask: fullMask},
+ Dst: &net.IPNet{IP: fSA1.Dst, Mask: fullMask},
+ Dir: netlink.XFRM_DIR_OUT,
+ Proto: 17,
+ DstPort: 4789,
+ Mark: &netlink.XfrmMark{
+ Value: mark,
+ },
+ Tmpls: []netlink.XfrmPolicyTmpl{
+ {
+ Src: fSA1.Src,
+ Dst: fSA1.Dst,
+ Proto: netlink.XFRM_PROTO_ESP,
+ Mode: netlink.XFRM_MODE_TRANSPORT,
+ Spi: fSA1.Spi,
+ },
+ },
+ }
+ log.Infof("Updating fSP{%s}", fSP1)
+ if err := netlink.XfrmPolicyUpdate(fSP1); err != nil {
+ log.Warnf("Failed to update fSP{%s}: %v", fSP1, err)
+ }
+
+ // -fSA0
+ fSA0 := &netlink.XfrmState{
+ Src: lIP,
+ Dst: rIP,
+ Proto: netlink.XFRM_PROTO_ESP,
+ Spi: spis[0].forward,
+ Mode: netlink.XFRM_MODE_TRANSPORT,
+ Crypt: &netlink.XfrmStateAlgo{Name: "cbc(aes)", Key: curKeys[0].value},
+ Limits: netlink.XfrmStateLimits{TimeHard: timeout},
+ }
+ log.Infof("Removing fSA0{%s}", fSA0)
+ if err := netlink.XfrmStateUpdate(fSA0); err != nil {
+ log.Warnf("Failed to remove fSA0{%s}: %v", fSA0, err)
+ }
+ }
+
+ // swap
+ if priIdx > 0 {
+ swp := spis[0]
+ spis[0] = spis[priIdx]
+ spis[priIdx] = swp
+ }
+ // prune
+ if delIdx != -1 {
+ if delIdx == 0 {
+ delIdx = priIdx
+ }
+ spis = append(spis[:delIdx], spis[delIdx+1:]...)
+ }
+
+ log.Infof("Updated: %v", spis)
+
+ return spis
+}
+
+func parseEncryptionKey(value, tag string) (*key, error) {
+ var (
+ k *key
+ err error
+ )
+ if value == "" {
+ return nil, nil
+ }
+ k = &key{}
+ if k.value, err = hex.DecodeString(value); err != nil {
+ return nil, types.BadRequestErrorf("failed to decode key (%s): %v", value, err)
+ }
+ t, err := strconv.ParseUint(tag, 10, 64)
+ if err != nil {
+ return nil, types.BadRequestErrorf("failed to decode tag (%s): %v", tag, err)
+ }
+ k.tag = uint32(t)
+ return k, nil
+}
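
For reference, a minimal sketch of what parseEncryptionKey expects, inlined so it runs standalone: the key material arrives hex-encoded and the tag as a decimal string (values illustrative):

package main

import (
	"encoding/hex"
	"fmt"
	"log"
	"strconv"
)

func main() {
	// Decode a key the way parseEncryptionKey does.
	value, tag := "6162636465666768696a6b6c6d6e6f70", "100"

	raw, err := hex.DecodeString(value)
	if err != nil {
		log.Fatal(err)
	}
	t, err := strconv.ParseUint(tag, 10, 64)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("key: %s..., tag: 0x%x\n", hex.EncodeToString(raw)[:5], uint32(t))
	// Output: key: 61626..., tag: 0x64
}
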
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/joinleave.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/joinleave.go
index 46efd3f051..48a9fcd25e 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/overlay/joinleave.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/joinleave.go
@@ -3,12 +3,12 @@ package overlay
import (
"fmt"
"net"
- "strings"
log "github.com/Sirupsen/logrus"
"github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/ns"
"github.com/docker/libnetwork/types"
- "github.com/vishvananda/netlink"
+ "github.com/gogo/protobuf/proto"
)
// Join method is invoked when a Sandbox is attached to an endpoint.
@@ -27,6 +27,10 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
return fmt.Errorf("could not find endpoint with id %s", eid)
}
+ if n.secure && len(d.keys) == 0 {
+ return fmt.Errorf("cannot join secure network: encryption keys not present")
+ }
+
s := n.getSubnetforIP(ep.addr)
if s == nil {
return fmt.Errorf("could not find subnet for endpoint %s", eid)
@@ -57,14 +61,16 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
ep.ifName = containerIfName
+ nlh := ns.NlHandle()
+
// Set the container interface and its peer MTU to 1450 to allow
// for 50 bytes vxlan encap (inner eth header(14) + outer IP(20) +
// outer UDP(8) + vxlan header(8))
- veth, err := netlink.LinkByName(overlayIfName)
+ veth, err := nlh.LinkByName(overlayIfName)
if err != nil {
return fmt.Errorf("cound not find link by name %s: %v", overlayIfName, err)
}
- err = netlink.LinkSetMTU(veth, vxlanVethMTU)
+ err = nlh.LinkSetMTU(veth, vxlanVethMTU)
if err != nil {
return err
}
@@ -74,16 +80,16 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
return fmt.Errorf("could not add veth pair inside the network sandbox: %v", err)
}
- veth, err = netlink.LinkByName(containerIfName)
+ veth, err = nlh.LinkByName(containerIfName)
if err != nil {
return fmt.Errorf("could not find link by name %s: %v", containerIfName, err)
}
- err = netlink.LinkSetMTU(veth, vxlanVethMTU)
+ err = nlh.LinkSetMTU(veth, vxlanVethMTU)
if err != nil {
return err
}
- if err := netlink.LinkSetHardwareAddr(veth, ep.mac); err != nil {
+ if err := nlh.LinkSetHardwareAddr(veth, ep.mac); err != nil {
return fmt.Errorf("could not set mac address (%v) to the container interface: %v", ep.mac, err)
}
@@ -106,7 +112,20 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
d.peerDbAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac,
net.ParseIP(d.bindAddress), true)
- if err := jinfo.AddTableEntry(ovPeerTable, eid, []byte(fmt.Sprintf("%s,%s,%s", ep.addr, ep.mac, d.bindAddress))); err != nil {
+ if err := d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil {
+ log.Warn(err)
+ }
+
+ buf, err := proto.Marshal(&PeerRecord{
+ EndpointIP: ep.addr.String(),
+ EndpointMAC: ep.mac.String(),
+ TunnelEndpointIP: d.bindAddress,
+ })
+ if err != nil {
+ return err
+ }
+
+ if err := jinfo.AddTableEntry(ovPeerTable, eid, buf); err != nil {
log.Errorf("overlay: Failed adding table entry to joininfo: %v", err)
}
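
Switching the table entry payload from a comma-joined string to protobuf gives a stable wire format that cannot be broken by separators inside the values. A minimal round-trip sketch, assuming the overlay package context (PeerRecord from the generated overlay.pb.go further down, proto being the gogo/protobuf import added in this hunk); addresses are illustrative:

// peerRecordRoundTrip marshals a peer record and decodes it back,
// exercising the same calls used in Join and EventNotify above.
func peerRecordRoundTrip() error {
	buf, err := proto.Marshal(&PeerRecord{
		EndpointIP:       "10.0.0.2/24",
		EndpointMAC:      "02:42:0a:00:00:02",
		TunnelEndpointIP: "192.168.1.10",
	})
	if err != nil {
		return err
	}
	var peer PeerRecord
	if err := proto.Unmarshal(buf, &peer); err != nil {
		return err
	}
	// peer now carries the same three fields that were marshalled,
	// regardless of commas or other separators inside the values.
	return nil
}
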
@@ -122,27 +141,34 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri
}
eid := key
- values := strings.Split(string(value), ",")
- if len(values) < 3 {
- log.Errorf("Invalid value %s received through event notify", string(value))
+
+ var peer PeerRecord
+ if err := proto.Unmarshal(value, &peer); err != nil {
+ log.Errorf("Failed to unmarshal peer record: %v", err)
return
}
- addr, err := types.ParseCIDR(values[0])
+ // Ignore local peers. We already know about them and they
+ // should not be added to vxlan fdb.
+ if peer.TunnelEndpointIP == d.bindAddress {
+ return
+ }
+
+ addr, err := types.ParseCIDR(peer.EndpointIP)
if err != nil {
- log.Errorf("Invalid peer IP %s received in event notify", values[0])
+ log.Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP)
return
}
- mac, err := net.ParseMAC(values[1])
+ mac, err := net.ParseMAC(peer.EndpointMAC)
if err != nil {
- log.Errorf("Invalid mac %s received in event notify", values[1])
+ log.Errorf("Invalid mac %s received in event notify", peer.EndpointMAC)
return
}
- vtep := net.ParseIP(values[2])
+ vtep := net.ParseIP(peer.TunnelEndpointIP)
if vtep == nil {
- log.Errorf("Invalid VTEP %s received in event notify", values[2])
+ log.Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP)
return
}
@@ -181,5 +207,9 @@ func (d *driver) Leave(nid, eid string) error {
n.leaveSandbox()
+ if err := d.checkEncryption(nid, nil, 0, true, false); err != nil {
+ log.Warn(err)
+ }
+
return nil
}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go
index 2dd288fd26..96757abc4e 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go
@@ -7,7 +7,7 @@ import (
log "github.com/Sirupsen/logrus"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/netutils"
- "github.com/vishvananda/netlink"
+ "github.com/docker/libnetwork/ns"
)
type endpointTable map[string]*endpoint
@@ -84,6 +84,8 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
}
func (d *driver) DeleteEndpoint(nid, eid string) error {
+ nlh := ns.NlHandle()
+
if err := validateID(nid, eid); err != nil {
return err
}
@@ -104,12 +106,12 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
return nil
}
- link, err := netlink.LinkByName(ep.ifName)
+ link, err := nlh.LinkByName(ep.ifName)
if err != nil {
log.Debugf("Failed to retrieve interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err)
return nil
}
- if err := netlink.LinkDel(link); err != nil {
+ if err := nlh.LinkDel(link); err != nil {
log.Debugf("Failed to delete interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err)
}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_network.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_network.go
index 893f8da314..05ed34a8fa 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_network.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_network.go
@@ -16,16 +16,20 @@ import (
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/netutils"
+ "github.com/docker/libnetwork/ns"
"github.com/docker/libnetwork/osl"
"github.com/docker/libnetwork/resolvconf"
"github.com/docker/libnetwork/types"
"github.com/vishvananda/netlink"
"github.com/vishvananda/netlink/nl"
+ "github.com/vishvananda/netns"
)
var (
- hostMode bool
- hostModeOnce sync.Once
+ hostMode bool
+ networkOnce sync.Once
+ networkMu sync.Mutex
+ vniTbl = make(map[uint32]string)
)
type networkTable map[string]*network
@@ -58,6 +62,7 @@ type network struct {
initEpoch int
initErr error
subnets []*subnet
+ secure bool
sync.Mutex
}
@@ -106,6 +111,9 @@ func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo d
vnis = append(vnis, uint32(vni))
}
}
+ if _, ok := optMap["secure"]; ok {
+ n.secure = true
+ }
}
// If we are getting vnis from libnetwork, either we get for
@@ -159,7 +167,18 @@ func (d *driver) DeleteNetwork(nid string) error {
d.deleteNetwork(nid)
- return n.releaseVxlanID()
+ vnis, err := n.releaseVxlanID()
+ if err != nil {
+ return err
+ }
+
+ if n.secure {
+ for _, vni := range vnis {
+ programMangle(vni, false)
+ }
+ }
+
+ return nil
}
func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error {
@@ -249,7 +268,48 @@ func (n *network) destroySandbox() {
}
}
-func setHostMode() {
+func populateVNITbl() {
+ filepath.Walk(filepath.Dir(osl.GenerateKey("walk")),
+ func(path string, info os.FileInfo, err error) error {
+ _, fname := filepath.Split(path)
+
+ if len(strings.Split(fname, "-")) <= 1 {
+ return nil
+ }
+
+ ns, err := netns.GetFromPath(path)
+ if err != nil {
+ logrus.Errorf("Could not open namespace path %s during vni population: %v", path, err)
+ return nil
+ }
+ defer ns.Close()
+
+ nlh, err := netlink.NewHandleAt(ns)
+ if err != nil {
+ logrus.Errorf("Could not open netlink handle during vni population for ns %s: %v", path, err)
+ return nil
+ }
+ defer nlh.Delete()
+
+ links, err := nlh.LinkList()
+ if err != nil {
+ logrus.Errorf("Failed to list interfaces during vni population for ns %s: %v", path, err)
+ return nil
+ }
+
+ for _, l := range links {
+ if l.Type() == "vxlan" {
+ vniTbl[uint32(l.(*netlink.Vxlan).VxlanId)] = path
+ }
+ }
+
+ return nil
+ })
+}
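
The walk body above is the per-namespace recipe this diff uses repeatedly: resolve a namespace from its path, open a netlink handle scoped to it, and inspect its links. A standalone sketch of that recipe, collecting the VNIs of the vxlan links in one sandbox; the path is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netns"
)

// listVNIs opens the namespace at path, gets a netlink handle scoped
// to it, and returns the VNIs of any vxlan links it contains.
func listVNIs(path string) ([]uint32, error) {
	nsh, err := netns.GetFromPath(path)
	if err != nil {
		return nil, err
	}
	defer nsh.Close()

	nlh, err := netlink.NewHandleAt(nsh)
	if err != nil {
		return nil, err
	}
	defer nlh.Delete()

	links, err := nlh.LinkList()
	if err != nil {
		return nil, err
	}
	var vnis []uint32
	for _, l := range links {
		if vx, ok := l.(*netlink.Vxlan); ok {
			vnis = append(vnis, uint32(vx.VxlanId))
		}
	}
	return vnis, nil
}

func main() {
	vnis, err := listVNIs("/var/run/docker/netns/1-abcdef")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(vnis)
}
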
+
+func networkOnceInit() {
+ populateVNITbl()
+
if os.Getenv("_OVERLAY_HOST_MODE") != "" {
hostMode = true
return
@@ -264,24 +324,24 @@ func setHostMode() {
defer deleteInterface("testvxlan")
path := "/proc/self/ns/net"
- f, err := os.OpenFile(path, os.O_RDONLY, 0)
+ hNs, err := netns.GetFromPath(path)
if err != nil {
- logrus.Errorf("Failed to open path %s for network namespace for setting host mode: %v", path, err)
+ logrus.Errorf("Failed to get network namespace from path %s while setting host mode: %v", path, err)
return
}
- defer f.Close()
+ defer hNs.Close()
- nsFD := f.Fd()
+ nlh := ns.NlHandle()
- iface, err := netlink.LinkByName("testvxlan")
+ iface, err := nlh.LinkByName("testvxlan")
if err != nil {
- logrus.Errorf("Failed to get link testvxlan: %v", err)
+ logrus.Errorf("Failed to get link testvxlan while setting host mode: %v", err)
return
}
// If we are not able to move the vxlan interface to a namespace
// then fallback to host mode
- if err := netlink.LinkSetNsFd(iface, int(nsFD)); err != nil {
+ if err := nlh.LinkSetNsFd(iface, int(hNs)); err != nil {
hostMode = true
}
}
@@ -301,7 +361,11 @@ func (n *network) generateBridgeName(s *subnet) string {
id = n.id[:5]
}
- return "ov-" + fmt.Sprintf("%06x", n.vxlanID(s)) + "-" + id
+ return n.getBridgeNamePrefix(s) + "-" + id
+}
+
+func (n *network) getBridgeNamePrefix(s *subnet) string {
+ return "ov-" + fmt.Sprintf("%06x", n.vxlanID(s))
}
func isOverlap(nw *net.IPNet) bool {
@@ -328,15 +392,40 @@ func (n *network) initSubnetSandbox(s *subnet) error {
if hostMode {
// Try to delete stale bridge interface if it exists
- deleteInterface(brName)
+ if err := deleteInterface(brName); err != nil {
+ deleteInterfaceBySubnet(n.getBridgeNamePrefix(s), s)
+ }
// Try to delete the vxlan interface by vni if already present
- deleteVxlanByVNI(n.vxlanID(s))
+ deleteVxlanByVNI("", n.vxlanID(s))
if isOverlap(s.subnetIP) {
return fmt.Errorf("overlay subnet %s has conflicts in the host while running in host mode", s.subnetIP.String())
}
}
+ if !hostMode {
+ // Try to find out whether this subnet's vni is being used in
+ // some other namespace by looking at the vniTbl we just
+ // populated in the once init. If a hit is found then it
+ // must be a stale namespace from a previous life.
+ // Destroy it completely and reclaim the resources.
+ networkMu.Lock()
+ path, ok := vniTbl[n.vxlanID(s)]
+ networkMu.Unlock()
+
+ if ok {
+ deleteVxlanByVNI(path, n.vxlanID(s))
+ if err := syscall.Unmount(path, syscall.MNT_FORCE); err != nil {
+ logrus.Errorf("unmount of %s failed: %v", path, err)
+ }
+ os.Remove(path)
+
+ networkMu.Lock()
+ delete(vniTbl, n.vxlanID(s))
+ networkMu.Unlock()
+ }
+ }
+
// create a bridge and vxlan device for this subnet and move it to the sandbox
sbox := n.sandbox()
@@ -382,8 +471,23 @@ func (n *network) cleanupStaleSandboxes() {
pattern := pList[1]
if strings.Contains(n.id, pattern) {
+ // Delete all vnis
+ deleteVxlanByVNI(path, 0)
syscall.Unmount(path, syscall.MNT_DETACH)
os.Remove(path)
+
+ // Now that we have destroyed this
+ // sandbox, remove all references to
+ // it in vniTbl so that we don't
+ // inadvertently destroy the sandbox
+ // created in this life.
+ networkMu.Lock()
+ for vni, tblPath := range vniTbl {
+ if tblPath == path {
+ delete(vniTbl, vni)
+ }
+ }
+ networkMu.Unlock()
}
return nil
@@ -395,7 +499,7 @@ func (n *network) initSandbox() error {
n.initEpoch++
n.Unlock()
- hostModeOnce.Do(setHostMode)
+ networkOnce.Do(networkOnceInit)
if hostMode {
if err := addNetworkChain(n.id[:12]); err != nil {
@@ -425,7 +529,10 @@ func (n *network) initSandbox() error {
}
})
- go n.watchMiss(nlSock)
+ if nlSock != nil {
+ go n.watchMiss(nlSock)
+ }
+
return nil
}
@@ -451,6 +558,12 @@ func (n *network) watchMiss(nlSock *nl.NetlinkSocket) {
if neigh.IP.To4() == nil {
continue
}
+
+ // Not any of the network's subnets. Ignore.
+ if !n.contains(neigh.IP) {
+ continue
+ }
+
logrus.Debugf("miss notification for dest IP, %v", neigh.IP.String())
if neigh.State&(netlink.NUD_STALE|netlink.NUD_INCOMPLETE) == 0 {
@@ -549,6 +662,8 @@ func (n *network) KeyPrefix() []string {
}
func (n *network) Value() []byte {
+ m := map[string]interface{}{}
+
netJSON := []*subnetJSON{}
for _, s := range n.subnets {
@@ -561,10 +676,17 @@ func (n *network) Value() []byte {
}
b, err := json.Marshal(netJSON)
+ if err != nil {
+ return []byte{}
+ }
+ m["secure"] = n.secure
+ m["subnets"] = netJSON
+ b, err = json.Marshal(m)
if err != nil {
return []byte{}
}
+
return b
}
@@ -586,18 +708,38 @@ func (n *network) Skip() bool {
}
func (n *network) SetValue(value []byte) error {
- var newNet bool
- netJSON := []*subnetJSON{}
-
- err := json.Unmarshal(value, &netJSON)
- if err != nil {
- return err
+ var (
+ m map[string]interface{}
+ newNet bool
+ isMap = true
+ netJSON = []*subnetJSON{}
+ )
+
+ if err := json.Unmarshal(value, &m); err != nil {
+ err := json.Unmarshal(value, &netJSON)
+ if err != nil {
+ return err
+ }
+ isMap = false
}
if len(n.subnets) == 0 {
newNet = true
}
+ if isMap {
+ if val, ok := m["secure"]; ok {
+ n.secure = val.(bool)
+ }
+ bytes, err := json.Marshal(m["subnets"])
+ if err != nil {
+ return err
+ }
+ if err := json.Unmarshal(bytes, &netJSON); err != nil {
+ return err
+ }
+ }
+
for _, sj := range netJSON {
subnetIPstr := sj.SubnetIP
gwIPstr := sj.GwIP
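
The reworked SetValue keeps backward compatibility: new stores persist a JSON object carrying "secure" and "subnets", while stores written by older daemons hold a bare subnet array, so the decoder tries the map first and falls back to the legacy layout. A self-contained sketch of that two-pass decode; the subnetJSON field names are illustrative stand-ins, not libnetwork's exact ones:

package main

import (
	"encoding/json"
	"fmt"
)

// subnetJSON stands in for the driver's persisted subnet record; the
// field names here are illustrative.
type subnetJSON struct {
	SubnetIP string
	GwIP     string
	Vni      uint32
}

// decode tries the new map layout first and falls back to the legacy
// bare-array layout, mirroring the SetValue change above.
func decode(value []byte) (secure bool, subnets []*subnetJSON, err error) {
	var m map[string]interface{}
	if err = json.Unmarshal(value, &m); err != nil {
		// Legacy format: a plain JSON array of subnets.
		err = json.Unmarshal(value, &subnets)
		return false, subnets, err
	}
	if v, ok := m["secure"].(bool); ok {
		secure = v
	}
	raw, err := json.Marshal(m["subnets"])
	if err != nil {
		return false, nil, err
	}
	err = json.Unmarshal(raw, &subnets)
	return secure, subnets, err
}

func main() {
	newFmt := []byte(`{"secure":true,"subnets":[{"SubnetIP":"10.1.0.0/24","GwIP":"10.1.0.1/24","Vni":4096}]}`)
	oldFmt := []byte(`[{"SubnetIP":"10.1.0.0/24","GwIP":"10.1.0.1/24","Vni":4096}]`)

	s, subs, err := decode(newFmt)
	fmt.Println(s, len(subs), err) // true 1 <nil>
	s, subs, err = decode(oldFmt)
	fmt.Println(s, len(subs), err) // false 1 <nil>
}
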
@@ -636,9 +778,9 @@ func (n *network) writeToStore() error {
return n.driver.store.PutObjectAtomic(n)
}
-func (n *network) releaseVxlanID() error {
+func (n *network) releaseVxlanID() ([]uint32, error) {
if len(n.subnets) == 0 {
- return nil
+ return nil, nil
}
if n.driver.store != nil {
@@ -646,22 +788,24 @@ func (n *network) releaseVxlanID() error {
if err == datastore.ErrKeyModified || err == datastore.ErrKeyNotFound {
// In both the above cases we can safely assume that the key has been removed by some other
// instance and so simply get out of here
- return nil
+ return nil, nil
}
- return fmt.Errorf("failed to delete network to vxlan id map: %v", err)
+ return nil, fmt.Errorf("failed to delete network to vxlan id map: %v", err)
}
}
-
+ var vnis []uint32
for _, s := range n.subnets {
if n.driver.vxlanIdm != nil {
- n.driver.vxlanIdm.Release(uint64(n.vxlanID(s)))
+ vni := n.vxlanID(s)
+ vnis = append(vnis, vni)
+ n.driver.vxlanIdm.Release(uint64(vni))
}
n.setVxlanID(s, 0)
}
- return nil
+ return vnis, nil
}
func (n *network) obtainVxlanID(s *subnet) error {
@@ -700,6 +844,18 @@ func (n *network) obtainVxlanID(s *subnet) error {
}
}
+// contains returns true if the passed ip belongs to one of the
+// network's subnets
+func (n *network) contains(ip net.IP) bool {
+ for _, s := range n.subnets {
+ if s.subnetIP.Contains(ip) {
+ return true
+ }
+ }
+
+ return false
+}
+
// getSubnetforIP returns the subnet to which the given IP belongs
func (n *network) getSubnetforIP(ip *net.IPNet) *subnet {
for _, s := range n.subnets {
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_utils.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_utils.go
index 42c44b9675..f9f32dec48 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_utils.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_utils.go
@@ -2,11 +2,14 @@ package overlay
import (
"fmt"
+ "strings"
+ "github.com/Sirupsen/logrus"
"github.com/docker/libnetwork/netutils"
+ "github.com/docker/libnetwork/ns"
"github.com/docker/libnetwork/osl"
"github.com/vishvananda/netlink"
- "github.com/vishvananda/netlink/nl"
+ "github.com/vishvananda/netns"
)
func validateID(nid, eid string) error {
@@ -23,15 +26,16 @@ func validateID(nid, eid string) error {
func createVethPair() (string, string, error) {
defer osl.InitOSContext()()
+ nlh := ns.NlHandle()
// Generate a name for what will be the host side pipe interface
- name1, err := netutils.GenerateIfaceName(vethPrefix, vethLen)
+ name1, err := netutils.GenerateIfaceName(nlh, vethPrefix, vethLen)
if err != nil {
return "", "", fmt.Errorf("error generating veth name1: %v", err)
}
// Generate a name for what will be the sandbox side pipe interface
- name2, err := netutils.GenerateIfaceName(vethPrefix, vethLen)
+ name2, err := netutils.GenerateIfaceName(nlh, vethPrefix, vethLen)
if err != nil {
return "", "", fmt.Errorf("error generating veth name2: %v", err)
}
@@ -40,7 +44,7 @@ func createVethPair() (string, string, error) {
veth := &netlink.Veth{
LinkAttrs: netlink.LinkAttrs{Name: name1, TxQLen: 0},
PeerName: name2}
- if err := netlink.LinkAdd(veth); err != nil {
+ if err := nlh.LinkAdd(veth); err != nil {
return "", "", fmt.Errorf("error creating veth pair: %v", err)
}
@@ -54,49 +58,94 @@ func createVxlan(name string, vni uint32) error {
LinkAttrs: netlink.LinkAttrs{Name: name},
VxlanId: int(vni),
Learning: true,
- Port: int(nl.Swap16(vxlanPort)), //network endian order
+ Port: vxlanPort,
Proxy: true,
L3miss: true,
L2miss: true,
}
- if err := netlink.LinkAdd(vxlan); err != nil {
+ if err := ns.NlHandle().LinkAdd(vxlan); err != nil {
return fmt.Errorf("error creating vxlan interface: %v", err)
}
return nil
}
+func deleteInterfaceBySubnet(brPrefix string, s *subnet) error {
+ defer osl.InitOSContext()()
+
+ nlh := ns.NlHandle()
+ links, err := nlh.LinkList()
+ if err != nil {
+ return fmt.Errorf("failed to list interfaces while deleting bridge interface by subnet: %v", err)
+ }
+
+ for _, l := range links {
+ name := l.Attrs().Name
+ if _, ok := l.(*netlink.Bridge); ok && strings.HasPrefix(name, brPrefix) {
+ addrList, err := nlh.AddrList(l, netlink.FAMILY_V4)
+ if err != nil {
+ logrus.Errorf("error getting AddressList for bridge %s", name)
+ continue
+ }
+ for _, addr := range addrList {
+ if netutils.NetworkOverlaps(addr.IPNet, s.subnetIP) {
+ err = nlh.LinkDel(l)
+ if err != nil {
+ logrus.Errorf("error deleting bridge (%s) with subnet %v: %v", name, addr.IPNet, err)
+ }
+ }
+ }
+ }
+ }
+ return nil
+
+}
+
func deleteInterface(name string) error {
defer osl.InitOSContext()()
- link, err := netlink.LinkByName(name)
+ link, err := ns.NlHandle().LinkByName(name)
if err != nil {
return fmt.Errorf("failed to find interface with name %s: %v", name, err)
}
- if err := netlink.LinkDel(link); err != nil {
+ if err := ns.NlHandle().LinkDel(link); err != nil {
return fmt.Errorf("error deleting interface with name %s: %v", name, err)
}
return nil
}
-func deleteVxlanByVNI(vni uint32) error {
+func deleteVxlanByVNI(path string, vni uint32) error {
defer osl.InitOSContext()()
- links, err := netlink.LinkList()
+ nlh := ns.NlHandle()
+ if path != "" {
+ ns, err := netns.GetFromPath(path)
+ if err != nil {
+ return fmt.Errorf("failed to get ns handle for %s: %v", path, err)
+ }
+ defer ns.Close()
+
+ nlh, err = netlink.NewHandleAt(ns)
+ if err != nil {
+ return fmt.Errorf("failed to get netlink handle for ns %s: %v", path, err)
+ }
+ defer nlh.Delete()
+ }
+
+ links, err := nlh.LinkList()
if err != nil {
return fmt.Errorf("failed to list interfaces while deleting vxlan interface by vni: %v", err)
}
for _, l := range links {
- if l.Type() == "vxlan" && l.(*netlink.Vxlan).VxlanId == int(vni) {
- err = netlink.LinkDel(l)
+ if l.Type() == "vxlan" && (vni == 0 || l.(*netlink.Vxlan).VxlanId == int(vni)) {
+ err = nlh.LinkDel(l)
if err != nil {
return fmt.Errorf("error deleting vxlan interface with id %d: %v", vni, err)
}
-
return nil
}
}
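
With the new signature, an empty path means the caller's own namespace and vni == 0 matches any vxlan link, which is how the stale-sandbox cleanup above clears interfaces it cannot name. Illustrative calls, assuming the package context (the namespace path is made up):

// delete the vxlan carrying VNI 4096 from the current namespace
_ = deleteVxlanByVNI("", 4096)

// delete a vxlan link inside a stale sandbox namespace by path
_ = deleteVxlanByVNI("/var/run/docker/netns/1-d00d1eca11", 0)
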
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.go
index e4d487adaa..d9c5ab7961 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.go
@@ -1,5 +1,7 @@
package overlay
+//go:generate protoc -I.:../../Godeps/_workspace/src/github.com/gogo/protobuf --gogo_out=import_path=github.com/docker/libnetwork/drivers/overlay,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. overlay.proto
+
import (
"fmt"
"net"
@@ -20,7 +22,7 @@ const (
vethPrefix = "veth"
vethLen = 7
vxlanIDStart = 256
- vxlanIDEnd = 1000
+ vxlanIDEnd = (1 << 24) - 1
vxlanPort = 4789
vxlanVethMTU = 1450
)
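
Raising vxlanIDEnd is a capacity fix: the VNI field in the vxlan header is 24 bits wide, so (1 << 24) - 1 = 16777215 is the largest valid VNI, and the allocator can now draw from the whole space instead of stopping at 1000.
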
@@ -35,12 +37,14 @@ type driver struct {
neighIP string
config map[string]interface{}
peerDb peerNetworkMap
+ secMap *encrMap
serfInstance *serf.Serf
networks networkTable
store datastore.DataStore
vxlanIdm *idm.Idm
once sync.Once
joinOnce sync.Once
+ keys []*key
sync.Mutex
}
@@ -49,12 +53,12 @@ func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
c := driverapi.Capability{
DataScope: datastore.GlobalScope,
}
-
d := &driver{
networks: networkTable{},
peerDb: peerNetworkMap{
mp: map[string]*peerMap{},
},
+ secMap: &encrMap{nodes: map[string][]*spi{}},
config: config,
}
@@ -207,6 +211,7 @@ func (d *driver) pushLocalEndpointEvent(action, nid, eid string) {
// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster
func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error {
+ var err error
switch dType {
case discoverapi.NodeDiscovery:
nodeData, ok := data.(discoverapi.NodeDiscoveryData)
@@ -215,7 +220,6 @@ func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{})
}
d.nodeJoin(nodeData.Address, nodeData.Self)
case discoverapi.DatastoreConfig:
- var err error
if d.store != nil {
return types.ForbiddenErrorf("cannot accept datastore configuration: Overlay driver has a datastore configured already")
}
@@ -227,6 +231,39 @@ func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{})
if err != nil {
return types.InternalErrorf("failed to initialize data store: %v", err)
}
+ case discoverapi.EncryptionKeysConfig:
+ encrData, ok := data.(discoverapi.DriverEncryptionConfig)
+ if !ok {
+ return fmt.Errorf("invalid encryption key notification data")
+ }
+ keys := make([]*key, 0, len(encrData.Keys))
+ for i := 0; i < len(encrData.Keys); i++ {
+ k, err := parseEncryptionKey(encrData.Keys[i], encrData.Tags[i])
+ if err != nil {
+ return err
+ }
+ keys = append(keys, k)
+ }
+ d.setKeys(keys)
+ case discoverapi.EncryptionKeysUpdate:
+ var newKey, delKey, priKey *key
+ encrData, ok := data.(discoverapi.DriverEncryptionUpdate)
+ if !ok {
+ return fmt.Errorf("invalid encryption key notification data")
+ }
+ newKey, err = parseEncryptionKey(encrData.Key, encrData.Tag)
+ if err != nil {
+ return err
+ }
+ priKey, err = parseEncryptionKey(encrData.Primary, encrData.PrimaryTag)
+ if err != nil {
+ return err
+ }
+ delKey, err = parseEncryptionKey(encrData.Prune, encrData.PruneTag)
+ if err != nil {
+ return err
+ }
+ d.updateKeys(newKey, priKey, delKey)
default:
}
return nil
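
For orientation, a hedged sketch of handing the driver its initial keys through this path; the Keys/Tags field names are inferred from their use above, d stands for an initialized overlay driver, and the hex strings are illustrative 16-byte keys:

cfg := discoverapi.DriverEncryptionConfig{
	Keys: []string{
		"6162636465666768696a6b6c6d6e6f70",
		"707172737475767778797a7b7c7d7e7f",
	},
	Tags: []string{"100", "101"},
}
if err := d.DiscoverNew(discoverapi.EncryptionKeysConfig, cfg); err != nil {
	log.Warn(err)
}
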
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.pb.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.pb.go
new file mode 100644
index 0000000000..cfa0eeeae4
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.pb.go
@@ -0,0 +1,468 @@
+// Code generated by protoc-gen-gogo.
+// source: overlay.proto
+// DO NOT EDIT!
+
+/*
+ Package overlay is a generated protocol buffer package.
+
+ It is generated from these files:
+ overlay.proto
+
+ It has these top-level messages:
+ PeerRecord
+*/
+package overlay
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+const _ = proto.GoGoProtoPackageIsVersion1
+
+// PeerRecord defines the information corresponding to a peer
+// container in the overlay network.
+type PeerRecord struct {
+ // Endpoint IP is the IP of the container attachment on the
+ // given overlay network.
+ EndpointIP string `protobuf:"bytes,1,opt,name=endpoint_ip,json=endpointIp,proto3" json:"endpoint_ip,omitempty"`
+ // Endpoint MAC is the mac address of the container attachment
+ // on the given overlay network.
+ EndpointMAC string `protobuf:"bytes,2,opt,name=endpoint_mac,json=endpointMac,proto3" json:"endpoint_mac,omitempty"`
+ // Tunnel Endpoint IP defines the host IP for the host in
+ // which this container is running and can be reached by
+ // building a tunnel to that host IP.
+ TunnelEndpointIP string `protobuf:"bytes,3,opt,name=tunnel_endpoint_ip,json=tunnelEndpointIp,proto3" json:"tunnel_endpoint_ip,omitempty"`
+}
+
+func (m *PeerRecord) Reset() { *m = PeerRecord{} }
+func (*PeerRecord) ProtoMessage() {}
+func (*PeerRecord) Descriptor() ([]byte, []int) { return fileDescriptorOverlay, []int{0} }
+
+func init() {
+ proto.RegisterType((*PeerRecord)(nil), "overlay.PeerRecord")
+}
+func (this *PeerRecord) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&overlay.PeerRecord{")
+ s = append(s, "EndpointIP: "+fmt.Sprintf("%#v", this.EndpointIP)+",\n")
+ s = append(s, "EndpointMAC: "+fmt.Sprintf("%#v", this.EndpointMAC)+",\n")
+ s = append(s, "TunnelEndpointIP: "+fmt.Sprintf("%#v", this.TunnelEndpointIP)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringOverlay(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringOverlay(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+func (m *PeerRecord) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PeerRecord) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.EndpointIP) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintOverlay(data, i, uint64(len(m.EndpointIP)))
+ i += copy(data[i:], m.EndpointIP)
+ }
+ if len(m.EndpointMAC) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintOverlay(data, i, uint64(len(m.EndpointMAC)))
+ i += copy(data[i:], m.EndpointMAC)
+ }
+ if len(m.TunnelEndpointIP) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintOverlay(data, i, uint64(len(m.TunnelEndpointIP)))
+ i += copy(data[i:], m.TunnelEndpointIP)
+ }
+ return i, nil
+}
+
+func encodeFixed64Overlay(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Overlay(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintOverlay(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
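
The encodeVarint/encodeFixed helpers above implement standard protobuf wire encoding; encodeVarintOverlay in particular is base-128 varint: seven payload bits per byte, with the high bit set while more bytes follow. A minimal standalone sketch of the same scheme (hypothetical names, not part of the patch):

    package main

    import "fmt"

    // putUvarint appends v using protobuf base-128 varint encoding:
    // low seven bits per byte, MSB set while more bytes follow.
    func putUvarint(buf []byte, v uint64) []byte {
    	for v >= 1<<7 {
    		buf = append(buf, byte(v&0x7f|0x80))
    		v >>= 7
    	}
    	return append(buf, byte(v))
    }

    func main() {
    	// 300 = 0b1_0010_1100 encodes as 0xac 0x02.
    	fmt.Printf("% x\n", putUvarint(nil, 300)) // ac 02
    }
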
+func (m *PeerRecord) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.EndpointIP)
+ if l > 0 {
+ n += 1 + l + sovOverlay(uint64(l))
+ }
+ l = len(m.EndpointMAC)
+ if l > 0 {
+ n += 1 + l + sovOverlay(uint64(l))
+ }
+ l = len(m.TunnelEndpointIP)
+ if l > 0 {
+ n += 1 + l + sovOverlay(uint64(l))
+ }
+ return n
+}
+
+func sovOverlay(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozOverlay(x uint64) (n int) {
+ return sovOverlay(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
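
sozOverlay sizes sint fields via zigzag encoding, which maps signed integers to unsigned ones so that values near zero stay small in varint form: 0->0, -1->1, 1->2, -2->3. A hedged sketch of the mapping it relies on:

    package main

    import "fmt"

    // zigzag mirrors the (x << 1) ^ (x >> 63) trick used by
    // sozOverlay; the arithmetic right shift smears the sign bit
    // across all 64 bits before the XOR.
    func zigzag(x int64) uint64 {
    	return uint64((x << 1) ^ (x >> 63))
    }

    func main() {
    	for _, v := range []int64{0, -1, 1, -2, 2} {
    		fmt.Println(v, "->", zigzag(v)) // 0 1 2 3 4
    	}
    }
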
+func (this *PeerRecord) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PeerRecord{`,
+ `EndpointIP:` + fmt.Sprintf("%v", this.EndpointIP) + `,`,
+ `EndpointMAC:` + fmt.Sprintf("%v", this.EndpointMAC) + `,`,
+ `TunnelEndpointIP:` + fmt.Sprintf("%v", this.TunnelEndpointIP) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringOverlay(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *PeerRecord) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowOverlay
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PeerRecord: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PeerRecord: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EndpointIP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowOverlay
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthOverlay
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EndpointIP = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EndpointMAC", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowOverlay
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthOverlay
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EndpointMAC = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TunnelEndpointIP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowOverlay
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthOverlay
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TunnelEndpointIP = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipOverlay(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthOverlay
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipOverlay(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowOverlay
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowOverlay
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowOverlay
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthOverlay
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowOverlay
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipOverlay(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthOverlay = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowOverlay = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorOverlay = []byte{
+ // 195 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x2f, 0x4b, 0x2d,
+ 0xca, 0x49, 0xac, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x87, 0x72, 0xa5, 0x44, 0xd2,
+ 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x69, 0x2b, 0x23, 0x17, 0x57, 0x40,
+ 0x6a, 0x6a, 0x51, 0x50, 0x6a, 0x72, 0x7e, 0x51, 0x8a, 0x90, 0x3e, 0x17, 0x77, 0x6a, 0x5e, 0x4a,
+ 0x41, 0x7e, 0x66, 0x5e, 0x49, 0x7c, 0x66, 0x81, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x13, 0xdf,
+ 0xa3, 0x7b, 0xf2, 0x5c, 0xae, 0x50, 0x61, 0xcf, 0x80, 0x20, 0x2e, 0x98, 0x12, 0xcf, 0x02, 0x21,
+ 0x23, 0x2e, 0x1e, 0xb8, 0x86, 0xdc, 0xc4, 0x64, 0x09, 0x26, 0xb0, 0x0e, 0x7e, 0xa0, 0x0e, 0x6e,
+ 0x98, 0x0e, 0x5f, 0x47, 0xe7, 0x20, 0xb8, 0xa9, 0xbe, 0x89, 0xc9, 0x42, 0x4e, 0x5c, 0x42, 0x25,
+ 0xa5, 0x79, 0x79, 0xa9, 0x39, 0xf1, 0xc8, 0x76, 0x31, 0x83, 0x75, 0x8a, 0x00, 0x75, 0x0a, 0x84,
+ 0x80, 0x65, 0x91, 0x6c, 0x14, 0x28, 0x41, 0x15, 0x29, 0x70, 0x92, 0xb8, 0xf1, 0x50, 0x8e, 0xe1,
+ 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x80, 0xf8, 0x02, 0x10, 0x3f, 0x00, 0xe2,
+ 0x24, 0x36, 0xb0, 0xc7, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xd7, 0x7d, 0x7d, 0x08,
+ 0x01, 0x00, 0x00,
+}
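
fileDescriptorOverlay holds the gzipped FileDescriptorProto for overlay.proto and is returned through PeerRecord's Descriptor method. A small sketch showing how the embedded descriptor can be inflated (assuming the vendored import path):

    package main

    import (
    	"bytes"
    	"compress/gzip"
    	"fmt"
    	"io/ioutil"

    	"github.com/docker/libnetwork/drivers/overlay"
    )

    func main() {
    	desc, _ := (&overlay.PeerRecord{}).Descriptor()
    	zr, err := gzip.NewReader(bytes.NewReader(desc))
    	if err != nil {
    		panic(err)
    	}
    	raw, err := ioutil.ReadAll(zr)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(len(raw)) // uncompressed FileDescriptorProto bytes
    }
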
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.proto b/vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.proto
new file mode 100644
index 0000000000..45b8c9de7e
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.proto
@@ -0,0 +1,27 @@
+syntax = "proto3";
+
+import "gogoproto/gogo.proto";
+
+package overlay;
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.stringer_all) = true;
+option (gogoproto.gostring_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.goproto_stringer_all) = false;
+
+// PeerRecord defines the information corresponding to a peer
+// container in the overlay network.
+message PeerRecord {
+ // Endpoint IP is the IP of the container attachment on the
+ // given overlay network.
+ string endpoint_ip = 1 [(gogoproto.customname) = "EndpointIP"];
+ // Endpoint MAC is the MAC address of the container attachment
+ // on the given overlay network.
+ string endpoint_mac = 2 [(gogoproto.customname) = "EndpointMAC"];
+ // Tunnel Endpoint IP is the IP of the host on which this
+ // container is running; the peer can be reached by building
+ // a tunnel to that host IP.
+ string tunnel_endpoint_ip = 3 [(gogoproto.customname) = "TunnelEndpointIP"];
+} \ No newline at end of file
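
Together with the generated code above, this proto gives PeerRecord a self-contained wire format: three length-delimited string fields with tags 1-3. A minimal round-trip sketch (assuming the vendored import path; the addresses are illustrative):

    package main

    import (
    	"fmt"

    	"github.com/docker/libnetwork/drivers/overlay"
    )

    func main() {
    	in := &overlay.PeerRecord{
    		EndpointIP:       "10.0.0.2/24",
    		EndpointMAC:      "02:42:0a:00:00:02",
    		TunnelEndpointIP: "192.168.1.10",
    	}

    	data, err := in.Marshal()
    	if err != nil {
    		panic(err)
    	}

    	out := &overlay.PeerRecord{}
    	if err := out.Unmarshal(data); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.EndpointIP == in.EndpointIP) // true
    }
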
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/ovmanager/ovmanager.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/ovmanager/ovmanager.go
new file mode 100644
index 0000000000..c00d4c4d40
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/ovmanager/ovmanager.go
@@ -0,0 +1,248 @@
+package ovmanager
+
+import (
+ "fmt"
+ "log"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/libnetwork/datastore"
+ "github.com/docker/libnetwork/discoverapi"
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/idm"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/types"
+)
+
+const (
+ networkType = "overlay"
+ vxlanIDStart = 256
+ vxlanIDEnd = 1000
+)
+
+type networkTable map[string]*network
+
+type driver struct {
+ config map[string]interface{}
+ networks networkTable
+ store datastore.DataStore
+ vxlanIdm *idm.Idm
+ sync.Mutex
+}
+
+type subnet struct {
+ subnetIP *net.IPNet
+ gwIP *net.IPNet
+ vni uint32
+}
+
+type network struct {
+ id string
+ driver *driver
+ subnets []*subnet
+ sync.Mutex
+}
+
+// Init registers a new instance of the overlay driver
+func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
+ var err error
+ c := driverapi.Capability{
+ DataScope: datastore.GlobalScope,
+ }
+
+ d := &driver{
+ networks: networkTable{},
+ config: config,
+ }
+
+ d.vxlanIdm, err = idm.New(nil, "vxlan-id", vxlanIDStart, vxlanIDEnd)
+ if err != nil {
+ return fmt.Errorf("failed to initialize vxlan id manager: %v", err)
+ }
+
+ return dc.RegisterDriver(networkType, d, c)
+}
+
+func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) {
+ if id == "" {
+ return nil, fmt.Errorf("invalid network id for overlay network")
+ }
+
+ if ipV4Data == nil {
+ return nil, fmt.Errorf("empty ipv4 data passed during overlay network creation")
+ }
+
+ n := &network{
+ id: id,
+ driver: d,
+ subnets: []*subnet{},
+ }
+
+ opts := make(map[string]string)
+ vxlanIDList := make([]uint32, 0, len(ipV4Data))
+ for key, val := range option {
+ if key == netlabel.OverlayVxlanIDList {
+ logrus.Debugf("overlay network option: %s", val)
+ valStrList := strings.Split(val, ",")
+ for _, idStr := range valStrList {
+ vni, err := strconv.Atoi(idStr)
+ if err != nil {
+ return nil, fmt.Errorf("invalid vxlan id value %q passed", idStr)
+ }
+
+ vxlanIDList = append(vxlanIDList, uint32(vni))
+ }
+ } else {
+ opts[key] = val
+ }
+ }
+
+ for i, ipd := range ipV4Data {
+ s := &subnet{
+ subnetIP: ipd.Pool,
+ gwIP: ipd.Gateway,
+ }
+
+ if len(vxlanIDList) > i {
+ s.vni = vxlanIDList[i]
+ }
+
+ if err := n.obtainVxlanID(s); err != nil {
+ log.Printf("Could not obtain vxlan id for pool %s: %v", s.subnetIP, err)
+ }
+
+ n.subnets = append(n.subnets, s)
+ }
+
+ val := fmt.Sprintf("%d", n.subnets[0].vni)
+ for _, s := range n.subnets[1:] {
+ val = val + fmt.Sprintf(",%d", s.vni)
+ }
+ opts[netlabel.OverlayVxlanIDList] = val
+
+ d.Lock()
+ d.networks[id] = n
+ d.Unlock()
+
+ return opts, nil
+}
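
The only driver-specific option NetworkAllocate consumes is netlabel.OverlayVxlanIDList: a comma-separated VNI string with one entry per IPv4 subnet; subnets without an entry get a VNI auto-assigned from the idm range 256-1000. A minimal sketch of the parsing contract the function enforces:

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    func main() {
    	// Value shape expected under netlabel.OverlayVxlanIDList.
    	val := "300,301"

    	var vnis []uint32
    	for _, idStr := range strings.Split(val, ",") {
    		vni, err := strconv.Atoi(idStr)
    		if err != nil {
    			panic(fmt.Errorf("invalid vxlan id value %q passed", idStr))
    		}
    		vnis = append(vnis, uint32(vni))
    	}
    	fmt.Println(vnis) // [300 301]
    }
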
+
+func (d *driver) NetworkFree(id string) error {
+ if id == "" {
+ return fmt.Errorf("invalid network id passed while freeing overlay network")
+ }
+
+ d.Lock()
+ n, ok := d.networks[id]
+ d.Unlock()
+
+ if !ok {
+ return fmt.Errorf("overlay network with id %s not found", id)
+ }
+
+ // Release all vxlan IDs in one shot.
+ n.releaseVxlanID()
+
+ d.Lock()
+ delete(d.networks, id)
+ d.Unlock()
+
+ return nil
+}
+
+func (n *network) obtainVxlanID(s *subnet) error {
+ var (
+ err error
+ vni uint64
+ )
+
+ n.Lock()
+ vni = uint64(s.vni)
+ n.Unlock()
+
+ if vni == 0 {
+ vni, err = n.driver.vxlanIdm.GetID()
+ if err != nil {
+ return err
+ }
+
+ n.Lock()
+ s.vni = uint32(vni)
+ n.Unlock()
+ return nil
+ }
+
+ return n.driver.vxlanIdm.GetSpecificID(vni)
+}
+
+func (n *network) releaseVxlanID() {
+ n.Lock()
+ vnis := make([]uint32, 0, len(n.subnets))
+ for _, s := range n.subnets {
+ vnis = append(vnis, s.vni)
+ s.vni = 0
+ }
+ n.Unlock()
+
+ for _, vni := range vnis {
+ n.driver.vxlanIdm.Release(uint64(vni))
+ }
+}
+
+func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error {
+ return types.NotImplementedErrorf("not implemented")
+}
+
+func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) {
+}
+
+func (d *driver) DeleteNetwork(nid string) error {
+ return types.NotImplementedErrorf("not implemented")
+}
+
+func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, epOptions map[string]interface{}) error {
+ return types.NotImplementedErrorf("not implemented")
+}
+
+func (d *driver) DeleteEndpoint(nid, eid string) error {
+ return types.NotImplementedErrorf("not implemented")
+}
+
+func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) {
+ return nil, types.NotImplementedErrorf("not implemented")
+}
+
+// Join method is invoked when a Sandbox is attached to an endpoint.
+func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
+ return types.NotImplementedErrorf("not implemented")
+}
+
+// Leave method is invoked when a Sandbox detaches from an endpoint.
+func (d *driver) Leave(nid, eid string) error {
+ return types.NotImplementedErrorf("not implemented")
+}
+
+func (d *driver) Type() string {
+ return networkType
+}
+
+// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster
+func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error {
+ return types.NotImplementedErrorf("not implemented")
+}
+
+// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster
+func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error {
+ return types.NotImplementedErrorf("not implemented")
+}
+
+func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error {
+ return types.NotImplementedErrorf("not implemented")
+}
+
+func (d *driver) RevokeExternalConnectivity(nid, eid string) error {
+ return types.NotImplementedErrorf("not implemented")
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/peerdb.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/peerdb.go
index 3676136434..2c1112fc1d 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/overlay/peerdb.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/peerdb.go
@@ -5,6 +5,8 @@ import (
"net"
"sync"
"syscall"
+
+ log "github.com/Sirupsen/logrus"
)
const ovPeerTable = "overlay_peer_table"
@@ -88,7 +90,7 @@ func (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool
for pKeyStr, pEntry := range pMap.mp {
var pKey peerKey
if _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {
- fmt.Printf("peer key scan failed: %v", err)
+ log.Warnf("Peer key scan on network %s failed: %v", nid, err)
}
if f(&pKey, &pEntry) {
@@ -273,6 +275,10 @@ func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
return fmt.Errorf("subnet sandbox join failed for %q: %v", s.subnetIP.String(), err)
}
+ if err := d.checkEncryption(nid, vtep, n.vxlanID(s), false, true); err != nil {
+ log.Warn(err)
+ }
+
// Add neighbor entry for the peer IP
if err := sbox.AddNeighbor(peerIP, peerMac, sbox.NeighborOptions().LinkName(s.vxlanName)); err != nil {
return fmt.Errorf("could not add neigbor entry into the sandbox: %v", err)
@@ -318,6 +324,10 @@ func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMas
return fmt.Errorf("could not delete neigbor entry into the sandbox: %v", err)
}
+ if err := d.checkEncryption(nid, vtep, 0, false, false); err != nil {
+ log.Warn(err)
+ }
+
return nil
}
diff --git a/vendor/src/github.com/docker/libnetwork/drvregistry/drvregistry.go b/vendor/src/github.com/docker/libnetwork/drvregistry/drvregistry.go
index 6c1804dfa9..f12da15c0b 100644
--- a/vendor/src/github.com/docker/libnetwork/drvregistry/drvregistry.go
+++ b/vendor/src/github.com/docker/libnetwork/drvregistry/drvregistry.go
@@ -163,7 +163,7 @@ func (r *DrvRegistry) initIPAMs(lDs, gDs interface{}) error {
remoteIpam.Init,
nullIpam.Init,
} {
- if err := fn(r, lDs, gDs); err != nil {
+ if err := fn(r, nil, gDs); err != nil {
return err
}
}
diff --git a/vendor/src/github.com/docker/libnetwork/endpoint.go b/vendor/src/github.com/docker/libnetwork/endpoint.go
index 5335945690..f84e8cb79c 100644
--- a/vendor/src/github.com/docker/libnetwork/endpoint.go
+++ b/vendor/src/github.com/docker/libnetwork/endpoint.go
@@ -69,6 +69,8 @@ type endpoint struct {
myAliases []string
svcID string
svcName string
+ virtualIP net.IP
+ ingressPorts []*PortConfig
dbIndex uint64
dbExists bool
sync.Mutex
@@ -93,6 +95,8 @@ func (ep *endpoint) MarshalJSON() ([]byte, error) {
epMap["myAliases"] = ep.myAliases
epMap["svcName"] = ep.svcName
epMap["svcID"] = ep.svcID
+ epMap["virtualIP"] = ep.virtualIP.String()
+ epMap["ingressPorts"] = ep.ingressPorts
return json.Marshal(epMap)
}
@@ -186,6 +190,15 @@ func (ep *endpoint) UnmarshalJSON(b []byte) (err error) {
ep.svcID = si.(string)
}
+ if vip, ok := epMap["virtualIP"]; ok {
+ ep.virtualIP = net.ParseIP(vip.(string))
+ }
+
+ pc, _ := json.Marshal(epMap["ingressPorts"])
+ var ingressPorts []*PortConfig
+ json.Unmarshal(pc, &ingressPorts)
+ ep.ingressPorts = ingressPorts
+
ma, _ := json.Marshal(epMap["myAliases"])
var myAliases []string
json.Unmarshal(ma, &myAliases)
@@ -212,6 +225,10 @@ func (ep *endpoint) CopyTo(o datastore.KVObject) error {
dstEp.disableResolution = ep.disableResolution
dstEp.svcName = ep.svcName
dstEp.svcID = ep.svcID
+ dstEp.virtualIP = ep.virtualIP
+
+ dstEp.ingressPorts = make([]*PortConfig, len(ep.ingressPorts))
+ copy(dstEp.ingressPorts, ep.ingressPorts)
if ep.iface != nil {
dstEp.iface = &endpointInterface{}
@@ -429,7 +446,7 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
}()
// Watch for service records
- if !n.getController().cfg.Daemon.IsAgent {
+ if !n.getController().isAgent() {
n.getController().watchSvcRecord(ep)
}
@@ -533,13 +550,16 @@ func (ep *endpoint) rename(name string) error {
n.updateSvcRecord(ep, n.getController().getLocalEps(netWatch), false)
oldName := ep.name
+ oldAnonymous := ep.anonymous
ep.name = name
+ ep.anonymous = false
n.updateSvcRecord(ep, n.getController().getLocalEps(netWatch), true)
defer func() {
if err != nil {
n.updateSvcRecord(ep, n.getController().getLocalEps(netWatch), false)
ep.name = oldName
+ ep.anonymous = oldAnonymous
n.updateSvcRecord(ep, n.getController().getLocalEps(netWatch), true)
}
}()
@@ -756,7 +776,7 @@ func (ep *endpoint) Delete(force bool) error {
}()
// unwatch for service records
- if !n.getController().cfg.Daemon.IsAgent {
+ if !n.getController().isAgent() {
n.getController().unWatchSvcRecord(ep)
}
@@ -832,11 +852,25 @@ func EndpointOptionGeneric(generic map[string]interface{}) EndpointOption {
}
}
+var (
+ linkLocalMask = net.CIDRMask(16, 32)
+ linkLocalMaskIPv6 = net.CIDRMask(64, 128)
+)
+
// CreateOptionIpam function returns an option setter for the ipam configuration for this endpoint
-func CreateOptionIpam(ipV4, ipV6 net.IP, ipamOptions map[string]string) EndpointOption {
+func CreateOptionIpam(ipV4, ipV6 net.IP, llIPs []net.IP, ipamOptions map[string]string) EndpointOption {
return func(ep *endpoint) {
ep.prefAddress = ipV4
ep.prefAddressV6 = ipV6
+ if len(llIPs) != 0 {
+ for _, ip := range llIPs {
+ nw := &net.IPNet{IP: ip, Mask: linkLocalMask}
+ if ip.To4() == nil {
+ nw.Mask = linkLocalMaskIPv6
+ }
+ ep.iface.llAddrs = append(ep.iface.llAddrs, nw)
+ }
+ }
ep.ipamOptions = ipamOptions
}
}
@@ -892,10 +926,12 @@ func CreateOptionAlias(name string, alias string) EndpointOption {
}
// CreateOptionService function returns an option setter for setting service binding configuration
-func CreateOptionService(name, id string) EndpointOption {
+func CreateOptionService(name, id string, vip net.IP, ingressPorts []*PortConfig) EndpointOption {
return func(ep *endpoint) {
ep.svcName = name
ep.svcID = id
+ ep.virtualIP = vip
+ ep.ingressPorts = ingressPorts
}
}
diff --git a/vendor/src/github.com/docker/libnetwork/endpoint_info.go b/vendor/src/github.com/docker/libnetwork/endpoint_info.go
index cc7aa17a66..cf295a4229 100644
--- a/vendor/src/github.com/docker/libnetwork/endpoint_info.go
+++ b/vendor/src/github.com/docker/libnetwork/endpoint_info.go
@@ -43,12 +43,16 @@ type InterfaceInfo interface {
// AddressIPv6 returns the IPv6 address assigned to the endpoint.
AddressIPv6() *net.IPNet
+
+ // LinkLocalAddresses returns the list of link-local (IPv4/IPv6) addresses assigned to the endpoint.
+ LinkLocalAddresses() []*net.IPNet
}
type endpointInterface struct {
mac net.HardwareAddr
addr *net.IPNet
addrv6 *net.IPNet
+ llAddrs []*net.IPNet
srcName string
dstPrefix string
routes []*net.IPNet
@@ -67,6 +71,13 @@ func (epi *endpointInterface) MarshalJSON() ([]byte, error) {
if epi.addrv6 != nil {
epMap["addrv6"] = epi.addrv6.String()
}
+ if len(epi.llAddrs) != 0 {
+ list := make([]string, 0, len(epi.llAddrs))
+ for _, ll := range epi.llAddrs {
+ list = append(list, ll.String())
+ }
+ epMap["llAddrs"] = list
+ }
epMap["srcName"] = epi.srcName
epMap["dstPrefix"] = epi.dstPrefix
var routes []string
@@ -102,7 +113,17 @@ func (epi *endpointInterface) UnmarshalJSON(b []byte) error {
return types.InternalErrorf("failed to decode endpoint interface ipv6 address after json unmarshal: %v", err)
}
}
-
+ if v, ok := epMap["llAddrs"]; ok {
+ list := v.([]interface{})
+ epi.llAddrs = make([]*net.IPNet, 0, len(list))
+ for _, llS := range list {
+ ll, err := types.ParseCIDR(llS.(string))
+ if err != nil {
+ return types.InternalErrorf("failed to decode endpoint interface link-local address (%s) after json unmarshal: %v", llS, err)
+ }
+ epi.llAddrs = append(epi.llAddrs, ll)
+ }
+ }
epi.srcName = epMap["srcName"].(string)
epi.dstPrefix = epMap["dstPrefix"].(string)
@@ -131,6 +152,12 @@ func (epi *endpointInterface) CopyTo(dstEpi *endpointInterface) error {
dstEpi.dstPrefix = epi.dstPrefix
dstEpi.v4PoolID = epi.v4PoolID
dstEpi.v6PoolID = epi.v6PoolID
+ if len(epi.llAddrs) != 0 {
+ dstEpi.llAddrs = make([]*net.IPNet, 0, len(epi.llAddrs))
+ for _, ll := range epi.llAddrs {
+ dstEpi.llAddrs = append(dstEpi.llAddrs, ll)
+ }
+ }
for _, route := range epi.routes {
dstEpi.routes = append(dstEpi.routes, types.GetIPNetCopy(route))
@@ -266,6 +293,10 @@ func (epi *endpointInterface) AddressIPv6() *net.IPNet {
return types.GetIPNetCopy(epi.addrv6)
}
+func (epi *endpointInterface) LinkLocalAddresses() []*net.IPNet {
+ return epi.llAddrs
+}
+
func (epi *endpointInterface) SetNames(srcName string, dstPrefix string) error {
epi.srcName = srcName
epi.dstPrefix = dstPrefix
diff --git a/vendor/src/github.com/docker/libnetwork/error.go b/vendor/src/github.com/docker/libnetwork/error.go
index 4158c9956c..d1291f1db6 100644
--- a/vendor/src/github.com/docker/libnetwork/error.go
+++ b/vendor/src/github.com/docker/libnetwork/error.go
@@ -14,7 +14,7 @@ func (nsn ErrNoSuchNetwork) Error() string {
// NotFound denotes the type of this error
func (nsn ErrNoSuchNetwork) NotFound() {}
-// ErrNoSuchEndpoint is returned when a endpoint query finds no result
+// ErrNoSuchEndpoint is returned when an endpoint query finds no result
type ErrNoSuchEndpoint string
func (nse ErrNoSuchEndpoint) Error() string {
@@ -173,3 +173,13 @@ func (id InvalidContainerIDError) Error() string {
// BadRequest denotes the type of this error
func (id InvalidContainerIDError) BadRequest() {}
+
+// ManagerRedirectError is returned when the request should be redirected to the manager
+type ManagerRedirectError string
+
+func (mr ManagerRedirectError) Error() string {
+ return "Redirect the request to the manager"
+}
+
+// Maskable denotes the type of this error
+func (mr ManagerRedirectError) Maskable() {}
diff --git a/vendor/src/github.com/docker/libnetwork/ipamapi/contract.go b/vendor/src/github.com/docker/libnetwork/ipamapi/contract.go
index 513e482349..e7845f28e7 100644
--- a/vendor/src/github.com/docker/libnetwork/ipamapi/contract.go
+++ b/vendor/src/github.com/docker/libnetwork/ipamapi/contract.go
@@ -80,5 +80,10 @@ type Ipam interface {
// Capability represents the requirements and capabilities of the IPAM driver
type Capability struct {
+ // Whether, on an address request, libnetwork must
+ // specify the endpoint MAC address
RequiresMACAddress bool
+ // Whether, on daemon start, libnetwork must replay the pool
+ // request and the address request for current local networks
+ RequiresRequestReplay bool
}
diff --git a/vendor/src/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go b/vendor/src/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go
index 1c9e852602..5baf515534 100644
--- a/vendor/src/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go
+++ b/vendor/src/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go
@@ -37,5 +37,7 @@ func Init(ic ipamapi.Callback, l, g interface{}) error {
return err
}
- return ic.RegisterIpamDriver(ipamapi.DefaultIPAM, a)
+ cps := &ipamapi.Capability{RequiresRequestReplay: true}
+
+ return ic.RegisterIpamDriverWithCapabilities(ipamapi.DefaultIPAM, a, cps)
}
diff --git a/vendor/src/github.com/docker/libnetwork/ipams/remote/api/api.go b/vendor/src/github.com/docker/libnetwork/ipams/remote/api/api.go
index e357630cbb..394f2c5b18 100644
--- a/vendor/src/github.com/docker/libnetwork/ipams/remote/api/api.go
+++ b/vendor/src/github.com/docker/libnetwork/ipams/remote/api/api.go
@@ -22,12 +22,16 @@ func (r *Response) GetError() string {
// GetCapabilityResponse is the response of GetCapability request
type GetCapabilityResponse struct {
Response
- RequiresMACAddress bool
+ RequiresMACAddress bool
+ RequiresRequestReplay bool
}
// ToCapability converts the capability response into the internal ipam driver capability structure
func (capRes GetCapabilityResponse) ToCapability() *ipamapi.Capability {
- return &ipamapi.Capability{RequiresMACAddress: capRes.RequiresMACAddress}
+ return &ipamapi.Capability{
+ RequiresMACAddress: capRes.RequiresMACAddress,
+ RequiresRequestReplay: capRes.RequiresRequestReplay,
+ }
}
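
A remote IPAM plugin advertises these flags in its capabilities reply; the daemon decodes the JSON body into GetCapabilityResponse and converts it with ToCapability. A hedged sketch of that decode step (the JSON payload is illustrative):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/docker/libnetwork/ipams/remote/api"
    )

    func main() {
    	// Example of what a plugin might return from its
    	// get-capabilities endpoint.
    	body := []byte(`{"RequiresMACAddress":false,"RequiresRequestReplay":true}`)

    	var res api.GetCapabilityResponse
    	if err := json.Unmarshal(body, &res); err != nil {
    		panic(err)
    	}

    	caps := res.ToCapability()
    	fmt.Println(caps.RequiresRequestReplay) // true
    }
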
// GetAddressSpacesResponse is the response to the ``get default address spaces`` request message
diff --git a/vendor/src/github.com/docker/libnetwork/ipvs/constants.go b/vendor/src/github.com/docker/libnetwork/ipvs/constants.go
new file mode 100644
index 0000000000..103e71a37c
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/ipvs/constants.go
@@ -0,0 +1,130 @@
+// +build linux
+
+package ipvs
+
+const (
+ genlCtrlID = 0x10
+)
+
+// GENL control commands
+const (
+ genlCtrlCmdUnspec uint8 = iota
+ genlCtrlCmdNewFamily
+ genlCtrlCmdDelFamily
+ genlCtrlCmdGetFamily
+)
+
+// GENL family attributes
+const (
+ genlCtrlAttrUnspec int = iota
+ genlCtrlAttrFamilyID
+ genlCtrlAttrFamilyName
+)
+
+// IPVS genl commands
+const (
+ ipvsCmdUnspec uint8 = iota
+ ipvsCmdNewService
+ ipvsCmdSetService
+ ipvsCmdDelService
+ ipvsCmdGetService
+ ipvsCmdNewDest
+ ipvsCmdSetDest
+ ipvsCmdDelDest
+ ipvsCmdGetDest
+ ipvsCmdNewDaemon
+ ipvsCmdDelDaemon
+ ipvsCmdGetDaemon
+ ipvsCmdSetConfig
+ ipvsCmdGetConfig
+ ipvsCmdSetInfo
+ ipvsCmdGetInfo
+ ipvsCmdZero
+ ipvsCmdFlush
+)
+
+// Attributes used in the first level of commands
+const (
+ ipvsCmdAttrUnspec int = iota
+ ipvsCmdAttrService
+ ipvsCmdAttrDest
+ ipvsCmdAttrDaemon
+ ipvsCmdAttrTimeoutTCP
+ ipvsCmdAttrTimeoutTCPFin
+ ipvsCmdAttrTimeoutUDP
+)
+
+// Attributes used to describe a service. Used inside nested attribute
+// ipvsCmdAttrService
+const (
+ ipvsSvcAttrUnspec int = iota
+ ipvsSvcAttrAddressFamily
+ ipvsSvcAttrProtocol
+ ipvsSvcAttrAddress
+ ipvsSvcAttrPort
+ ipvsSvcAttrFWMark
+ ipvsSvcAttrSchedName
+ ipvsSvcAttrFlags
+ ipvsSvcAttrTimeout
+ ipvsSvcAttrNetmask
+ ipvsSvcAttrStats
+ ipvsSvcAttrPEName
+)
+
+// Attributes used to describe a destination (real server). Used
+// inside nested attribute ipvsCmdAttrDest.
+const (
+ ipvsDestAttrUnspec int = iota
+ ipvsDestAttrAddress
+ ipvsDestAttrPort
+ ipvsDestAttrForwardingMethod
+ ipvsDestAttrWeight
+ ipvsDestAttrUpperThreshold
+ ipvsDestAttrLowerThreshold
+ ipvsDestAttrActiveConnections
+ ipvsDestAttrInactiveConnections
+ ipvsDestAttrPersistentConnections
+ ipvsDestAttrStats
+)
+
+// Destination forwarding methods
+const (
+ // ConnectionFlagFwdMask is the mask in the connection
+ // flags that selects the forwarding method bits.
+ ConnectionFlagFwdMask = 0x0007
+
+ // ConnectionFlagMasq is used for the masquerade forwarding
+ // method.
+ ConnectionFlagMasq = 0x0000
+
+ // ConnectionFlagLocalNode is used for the local node
+ // forwarding method.
+ ConnectionFlagLocalNode = 0x0001
+
+ // ConnectionFlagTunnel is used for the tunnel mode forwarding
+ // method.
+ ConnectionFlagTunnel = 0x0002
+
+ // ConnectionFlagDirectRoute is used for the direct routing
+ // forwarding method.
+ ConnectionFlagDirectRoute = 0x0003
+)
+
+const (
+ // RoundRobin distributes jobs equally amongst the available
+ // real servers.
+ RoundRobin = "rr"
+
+ // LeastConnection assigns more jobs to real servers with
+ // fewer active jobs.
+ LeastConnection = "lc"
+
+ // DestinationHashing assigns jobs to servers by looking up
+ // a statically assigned hash table keyed by destination IP
+ // address.
+ DestinationHashing = "dh"
+
+ // SourceHashing assigns jobs to servers by looking up a
+ // statically assigned hash table keyed by source IP
+ // address.
+ SourceHashing = "sh"
+)
diff --git a/vendor/src/github.com/docker/libnetwork/ipvs/ipvs.go b/vendor/src/github.com/docker/libnetwork/ipvs/ipvs.go
new file mode 100644
index 0000000000..f5bfe8fef4
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/ipvs/ipvs.go
@@ -0,0 +1,113 @@
+// +build linux
+
+package ipvs
+
+import (
+ "net"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+ "github.com/vishvananda/netns"
+)
+
+// Service defines an IPVS service in its entirety.
+type Service struct {
+ // Virtual service address.
+ Address net.IP
+ Protocol uint16
+ Port uint16
+ FWMark uint32 // Firewall mark of the service.
+
+ // Virtual service options.
+ SchedName string
+ Flags uint32
+ Timeout uint32
+ Netmask uint32
+ AddressFamily uint16
+ PEName string
+}
+
+// Destination defines an IPVS destination (real server) in its
+// entirety.
+type Destination struct {
+ Address net.IP
+ Port uint16
+ Weight int
+ ConnectionFlags uint32
+ AddressFamily uint16
+ UpperThreshold uint32
+ LowerThreshold uint32
+}
+
+// Handle provides a namespace specific ipvs handle to program ipvs
+// rules.
+type Handle struct {
+ sock *nl.NetlinkSocket
+}
+
+// New provides a new ipvs handle in the namespace pointed to by the
+// passed path. It returns a valid handle, or an error if the handle
+// could not be created.
+func New(path string) (*Handle, error) {
+ setup()
+
+ n := netns.None()
+ if path != "" {
+ var err error
+ n, err = netns.GetFromPath(path)
+ if err != nil {
+ return nil, err
+ }
+ }
+ defer n.Close()
+
+ sock, err := nl.GetNetlinkSocketAt(n, netns.None(), syscall.NETLINK_GENERIC)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Handle{sock: sock}, nil
+}
+
+// Close closes the ipvs handle. The handle is invalid after Close
+// returns.
+func (i *Handle) Close() {
+ if i.sock != nil {
+ i.sock.Close()
+ }
+}
+
+// NewService creates a new ipvs service in the passed handle.
+func (i *Handle) NewService(s *Service) error {
+ return i.doCmd(s, nil, ipvsCmdNewService)
+}
+
+// UpdateService updates an already existing service in the passed
+// handle.
+func (i *Handle) UpdateService(s *Service) error {
+ return i.doCmd(s, nil, ipvsCmdSetService)
+}
+
+// DelService deletes an already existing service in the passed
+// handle.
+func (i *Handle) DelService(s *Service) error {
+ return i.doCmd(s, nil, ipvsCmdDelService)
+}
+
+// NewDestination creates a new real server in the passed ipvs
+// service, which should already exist in the passed handle.
+func (i *Handle) NewDestination(s *Service, d *Destination) error {
+ return i.doCmd(s, d, ipvsCmdNewDest)
+}
+
+// UpdateDestination updates an already existing real server in the
+// passed ipvs service in the passed handle.
+func (i *Handle) UpdateDestination(s *Service, d *Destination) error {
+ return i.doCmd(s, d, ipvsCmdSetDest)
+}
+
+// DelDestination deletes an already existing real server in the
+// passed ipvs service in the passed handle.
+func (i *Handle) DelDestination(s *Service, d *Destination) error {
+ return i.doCmd(s, d, ipvsCmdDelDest)
+}
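
This Handle API is what the swarm ingress load balancer programs. A usage sketch, assuming a Linux host with the ip_vs module available and root privileges (addresses and ports are illustrative):

    package main

    import (
    	"net"
    	"syscall"

    	"github.com/docker/libnetwork/ipvs"
    )

    func main() {
    	h, err := ipvs.New("") // empty path: current network namespace
    	if err != nil {
    		panic(err)
    	}
    	defer h.Close()

    	// A TCP virtual service on 10.0.0.1:80, round-robin scheduled.
    	s := &ipvs.Service{
    		AddressFamily: syscall.AF_INET,
    		Address:       net.ParseIP("10.0.0.1"),
    		Protocol:      syscall.IPPROTO_TCP,
    		Port:          80,
    		SchedName:     ipvs.RoundRobin,
    	}
    	if err := h.NewService(s); err != nil {
    		panic(err)
    	}

    	// One masqueraded real server backing the service.
    	d := &ipvs.Destination{
    		AddressFamily:   syscall.AF_INET,
    		Address:         net.ParseIP("10.0.0.2"),
    		Port:            80,
    		Weight:          1,
    		ConnectionFlags: ipvs.ConnectionFlagMasq,
    	}
    	if err := h.NewDestination(s, d); err != nil {
    		panic(err)
    	}
    }
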
diff --git a/vendor/src/github.com/docker/libnetwork/ipvs/netlink.go b/vendor/src/github.com/docker/libnetwork/ipvs/netlink.go
new file mode 100644
index 0000000000..509c288f70
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/ipvs/netlink.go
@@ -0,0 +1,234 @@
+// +build linux
+
+package ipvs
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+ "os/exec"
+ "strings"
+ "sync"
+ "syscall"
+ "unsafe"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/vishvananda/netlink/nl"
+ "github.com/vishvananda/netns"
+)
+
+var (
+ native = nl.NativeEndian()
+ ipvsFamily int
+ ipvsOnce sync.Once
+)
+
+type genlMsgHdr struct {
+ cmd uint8
+ version uint8
+ reserved uint16
+}
+
+type ipvsFlags struct {
+ flags uint32
+ mask uint32
+}
+
+func deserializeGenlMsg(b []byte) (hdr *genlMsgHdr) {
+ return (*genlMsgHdr)(unsafe.Pointer(&b[0:unsafe.Sizeof(*hdr)][0]))
+}
+
+func (hdr *genlMsgHdr) Serialize() []byte {
+ return (*(*[unsafe.Sizeof(*hdr)]byte)(unsafe.Pointer(hdr)))[:]
+}
+
+func (hdr *genlMsgHdr) Len() int {
+ return int(unsafe.Sizeof(*hdr))
+}
+
+func (f *ipvsFlags) Serialize() []byte {
+ return (*(*[unsafe.Sizeof(*f)]byte)(unsafe.Pointer(f)))[:]
+}
+
+func (f *ipvsFlags) Len() int {
+ return int(unsafe.Sizeof(*f))
+}
+
+func setup() {
+ ipvsOnce.Do(func() {
+ var err error
+ if out, err := exec.Command("modprobe", "-va", "ip_vs").CombinedOutput(); err != nil {
+ logrus.Warnf("Running modprobe ip_vs failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err)
+ }
+
+ ipvsFamily, err = getIPVSFamily()
+ if err != nil {
+ logrus.Errorf("Could not get ipvs family information from the kernel. It is possible that ipvs is not enabled in your kernel. Native loadbalancing will not work until this is fixed.")
+ }
+ })
+}
+
+func fillService(s *Service) nl.NetlinkRequestData {
+ cmdAttr := nl.NewRtAttr(ipvsCmdAttrService, nil)
+ nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrAddressFamily, nl.Uint16Attr(s.AddressFamily))
+ if s.FWMark != 0 {
+ nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrFWMark, nl.Uint32Attr(s.FWMark))
+ } else {
+ nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrProtocol, nl.Uint16Attr(s.Protocol))
+ nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrAddress, rawIPData(s.Address))
+
+ // Port needs to be in network byte order.
+ portBuf := new(bytes.Buffer)
+ binary.Write(portBuf, binary.BigEndian, s.Port)
+ nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrPort, portBuf.Bytes())
+ }
+
+ nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrSchedName, nl.ZeroTerminated(s.SchedName))
+ if s.PEName != "" {
+ nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrPEName, nl.ZeroTerminated(s.PEName))
+ }
+
+ f := &ipvsFlags{
+ flags: s.Flags,
+ mask: 0xFFFFFFFF,
+ }
+ nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrFlags, f.Serialize())
+ nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrTimeout, nl.Uint32Attr(s.Timeout))
+ nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrNetmask, nl.Uint32Attr(s.Netmask))
+ return cmdAttr
+}
+
+func fillDestination(d *Destination) nl.NetlinkRequestData {
+ cmdAttr := nl.NewRtAttr(ipvsCmdAttrDest, nil)
+
+ nl.NewRtAttrChild(cmdAttr, ipvsDestAttrAddress, rawIPData(d.Address))
+ // Port needs to be in network byte order.
+ portBuf := new(bytes.Buffer)
+ binary.Write(portBuf, binary.BigEndian, d.Port)
+ nl.NewRtAttrChild(cmdAttr, ipvsDestAttrPort, portBuf.Bytes())
+
+ nl.NewRtAttrChild(cmdAttr, ipvsDestAttrForwardingMethod, nl.Uint32Attr(d.ConnectionFlags&ConnectionFlagFwdMask))
+ nl.NewRtAttrChild(cmdAttr, ipvsDestAttrWeight, nl.Uint32Attr(uint32(d.Weight)))
+ nl.NewRtAttrChild(cmdAttr, ipvsDestAttrUpperThreshold, nl.Uint32Attr(d.UpperThreshold))
+ nl.NewRtAttrChild(cmdAttr, ipvsDestAttrLowerThreshold, nl.Uint32Attr(d.LowerThreshold))
+
+ return cmdAttr
+}
+
+func (i *Handle) doCmd(s *Service, d *Destination, cmd uint8) error {
+ req := newIPVSRequest(cmd)
+ req.AddData(fillService(s))
+
+ if d != nil {
+ req.AddData(fillDestination(d))
+ }
+
+ if _, err := execute(i.sock, req, 0); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func getIPVSFamily() (int, error) {
+ sock, err := nl.GetNetlinkSocketAt(netns.None(), netns.None(), syscall.NETLINK_GENERIC)
+ if err != nil {
+ return 0, err
+ }
+
+ req := newGenlRequest(genlCtrlID, genlCtrlCmdGetFamily)
+ req.AddData(nl.NewRtAttr(genlCtrlAttrFamilyName, nl.ZeroTerminated("IPVS")))
+
+ msgs, err := execute(sock, req, 0)
+ if err != nil {
+ return 0, err
+ }
+
+ for _, m := range msgs {
+ hdr := deserializeGenlMsg(m)
+ attrs, err := nl.ParseRouteAttr(m[hdr.Len():])
+ if err != nil {
+ return 0, err
+ }
+
+ for _, attr := range attrs {
+ switch int(attr.Attr.Type) {
+ case genlCtrlAttrFamilyID:
+ return int(native.Uint16(attr.Value[0:2])), nil
+ }
+ }
+ }
+
+ return 0, fmt.Errorf("no family id in the netlink response")
+}
+
+func rawIPData(ip net.IP) []byte {
+ family := nl.GetIPFamily(ip)
+ if family == nl.FAMILY_V4 {
+ return ip.To4()
+ }
+
+ return ip
+}
+
+func newIPVSRequest(cmd uint8) *nl.NetlinkRequest {
+ return newGenlRequest(ipvsFamily, cmd)
+}
+
+func newGenlRequest(familyID int, cmd uint8) *nl.NetlinkRequest {
+ req := nl.NewNetlinkRequest(familyID, syscall.NLM_F_ACK)
+ req.AddData(&genlMsgHdr{cmd: cmd, version: 1})
+ return req
+}
+
+func execute(s *nl.NetlinkSocket, req *nl.NetlinkRequest, resType uint16) ([][]byte, error) {
+ var (
+ err error
+ )
+
+ if err := s.Send(req); err != nil {
+ return nil, err
+ }
+
+ pid, err := s.GetPid()
+ if err != nil {
+ return nil, err
+ }
+
+ var res [][]byte
+
+done:
+ for {
+ msgs, err := s.Receive()
+ if err != nil {
+ return nil, err
+ }
+ for _, m := range msgs {
+ if m.Header.Seq != req.Seq {
+ return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq)
+ }
+ if m.Header.Pid != pid {
+ return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
+ }
+ if m.Header.Type == syscall.NLMSG_DONE {
+ break done
+ }
+ if m.Header.Type == syscall.NLMSG_ERROR {
+ error := int32(native.Uint32(m.Data[0:4]))
+ if error == 0 {
+ break done
+ }
+ return nil, syscall.Errno(-error)
+ }
+ if resType != 0 && m.Header.Type != resType {
+ continue
+ }
+ res = append(res, m.Data)
+ if m.Header.Flags&syscall.NLM_F_MULTI == 0 {
+ break done
+ }
+ }
+ }
+ return res, nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/netutils/utils_linux.go b/vendor/src/github.com/docker/libnetwork/netutils/utils_linux.go
index f1e73d2297..7781bc6a09 100644
--- a/vendor/src/github.com/docker/libnetwork/netutils/utils_linux.go
+++ b/vendor/src/github.com/docker/libnetwork/netutils/utils_linux.go
@@ -9,6 +9,7 @@ import (
"strings"
"github.com/docker/libnetwork/ipamutils"
+ "github.com/docker/libnetwork/ns"
"github.com/docker/libnetwork/osl"
"github.com/docker/libnetwork/resolvconf"
"github.com/docker/libnetwork/types"
@@ -16,16 +17,18 @@ import (
)
var (
- networkGetRoutesFct = netlink.RouteList
+ networkGetRoutesFct func(netlink.Link, int) ([]netlink.Route, error)
)
// CheckRouteOverlaps checks whether the passed network overlaps with any existing routes
func CheckRouteOverlaps(toCheck *net.IPNet) error {
+ if networkGetRoutesFct == nil {
+ networkGetRoutesFct = ns.NlHandle().RouteList
+ }
networks, err := networkGetRoutesFct(nil, netlink.FAMILY_V4)
if err != nil {
return err
}
-
for _, network := range networks {
if network.Dst != nil && NetworkOverlaps(toCheck, network.Dst) {
return ErrNetworkOverlaps
@@ -37,13 +40,18 @@ func CheckRouteOverlaps(toCheck *net.IPNet) error {
// GenerateIfaceName returns an interface name using the passed-in
// prefix and the length of random bytes. The API ensures that no
// interface with that name already exists.
-func GenerateIfaceName(prefix string, len int) (string, error) {
+func GenerateIfaceName(nlh *netlink.Handle, prefix string, len int) (string, error) {
+ linkByName := netlink.LinkByName
+ if nlh != nil {
+ linkByName = nlh.LinkByName
+ }
for i := 0; i < 3; i++ {
name, err := GenerateRandomName(prefix, len)
if err != nil {
continue
}
- if _, err := netlink.LinkByName(name); err != nil {
+ _, err = linkByName(name)
+ if err != nil {
if strings.Contains(err.Error(), "not found") {
return name, nil
}
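
GenerateIfaceName now threads an optional *netlink.Handle through and falls back to the package-level netlink.LinkByName when nil is passed, preserving the old behavior. A hedged usage sketch (Linux only; prefix and length are illustrative):

    package main

    import (
    	"fmt"

    	"github.com/docker/libnetwork/netutils"
    )

    func main() {
    	// nil handle: query the host namespace via netlink.LinkByName,
    	// exactly as before this change.
    	name, err := netutils.GenerateIfaceName(nil, "veth", 7)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(name) // e.g. "veth" plus a 7-char random suffix
    }
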
@@ -67,13 +75,13 @@ func ElectInterfaceAddresses(name string) (*net.IPNet, []*net.IPNet, error) {
defer osl.InitOSContext()()
- link, _ := netlink.LinkByName(name)
+ link, _ := ns.NlHandle().LinkByName(name)
if link != nil {
- v4addr, err := netlink.AddrList(link, netlink.FAMILY_V4)
+ v4addr, err := ns.NlHandle().AddrList(link, netlink.FAMILY_V4)
if err != nil {
return nil, nil, err
}
- v6addr, err := netlink.AddrList(link, netlink.FAMILY_V6)
+ v6addr, err := ns.NlHandle().AddrList(link, netlink.FAMILY_V6)
if err != nil {
return nil, nil, err
}
diff --git a/vendor/src/github.com/docker/libnetwork/network.go b/vendor/src/github.com/docker/libnetwork/network.go
index 5072e088c2..c4e988fadd 100644
--- a/vendor/src/github.com/docker/libnetwork/network.go
+++ b/vendor/src/github.com/docker/libnetwork/network.go
@@ -64,6 +64,7 @@ type NetworkInfo interface {
IPv6Enabled() bool
Internal() bool
Labels() map[string]string
+ Dynamic() bool
}
// EndpointWalker is a client provided function which will be used to walk the Endpoints.
@@ -74,6 +75,20 @@ type svcInfo struct {
svcMap map[string][]net.IP
svcIPv6Map map[string][]net.IP
ipMap map[string]string
+ service map[string][]servicePorts
+}
+
+// serviceTarget holds the info of a backing container or host
+type serviceTarget struct {
+ name string
+ ip net.IP
+ port uint16
+}
+
+type servicePorts struct {
+ portName string
+ proto string
+ target []serviceTarget
}
// IpamConf contains all the ipam related configurations for a network
@@ -171,7 +186,9 @@ type network struct {
drvOnce *sync.Once
internal bool
inDelete bool
+ ingress bool
driverTables []string
+ dynamic bool
sync.Mutex
}
@@ -312,6 +329,7 @@ func (n *network) CopyTo(o datastore.KVObject) error {
dstN.drvOnce = n.drvOnce
dstN.internal = n.internal
dstN.inDelete = n.inDelete
+ dstN.ingress = n.ingress
// copy labels
if dstN.labels == nil {
@@ -418,6 +436,7 @@ func (n *network) MarshalJSON() ([]byte, error) {
}
netMap["internal"] = n.internal
netMap["inDelete"] = n.inDelete
+ netMap["ingress"] = n.ingress
return json.Marshal(netMap)
}
@@ -508,6 +527,9 @@ func (n *network) UnmarshalJSON(b []byte) (err error) {
if v, ok := netMap["inDelete"]; ok {
n.inDelete = v.(bool)
}
+ if v, ok := netMap["ingress"]; ok {
+ n.ingress = v.(bool)
+ }
// Reconcile old networks with the recently added `--ipv6` flag
if !n.enableIPv6 {
n.enableIPv6 = len(n.ipamV6Info) > 0
@@ -539,6 +561,14 @@ func NetworkOptionGeneric(generic map[string]interface{}) NetworkOption {
}
}
+// NetworkOptionIngress returns an option setter to indicate if a network is
+// an ingress network.
+func NetworkOptionIngress() NetworkOption {
+ return func(n *network) {
+ n.ingress = true
+ }
+}
+
// NetworkOptionPersist returns an option setter to set persistence policy for a network
func NetworkOptionPersist(persist bool) NetworkOption {
return func(n *network) {
@@ -603,6 +633,13 @@ func NetworkOptionLabels(labels map[string]string) NetworkOption {
}
}
+// NetworkOptionDynamic returns an option setter to mark a network as dynamic
+func NetworkOptionDynamic() NetworkOption {
+ return func(n *network) {
+ n.dynamic = true
+ }
+}
+
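
NetworkOptionIngress and NetworkOptionDynamic follow libnetwork's functional-option pattern: each returns a closure that flips a flag on the network at creation time. A sketch of how a caller composes them, assuming an initialized controller and the NewNetwork(driverName, name, options...) shape in use at this point:

    package sketch

    import (
    	"fmt"

    	"github.com/docker/libnetwork"
    )

    // createIngress is a sketch only; the network and driver names
    // are illustrative.
    func createIngress(c libnetwork.NetworkController) error {
    	nw, err := c.NewNetwork("overlay", "ingress",
    		libnetwork.NetworkOptionIngress(),
    		libnetwork.NetworkOptionDynamic(),
    	)
    	if err != nil {
    		return err
    	}
    	fmt.Println(nw.Info().Dynamic()) // true
    	return nil
    }
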
// NetworkOptionDeferIPv6Alloc instructs the network to defer the IPv6 address allocation until after the endpoint has been created.
// It is provided to support the specific docker daemon flags where the user can deterministically assign an IPv6 address
// to a container as a combination of fixed-cidr-v6 + mac-address
@@ -669,7 +706,7 @@ func (n *network) driver(load bool) (driverapi.Driver, error) {
if cap != nil {
n.scope = cap.DataScope
}
- if c.cfg.Daemon.IsAgent {
+ if c.isAgent() {
// If we are running in agent mode then all networks
// in libnetwork are local scope regardless of the
// backing driver.
@@ -800,6 +837,12 @@ func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoi
ep.processOptions(options...)
+ for _, llIPNet := range ep.Iface().LinkLocalAddresses() {
+ if !llIPNet.IP.IsLinkLocalUnicast() {
+ return nil, types.BadRequestErrorf("invalid link local IP address: %v", llIPNet.IP)
+ }
+ }
+
if opt, ok := ep.generic[netlabel.MacAddress]; ok {
if mac, ok := opt.(net.HardwareAddr); ok {
ep.iface.mac = mac
@@ -1196,6 +1239,7 @@ func (n *network) ipamAllocateVersion(ipVer int, ipam ipamapi.Ipam) error {
d := &IpamInfo{}
(*infoList)[i] = d
+ d.AddressSpace = n.addrSpace
d.PoolID, d.Pool, d.Meta, err = n.requestPoolHelper(ipam, n.addrSpace, cfg.PreferredPool, cfg.SubPool, n.ipamOptions, ipVer == 6)
if err != nil {
return err
@@ -1427,6 +1471,13 @@ func (n *network) Internal() bool {
return n.internal
}
+func (n *network) Dynamic() bool {
+ n.Lock()
+ defer n.Unlock()
+
+ return n.dynamic
+}
+
func (n *network) IPv6Enabled() bool {
n.Lock()
defer n.Unlock()
diff --git a/vendor/src/github.com/docker/libnetwork/networkdb/broadcast.go b/vendor/src/github.com/docker/libnetwork/networkdb/broadcast.go
index a1c3c61c84..2e07729569 100644
--- a/vendor/src/github.com/docker/libnetwork/networkdb/broadcast.go
+++ b/vendor/src/github.com/docker/libnetwork/networkdb/broadcast.go
@@ -5,20 +5,6 @@ import (
"github.com/hashicorp/serf/serf"
)
-type networkEventType uint8
-
-const (
- networkJoin networkEventType = 1 + iota
- networkLeave
-)
-
-type networkEventData struct {
- Event networkEventType
- LTime serf.LamportTime
- NodeName string
- NetworkID string
-}
-
type networkEventMessage struct {
id string
node string
@@ -37,15 +23,15 @@ func (m *networkEventMessage) Message() []byte {
func (m *networkEventMessage) Finished() {
}
-func (nDB *NetworkDB) sendNetworkEvent(nid string, event networkEventType, ltime serf.LamportTime) error {
- nEvent := networkEventData{
- Event: event,
+func (nDB *NetworkDB) sendNetworkEvent(nid string, event NetworkEvent_Type, ltime serf.LamportTime) error {
+ nEvent := NetworkEvent{
+ Type: event,
LTime: ltime,
NodeName: nDB.config.NodeName,
NetworkID: nid,
}
- raw, err := encodeMessage(networkEventMsg, &nEvent)
+ raw, err := encodeMessage(MessageTypeNetworkEvent, &nEvent)
if err != nil {
return err
}
@@ -58,24 +44,6 @@ func (nDB *NetworkDB) sendNetworkEvent(nid string, event networkEventType, ltime
return nil
}
-type tableEventType uint8
-
-const (
- tableEntryCreate tableEventType = 1 + iota
- tableEntryUpdate
- tableEntryDelete
-)
-
-type tableEventData struct {
- Event tableEventType
- LTime serf.LamportTime
- NetworkID string
- TableName string
- NodeName string
- Value []byte
- Key string
-}
-
type tableEventMessage struct {
id string
tname string
@@ -96,9 +64,9 @@ func (m *tableEventMessage) Message() []byte {
func (m *tableEventMessage) Finished() {
}
-func (nDB *NetworkDB) sendTableEvent(event tableEventType, nid string, tname string, key string, entry *entry) error {
- tEvent := tableEventData{
- Event: event,
+func (nDB *NetworkDB) sendTableEvent(event TableEvent_Type, nid string, tname string, key string, entry *entry) error {
+ tEvent := TableEvent{
+ Type: event,
LTime: entry.ltime,
NodeName: nDB.config.NodeName,
NetworkID: nid,
@@ -107,15 +75,31 @@ func (nDB *NetworkDB) sendTableEvent(event tableEventType, nid string, tname str
Value: entry.value,
}
- raw, err := encodeMessage(tableEventMsg, &tEvent)
+ raw, err := encodeMessage(MessageTypeTableEvent, &tEvent)
if err != nil {
return err
}
+ var broadcastQ *memberlist.TransmitLimitedQueue
nDB.RLock()
- broadcastQ := nDB.networks[nDB.config.NodeName][nid].tableBroadcasts
+ thisNodeNetworks, ok := nDB.networks[nDB.config.NodeName]
+ if ok {
+ // The network may have been removed
+ network, networkOk := thisNodeNetworks[nid]
+ if !networkOk {
+ nDB.RUnlock()
+ return nil
+ }
+
+ broadcastQ = network.tableBroadcasts
+ }
nDB.RUnlock()
+ // The network may have been removed
+ if broadcastQ == nil {
+ return nil
+ }
+
broadcastQ.QueueBroadcast(&tableEventMessage{
msg: raw,
id: nid,
diff --git a/vendor/src/github.com/docker/libnetwork/networkdb/cluster.go b/vendor/src/github.com/docker/libnetwork/networkdb/cluster.go
index 317f1e5974..7b1384510c 100644
--- a/vendor/src/github.com/docker/libnetwork/networkdb/cluster.go
+++ b/vendor/src/github.com/docker/libnetwork/networkdb/cluster.go
@@ -1,6 +1,7 @@
package networkdb
import (
+ "bytes"
"crypto/rand"
"fmt"
"math/big"
@@ -10,7 +11,6 @@ import (
"github.com/Sirupsen/logrus"
"github.com/hashicorp/memberlist"
- "github.com/hashicorp/serf/serf"
)
const reapInterval = 2 * time.Second
@@ -34,6 +34,46 @@ func (l *logWriter) Write(p []byte) (int, error) {
return len(p), nil
}
+// SetKey adds a new key to the key ring
+func (nDB *NetworkDB) SetKey(key []byte) {
+ for _, dbKey := range nDB.config.Keys {
+ if bytes.Equal(key, dbKey) {
+ return
+ }
+ }
+ nDB.config.Keys = append(nDB.config.Keys, key)
+ if nDB.keyring != nil {
+ nDB.keyring.AddKey(key)
+ }
+}
+
+// SetPrimaryKey sets the given key as the primary key. The key must
+// have been added beforehand through SetKey.
+func (nDB *NetworkDB) SetPrimaryKey(key []byte) {
+ for _, dbKey := range nDB.config.Keys {
+ if bytes.Equal(key, dbKey) {
+ if nDB.keyring != nil {
+ nDB.keyring.UseKey(dbKey)
+ }
+ break
+ }
+ }
+}
+
+// RemoveKey removes a key from the key ring. The key being removed
+// can't be the primary key
+func (nDB *NetworkDB) RemoveKey(key []byte) {
+ for i, dbKey := range nDB.config.Keys {
+ if bytes.Equal(key, dbKey) {
+ nDB.config.Keys = append(nDB.config.Keys[:i], nDB.config.Keys[i+1:]...)
+ if nDB.keyring != nil {
+ nDB.keyring.RemoveKey(dbKey)
+ }
+ break
+ }
+ }
+}
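
Taken together, SetKey, SetPrimaryKey, and RemoveKey support online rotation of the gossip encryption key. A sketch of the intended sequence (nDB is an assumed *networkdb.NetworkDB):

    package sketch

    import "github.com/docker/libnetwork/networkdb"

    // rotateKey rotates the gossip key in three steps. Order matters:
    // a key must be on the ring (SetKey) before it can be promoted,
    // and the key being removed must never be the current primary.
    func rotateKey(nDB *networkdb.NetworkDB, oldKey, newKey []byte) {
    	nDB.SetKey(newKey)        // 1. add newKey alongside the old one
    	nDB.SetPrimaryKey(newKey) // 2. start encrypting with newKey
    	nDB.RemoveKey(oldKey)     // 3. retire oldKey once no peer needs it
    }
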
+
func (nDB *NetworkDB) clusterInit() error {
config := memberlist.DefaultLANConfig()
config.Name = nDB.config.NodeName
@@ -48,6 +88,15 @@ func (nDB *NetworkDB) clusterInit() error {
config.Events = &eventDelegate{nDB: nDB}
config.LogOutput = &logWriter{}
+ var err error
+ if len(nDB.config.Keys) > 0 {
+ nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0])
+ if err != nil {
+ return err
+ }
+ config.Keyring = nDB.keyring
+ }
+
nDB.networkBroadcasts = &memberlist.TransmitLimitedQueue{
NumNodes: func() int {
return len(nDB.nodes)
@@ -144,7 +193,10 @@ func (nDB *NetworkDB) reapNetworks() {
}
func (nDB *NetworkDB) reapTableEntries() {
- var paths []string
+ var (
+ paths []string
+ entries []*entry
+ )
now := time.Now()
@@ -160,12 +212,14 @@ func (nDB *NetworkDB) reapTableEntries() {
}
paths = append(paths, path)
+ entries = append(entries, entry)
return false
})
nDB.RUnlock()
nDB.Lock()
- for _, path := range paths {
+ for i, path := range paths {
+ entry := entries[i]
params := strings.Split(path[1:], "/")
tname := params[0]
nid := params[1]
@@ -178,6 +232,8 @@ func (nDB *NetworkDB) reapTableEntries() {
if _, ok := nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key)); !ok {
logrus.Errorf("Could not delete entry in network %s with table name %s and key %s as it does not exist", nid, tname, key)
}
+
+ nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, entry.value))
}
nDB.Unlock()
}
@@ -185,7 +241,8 @@ func (nDB *NetworkDB) reapTableEntries() {
func (nDB *NetworkDB) gossip() {
networkNodes := make(map[string][]string)
nDB.RLock()
- for nid := range nDB.networks[nDB.config.NodeName] {
+ thisNodeNetworks := nDB.networks[nDB.config.NodeName]
+ for nid := range thisNodeNetworks {
networkNodes[nid] = nDB.networkNodes[nid]
}
@@ -196,8 +253,17 @@ func (nDB *NetworkDB) gossip() {
bytesAvail := udpSendBuf - compoundHeaderOverhead
nDB.RLock()
- broadcastQ := nDB.networks[nDB.config.NodeName][nid].tableBroadcasts
+ network, ok := thisNodeNetworks[nid]
nDB.RUnlock()
+ if !ok || network == nil {
+ // It is normal for the network to be removed
+ // between the time we collect this node's
+ // network attachments and the time we process
+ // them here.
+ continue
+ }
+
+ broadcastQ := network.tableBroadcasts
if broadcastQ == nil {
logrus.Errorf("Invalid broadcastQ encountered while gossiping for network %s", nid)
@@ -222,21 +288,13 @@ func (nDB *NetworkDB) gossip() {
}
// Send the compound message
- if err := nDB.memberlist.SendToUDP(mnode, compound.Bytes()); err != nil {
+ if err := nDB.memberlist.SendToUDP(mnode, compound); err != nil {
logrus.Errorf("Failed to send gossip to %s: %s", mnode.Addr, err)
}
}
}
}
-type bulkSyncMessage struct {
- LTime serf.LamportTime
- Unsolicited bool
- NodeName string
- Networks []string
- Payload []byte
-}
-
func (nDB *NetworkDB) bulkSyncTables() {
var networks []string
nDB.RLock()
@@ -331,8 +389,8 @@ func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited b
}
params := strings.Split(path[1:], "/")
- tEvent := tableEventData{
- Event: tableEntryCreate,
+ tEvent := TableEvent{
+ Type: TableEventTypeCreate,
LTime: entry.ltime,
NodeName: entry.node,
NetworkID: nid,
@@ -341,7 +399,7 @@ func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited b
Value: entry.value,
}
- msg, err := encodeMessage(tableEventMsg, &tEvent)
+ msg, err := encodeMessage(MessageTypeTableEvent, &tEvent)
if err != nil {
logrus.Errorf("Encode failure during bulk sync: %#v", tEvent)
return false
@@ -356,15 +414,15 @@ func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited b
// Create a compound message
compound := makeCompoundMessage(msgs)
- bsm := bulkSyncMessage{
+ bsm := BulkSyncMessage{
LTime: nDB.tableClock.Time(),
Unsolicited: unsolicited,
NodeName: nDB.config.NodeName,
Networks: networks,
- Payload: compound.Bytes(),
+ Payload: compound,
}
- buf, err := encodeMessage(bulkSyncMsg, &bsm)
+ buf, err := encodeMessage(MessageTypeBulkSync, &bsm)
if err != nil {
return fmt.Errorf("failed to encode bulk sync message: %v", err)
}
@@ -383,16 +441,19 @@ func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited b
return fmt.Errorf("failed to send a TCP message during bulk sync: %v", err)
}
- startTime := time.Now()
- select {
- case <-time.After(30 * time.Second):
- logrus.Errorf("Bulk sync to node %s timed out", node)
- case <-ch:
- nDB.Lock()
- delete(nDB.bulkSyncAckTbl, node)
- nDB.Unlock()
-
- logrus.Debugf("%s: Bulk sync to node %s took %s", nDB.config.NodeName, node, time.Now().Sub(startTime))
+ // Wait on a response only if it is unsolicited.
+ if unsolicited {
+ startTime := time.Now()
+ select {
+ case <-time.After(30 * time.Second):
+ logrus.Errorf("Bulk sync to node %s timed out", node)
+ case <-ch:
+ nDB.Lock()
+ delete(nDB.bulkSyncAckTbl, node)
+ nDB.Unlock()
+
+ logrus.Debugf("%s: Bulk sync to node %s took %s", nDB.config.NodeName, node, time.Now().Sub(startTime))
+ }
}
return nil
diff --git a/vendor/src/github.com/docker/libnetwork/networkdb/delegate.go b/vendor/src/github.com/docker/libnetwork/networkdb/delegate.go
index f2c7b2ff76..596edc5eee 100644
--- a/vendor/src/github.com/docker/libnetwork/networkdb/delegate.go
+++ b/vendor/src/github.com/docker/libnetwork/networkdb/delegate.go
@@ -5,21 +5,9 @@ import (
"time"
"github.com/Sirupsen/logrus"
- "github.com/hashicorp/serf/serf"
+ "github.com/gogo/protobuf/proto"
)
-type networkData struct {
- LTime serf.LamportTime
- ID string
- NodeName string
- Leaving bool
-}
-
-type networkPushPull struct {
- LTime serf.LamportTime
- Networks []networkData
-}
-
type delegate struct {
nDB *NetworkDB
}
@@ -28,7 +16,7 @@ func (d *delegate) NodeMeta(limit int) []byte {
return []byte{}
}
-func (nDB *NetworkDB) handleNetworkEvent(nEvent *networkEventData) bool {
+func (nDB *NetworkDB) handleNetworkEvent(nEvent *NetworkEvent) bool {
// Update our local clock if the received message has a newer
// time.
nDB.networkClock.Witness(nEvent.LTime)
@@ -39,7 +27,7 @@ func (nDB *NetworkDB) handleNetworkEvent(nEvent *networkEventData) bool {
nodeNetworks, ok := nDB.networks[nEvent.NodeName]
if !ok {
// We haven't heard about this node at all. Ignore the leave
- if nEvent.Event == networkLeave {
+ if nEvent.Type == NetworkEventTypeLeave {
return false
}
@@ -55,7 +43,7 @@ func (nDB *NetworkDB) handleNetworkEvent(nEvent *networkEventData) bool {
}
n.ltime = nEvent.LTime
- n.leaving = nEvent.Event == networkLeave
+ n.leaving = nEvent.Type == NetworkEventTypeLeave
if n.leaving {
n.leaveTime = time.Now()
}
@@ -63,7 +51,7 @@ func (nDB *NetworkDB) handleNetworkEvent(nEvent *networkEventData) bool {
return true
}
- if nEvent.Event == networkLeave {
+ if nEvent.Type == NetworkEventTypeLeave {
return false
}
@@ -77,7 +65,7 @@ func (nDB *NetworkDB) handleNetworkEvent(nEvent *networkEventData) bool {
return true
}
-func (nDB *NetworkDB) handleTableEvent(tEvent *tableEventData) bool {
+func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool {
// Update our local clock if the received message has a newer
// time.
nDB.tableClock.Witness(tEvent.LTime)
@@ -94,7 +82,7 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *tableEventData) bool {
ltime: tEvent.LTime,
node: tEvent.NodeName,
value: tEvent.Value,
- deleting: tEvent.Event == tableEntryDelete,
+ deleting: tEvent.Type == TableEventTypeDelete,
}
if entry.deleting {
@@ -107,12 +95,12 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *tableEventData) bool {
nDB.Unlock()
var op opType
- switch tEvent.Event {
- case tableEntryCreate:
+ switch tEvent.Type {
+ case TableEventTypeCreate:
op = opCreate
- case tableEntryUpdate:
+ case TableEventTypeUpdate:
op = opUpdate
- case tableEntryDelete:
+ case TableEventTypeDelete:
op = opDelete
}
@@ -120,36 +108,35 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *tableEventData) bool {
return true
}
-func (nDB *NetworkDB) handleCompound(buf []byte) {
+func (nDB *NetworkDB) handleCompound(buf []byte, isBulkSync bool) {
// Decode the parts
- trunc, parts, err := decodeCompoundMessage(buf[1:])
+ parts, err := decodeCompoundMessage(buf)
if err != nil {
logrus.Errorf("Failed to decode compound request: %v", err)
return
}
- // Log any truncation
- if trunc > 0 {
- logrus.Warnf("Compound request had %d truncated messages", trunc)
- }
-
// Handle each message
for _, part := range parts {
- nDB.handleMessage(part)
+ nDB.handleMessage(part, isBulkSync)
}
}
-func (nDB *NetworkDB) handleTableMessage(buf []byte) {
- var tEvent tableEventData
- if err := decodeMessage(buf[1:], &tEvent); err != nil {
+func (nDB *NetworkDB) handleTableMessage(buf []byte, isBulkSync bool) {
+ var tEvent TableEvent
+ if err := proto.Unmarshal(buf, &tEvent); err != nil {
logrus.Errorf("Error decoding table event message: %v", err)
return
}
- if rebroadcast := nDB.handleTableEvent(&tEvent); rebroadcast {
- // Copy the buffer since we cannot rely on the slice not changing
- newBuf := make([]byte, len(buf))
- copy(newBuf, buf)
+ // Do not rebroadcast a bulk sync
+ if rebroadcast := nDB.handleTableEvent(&tEvent); rebroadcast && !isBulkSync {
+ var err error
+ buf, err = encodeRawMessage(MessageTypeTableEvent, buf)
+ if err != nil {
+ logrus.Errorf("Error marshalling gossip message for network event rebroadcast: %v", err)
+ return
+ }
nDB.RLock()
n, ok := nDB.networks[nDB.config.NodeName][tEvent.NetworkID]
@@ -160,8 +147,13 @@ func (nDB *NetworkDB) handleTableMessage(buf []byte) {
}
broadcastQ := n.tableBroadcasts
+
+ if broadcastQ == nil {
+ return
+ }
+
broadcastQ.QueueBroadcast(&tableEventMessage{
- msg: newBuf,
+ msg: buf,
id: tEvent.NetworkID,
tname: tEvent.TableName,
key: tEvent.Key,
@@ -171,19 +163,22 @@ func (nDB *NetworkDB) handleTableMessage(buf []byte) {
}
func (nDB *NetworkDB) handleNetworkMessage(buf []byte) {
- var nEvent networkEventData
- if err := decodeMessage(buf[1:], &nEvent); err != nil {
+ var nEvent NetworkEvent
+ if err := proto.Unmarshal(buf, &nEvent); err != nil {
logrus.Errorf("Error decoding network event message: %v", err)
return
}
if rebroadcast := nDB.handleNetworkEvent(&nEvent); rebroadcast {
- // Copy the buffer since it we cannot rely on the slice not changing
- newBuf := make([]byte, len(buf))
- copy(newBuf, buf)
+ var err error
+ buf, err = encodeRawMessage(MessageTypeNetworkEvent, buf)
+ if err != nil {
+ logrus.Errorf("Error marshalling gossip message for network event rebroadcast: %v", err)
+ return
+ }
nDB.networkBroadcasts.QueueBroadcast(&networkEventMessage{
- msg: newBuf,
+ msg: buf,
id: nEvent.NetworkID,
node: nEvent.NodeName,
})
@@ -191,8 +186,8 @@ func (nDB *NetworkDB) handleNetworkMessage(buf []byte) {
}
func (nDB *NetworkDB) handleBulkSync(buf []byte) {
- var bsm bulkSyncMessage
- if err := decodeMessage(buf[1:], &bsm); err != nil {
+ var bsm BulkSyncMessage
+ if err := proto.Unmarshal(buf, &bsm); err != nil {
logrus.Errorf("Error decoding bulk sync message: %v", err)
return
}
@@ -201,7 +196,7 @@ func (nDB *NetworkDB) handleBulkSync(buf []byte) {
nDB.tableClock.Witness(bsm.LTime)
}
- nDB.handleMessage(bsm.Payload)
+ nDB.handleMessage(bsm.Payload, true)
// Respond only to an unsolicited bulk sync
if !bsm.Unsolicited {
@@ -220,20 +215,24 @@ func (nDB *NetworkDB) handleBulkSync(buf []byte) {
}
}
-func (nDB *NetworkDB) handleMessage(buf []byte) {
- msgType := messageType(buf[0])
-
- switch msgType {
- case networkEventMsg:
- nDB.handleNetworkMessage(buf)
- case tableEventMsg:
- nDB.handleTableMessage(buf)
- case compoundMsg:
- nDB.handleCompound(buf)
- case bulkSyncMsg:
- nDB.handleBulkSync(buf)
+func (nDB *NetworkDB) handleMessage(buf []byte, isBulkSync bool) {
+ mType, data, err := decodeMessage(buf)
+ if err != nil {
+ logrus.Errorf("Error decoding gossip message to get message type: %v", err)
+ return
+ }
+
+ switch mType {
+ case MessageTypeNetworkEvent:
+ nDB.handleNetworkMessage(data)
+ case MessageTypeTableEvent:
+ nDB.handleTableMessage(data, isBulkSync)
+ case MessageTypeBulkSync:
+ nDB.handleBulkSync(data)
+ case MessageTypeCompound:
+ nDB.handleCompound(data, isBulkSync)
default:
- logrus.Errorf("%s: unknown message type %d payload = %v", nDB.config.NodeName, msgType, buf[:8])
+ logrus.Errorf("%s: unknown message type %d", nDB.config.NodeName, mType)
}
}
@@ -242,7 +241,7 @@ func (d *delegate) NotifyMsg(buf []byte) {
return
}
- d.nDB.handleMessage(buf)
+ d.nDB.handleMessage(buf, false)
}
func (d *delegate) GetBroadcasts(overhead, limit int) [][]byte {
@@ -253,22 +252,22 @@ func (d *delegate) LocalState(join bool) []byte {
d.nDB.RLock()
defer d.nDB.RUnlock()
- pp := networkPushPull{
+ pp := NetworkPushPull{
LTime: d.nDB.networkClock.Time(),
}
for name, nn := range d.nDB.networks {
for _, n := range nn {
- pp.Networks = append(pp.Networks, networkData{
- LTime: n.ltime,
- ID: n.id,
- NodeName: name,
- Leaving: n.leaving,
+ pp.Networks = append(pp.Networks, &NetworkEntry{
+ LTime: n.ltime,
+ NetworkID: n.id,
+ NodeName: name,
+ Leaving: n.leaving,
})
}
}
- buf, err := encodeMessage(networkPushPullMsg, &pp)
+ buf, err := encodeMessage(MessageTypePushPull, &pp)
if err != nil {
logrus.Errorf("Failed to encode local network state: %v", err)
return nil
@@ -283,12 +282,19 @@ func (d *delegate) MergeRemoteState(buf []byte, isJoin bool) {
return
}
- if messageType(buf[0]) != networkPushPullMsg {
+ var gMsg GossipMessage
+ err := proto.Unmarshal(buf, &gMsg)
+ if err != nil {
+ logrus.Errorf("Error unmarshalling push pull messsage: %v", err)
+ return
+ }
+
+ if gMsg.Type != MessageTypePushPull {
logrus.Errorf("Invalid message type %v received from remote", buf[0])
}
- pp := networkPushPull{}
- if err := decodeMessage(buf[1:], &pp); err != nil {
+ pp := NetworkPushPull{}
+ if err := proto.Unmarshal(gMsg.Data, &pp); err != nil {
logrus.Errorf("Failed to decode remote network state: %v", err)
return
}
@@ -298,15 +304,15 @@ func (d *delegate) MergeRemoteState(buf []byte, isJoin bool) {
}
for _, n := range pp.Networks {
- nEvent := &networkEventData{
+ nEvent := &NetworkEvent{
LTime: n.LTime,
NodeName: n.NodeName,
- NetworkID: n.ID,
- Event: networkJoin,
+ NetworkID: n.NetworkID,
+ Type: NetworkEventTypeJoin,
}
if n.Leaving {
- nEvent.Event = networkLeave
+ nEvent.Type = NetworkEventTypeLeave
}
d.nDB.handleNetworkEvent(nEvent)
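
delegate.go now decodes every incoming buffer as a GossipMessage envelope (type plus opaque payload) before dispatching, instead of peeking at a leading type byte. A self-contained sketch of the envelope-and-dispatch pattern, using a toy fixed-width header in place of protobuf so it runs standalone:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

type messageType uint32

const (
	typeInvalid messageType = iota
	typeNetworkEvent
	typeTableEvent
)

// encodeEnvelope prefixes the payload with a type header; the real code
// marshals a GossipMessage protobuf instead of a fixed-width header.
func encodeEnvelope(t messageType, payload []byte) []byte {
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, uint32(t))
	buf.Write(payload)
	return buf.Bytes()
}

// decodeEnvelope is the counterpart of decodeMessage: recover the type
// and the opaque payload, then decode the payload per type.
func decodeEnvelope(buf []byte) (messageType, []byte, error) {
	if len(buf) < 4 {
		return typeInvalid, nil, fmt.Errorf("short envelope")
	}
	return messageType(binary.BigEndian.Uint32(buf[:4])), buf[4:], nil
}

func handle(buf []byte) {
	t, data, err := decodeEnvelope(buf)
	if err != nil {
		fmt.Println("decode error:", err)
		return
	}
	switch t {
	case typeNetworkEvent:
		fmt.Printf("network event payload: %q\n", data)
	case typeTableEvent:
		fmt.Printf("table event payload: %q\n", data)
	default:
		fmt.Println("unknown message type", t)
	}
}

func main() {
	handle(encodeEnvelope(typeTableEvent, []byte("entry")))
}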
diff --git a/vendor/src/github.com/docker/libnetwork/networkdb/message.go b/vendor/src/github.com/docker/libnetwork/networkdb/message.go
index 48f69da0e0..a861752bd4 100644
--- a/vendor/src/github.com/docker/libnetwork/networkdb/message.go
+++ b/vendor/src/github.com/docker/libnetwork/networkdb/message.go
@@ -1,32 +1,6 @@
package networkdb
-import (
- "bytes"
- "encoding/binary"
- "fmt"
-
- "github.com/hashicorp/go-msgpack/codec"
-)
-
-type messageType uint8
-
-const (
- // For network join/leave event message
- networkEventMsg messageType = 1 + iota
-
- // For pushing/pulling network/node association state
- networkPushPullMsg
-
- // For table entry CRUD event message
- tableEventMsg
-
- // For building a compound message which packs many different
- // message types together
- compoundMsg
-
- // For syncing table entries in bulk b/w nodes.
- bulkSyncMsg
-)
+import "github.com/gogo/protobuf/proto"
const (
// Max udp message size chosen to avoid network packet
@@ -37,86 +11,92 @@ const (
// bytes (num messages)
compoundHeaderOverhead = 5
- // Overhead for each embedded message in a compound message 2
+ // Overhead for each embedded message in a compound message 4
// bytes (len of embedded message)
- compoundOverhead = 2
+ compoundOverhead = 4
)
-func decodeMessage(buf []byte, out interface{}) error {
- var handle codec.MsgpackHandle
- return codec.NewDecoder(bytes.NewReader(buf), &handle).Decode(out)
+func encodeRawMessage(t MessageType, raw []byte) ([]byte, error) {
+ gMsg := GossipMessage{
+ Type: t,
+ Data: raw,
+ }
+
+ buf, err := proto.Marshal(&gMsg)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf, nil
}
-func encodeMessage(t messageType, msg interface{}) ([]byte, error) {
- buf := bytes.NewBuffer(nil)
- buf.WriteByte(uint8(t))
+func encodeMessage(t MessageType, msg interface{}) ([]byte, error) {
+ buf, err := proto.Marshal(msg.(proto.Message))
+ if err != nil {
+ return nil, err
+ }
- handle := codec.MsgpackHandle{}
- encoder := codec.NewEncoder(buf, &handle)
- err := encoder.Encode(msg)
- return buf.Bytes(), err
+ buf, err = encodeRawMessage(t, buf)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf, nil
+}
+
+func decodeMessage(buf []byte) (MessageType, []byte, error) {
+ var gMsg GossipMessage
+
+ err := proto.Unmarshal(buf, &gMsg)
+ if err != nil {
+ return MessageTypeInvalid, nil, err
+ }
+
+ return gMsg.Type, gMsg.Data, nil
}
// makeCompoundMessage takes a list of messages and generates
// a single compound message containing all of them
-func makeCompoundMessage(msgs [][]byte) *bytes.Buffer {
- // Create a local buffer
- buf := bytes.NewBuffer(nil)
+func makeCompoundMessage(msgs [][]byte) []byte {
+ cMsg := CompoundMessage{}
- // Write out the type
- buf.WriteByte(uint8(compoundMsg))
+ cMsg.Messages = make([]*CompoundMessage_SimpleMessage, 0, len(msgs))
+ for _, m := range msgs {
+ cMsg.Messages = append(cMsg.Messages, &CompoundMessage_SimpleMessage{
+ Payload: m,
+ })
+ }
- // Write out the number of message
- binary.Write(buf, binary.BigEndian, uint32(len(msgs)))
+ buf, err := proto.Marshal(&cMsg)
+ if err != nil {
+ return nil
+ }
- // Add the message lengths
- for _, m := range msgs {
- binary.Write(buf, binary.BigEndian, uint16(len(m)))
+ gMsg := GossipMessage{
+ Type: MessageTypeCompound,
+ Data: buf,
}
- // Append the messages
- for _, m := range msgs {
- buf.Write(m)
+ buf, err = proto.Marshal(&gMsg)
+ if err != nil {
+ return nil
}
return buf
}
// decodeCompoundMessage splits a compound message and returns
-// the slices of individual messages. Also returns the number
-// of truncated messages and any potential error
-func decodeCompoundMessage(buf []byte) (trunc int, parts [][]byte, err error) {
- if len(buf) < 1 {
- err = fmt.Errorf("missing compound length byte")
- return
+// the slices of individual messages. Returns any potential error.
+func decodeCompoundMessage(buf []byte) ([][]byte, error) {
+ var cMsg CompoundMessage
+ if err := proto.Unmarshal(buf, &cMsg); err != nil {
+ return nil, err
}
- numParts := binary.BigEndian.Uint32(buf[0:4])
- buf = buf[4:]
- // Check we have enough bytes
- if len(buf) < int(numParts*2) {
- err = fmt.Errorf("truncated len slice")
- return
+ parts := make([][]byte, 0, len(cMsg.Messages))
+ for _, m := range cMsg.Messages {
+ parts = append(parts, m.Payload)
}
- // Decode the lengths
- lengths := make([]uint16, numParts)
- for i := 0; i < int(numParts); i++ {
- lengths[i] = binary.BigEndian.Uint16(buf[i*2 : i*2+2])
- }
- buf = buf[numParts*2:]
-
- // Split each message
- for idx, msgLen := range lengths {
- if len(buf) < int(msgLen) {
- trunc = int(numParts) - idx
- return
- }
-
- // Extract the slice, seek past on the buffer
- slice := buf[:msgLen]
- buf = buf[msgLen:]
- parts = append(parts, slice)
- }
- return
+ return parts, nil
}
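
message.go replaces the hand-rolled compound format (type byte, big-endian message count, uint16 length table, concatenated payloads) with a protobuf CompoundMessage holding repeated SimpleMessage payloads, which removes the truncation bookkeeping entirely. A sketch of the same pack/split idea with explicit length prefixes, kept self-contained so it runs without the generated types:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// packCompound concatenates length-prefixed messages; the new code gets
// equivalent framing for free from the repeated protobuf field.
func packCompound(msgs [][]byte) []byte {
	buf := new(bytes.Buffer)
	for _, m := range msgs {
		binary.Write(buf, binary.BigEndian, uint32(len(m)))
		buf.Write(m)
	}
	return buf.Bytes()
}

// unpackCompound splits the stream back into individual messages,
// erroring out instead of silently counting truncated parts.
func unpackCompound(buf []byte) ([][]byte, error) {
	var parts [][]byte
	for len(buf) > 0 {
		if len(buf) < 4 {
			return nil, fmt.Errorf("truncated length prefix")
		}
		n := binary.BigEndian.Uint32(buf[:4])
		buf = buf[4:]
		if uint32(len(buf)) < n {
			return nil, fmt.Errorf("truncated payload")
		}
		parts = append(parts, buf[:n])
		buf = buf[n:]
	}
	return parts, nil
}

func main() {
	parts, err := unpackCompound(packCompound([][]byte{[]byte("a"), []byte("bc")}))
	fmt.Println(parts, err) // [[97] [98 99]] <nil>
}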
diff --git a/vendor/src/github.com/docker/libnetwork/networkdb/networkdb.go b/vendor/src/github.com/docker/libnetwork/networkdb/networkdb.go
index 1c49371896..e02fe794af 100644
--- a/vendor/src/github.com/docker/libnetwork/networkdb/networkdb.go
+++ b/vendor/src/github.com/docker/libnetwork/networkdb/networkdb.go
@@ -1,5 +1,7 @@
package networkdb
+//go:generate protoc -I.:../Godeps/_workspace/src/github.com/gogo/protobuf --gogo_out=import_path=github.com/docker/libnetwork/networkdb,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. networkdb.proto
+
import (
"fmt"
"strings"
@@ -75,6 +77,9 @@ type NetworkDB struct {
// List of all tickers which needed to be stopped when
// cleaning up.
tickers []*time.Ticker
+
+ // Reference to the memberlist's keyring to add & remove keys
+ keyring *memberlist.Keyring
}
// network describes the node/network attachment.
@@ -109,6 +114,10 @@ type Config struct {
// BindPort is the local node's port to which we bind to for
// cluster communication.
BindPort int
+
+ // Keys to be added to the Keyring of the memberlist. Key at index
+ // 0 is the primary key
+ Keys [][]byte
}
// entry defines a table entry
@@ -206,7 +215,7 @@ func (nDB *NetworkDB) CreateEntry(tname, nid, key string, value []byte) error {
value: value,
}
- if err := nDB.sendTableEvent(tableEntryCreate, nid, tname, key, entry); err != nil {
+ if err := nDB.sendTableEvent(TableEventTypeCreate, nid, tname, key, entry); err != nil {
return fmt.Errorf("cannot send table create event: %v", err)
}
@@ -234,7 +243,7 @@ func (nDB *NetworkDB) UpdateEntry(tname, nid, key string, value []byte) error {
value: value,
}
- if err := nDB.sendTableEvent(tableEntryUpdate, nid, tname, key, entry); err != nil {
+ if err := nDB.sendTableEvent(TableEventTypeUpdate, nid, tname, key, entry); err != nil {
return fmt.Errorf("cannot send table update event: %v", err)
}
@@ -264,7 +273,7 @@ func (nDB *NetworkDB) DeleteEntry(tname, nid, key string) error {
deleteTime: time.Now(),
}
- if err := nDB.sendTableEvent(tableEntryDelete, nid, tname, key, entry); err != nil {
+ if err := nDB.sendTableEvent(TableEventTypeDelete, nid, tname, key, entry); err != nil {
return fmt.Errorf("cannot send table delete event: %v", err)
}
@@ -352,7 +361,7 @@ func (nDB *NetworkDB) JoinNetwork(nid string) error {
nDB.networkNodes[nid] = append(nDB.networkNodes[nid], nDB.config.NodeName)
nDB.Unlock()
- if err := nDB.sendNetworkEvent(nid, networkJoin, ltime); err != nil {
+ if err := nDB.sendNetworkEvent(nid, NetworkEventTypeJoin, ltime); err != nil {
return fmt.Errorf("failed to send leave network event for %s: %v", nid, err)
}
@@ -371,7 +380,7 @@ func (nDB *NetworkDB) JoinNetwork(nid string) error {
// network.
func (nDB *NetworkDB) LeaveNetwork(nid string) error {
ltime := nDB.networkClock.Increment()
- if err := nDB.sendNetworkEvent(nid, networkLeave, ltime); err != nil {
+ if err := nDB.sendNetworkEvent(nid, NetworkEventTypeLeave, ltime); err != nil {
return fmt.Errorf("failed to send leave network event for %s: %v", nid, err)
}
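
networkdb.go now funnels CreateEntry, UpdateEntry and DeleteEntry through sendTableEvent with a TableEvent_Type value instead of the old private messageType constants. A toy sketch of that funneling (sendTableEvent here is a stand-in that just prints; the real one encodes a TableEvent and queues it for gossip):

package main

import "fmt"

type tableEventType int

const (
	eventCreate tableEventType = iota + 1
	eventUpdate
	eventDelete
)

// sendTableEvent stands in for nDB.sendTableEvent: every table CRUD
// operation reduces to a single typed event that gets gossiped.
func sendTableEvent(t tableEventType, nid, tname, key string, value []byte) {
	fmt.Printf("gossip: type=%d network=%s table=%s key=%s value=%q\n",
		t, nid, tname, key, value)
}

func main() {
	// Create, update and delete of one entry map to three typed events.
	sendTableEvent(eventCreate, "net1", "tbl", "key1", []byte("v1"))
	sendTableEvent(eventUpdate, "net1", "tbl", "key1", []byte("v2"))
	sendTableEvent(eventDelete, "net1", "tbl", "key1", nil)
}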
diff --git a/vendor/src/github.com/docker/libnetwork/networkdb/networkdb.pb.go b/vendor/src/github.com/docker/libnetwork/networkdb/networkdb.pb.go
new file mode 100644
index 0000000000..86177cf315
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/networkdb/networkdb.pb.go
@@ -0,0 +1,2266 @@
+// Code generated by protoc-gen-gogo.
+// source: networkdb.proto
+// DO NOT EDIT!
+
+/*
+ Package networkdb is a generated protocol buffer package.
+
+ It is generated from these files:
+ networkdb.proto
+
+ It has these top-level messages:
+ GossipMessage
+ NetworkEvent
+ NetworkEntry
+ NetworkPushPull
+ TableEvent
+ BulkSyncMessage
+ CompoundMessage
+*/
+package networkdb
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import github_com_hashicorp_serf_serf "github.com/hashicorp/serf/serf"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+const _ = proto.GoGoProtoPackageIsVersion1
+
+// MessageType enum defines all the core message types that networkdb
+// uses to communicate to peers.
+type MessageType int32
+
+const (
+ MessageTypeInvalid MessageType = 0
+ // NetworkEvent message type is used to communicate network
+ // attachments on the node.
+ MessageTypeNetworkEvent MessageType = 1
+ // TableEvent message type is used to communicate any table
+ // CRUD event that happened on the node.
+ MessageTypeTableEvent MessageType = 2
+ // PushPull message type is used to sync up all network
+ // attachments on a peer node either during startup of this
+ // node or with a random peer node periodically thereafter.
+ MessageTypePushPull MessageType = 3
+ // BulkSync message is used to bulksync the whole networkdb
+ // state with a peer node during startup of this node or with
+ // a random peer node periodically thereafter.
+ MessageTypeBulkSync MessageType = 4
+ // Compound message type is used to form a compound message,
+ // which packs many messages of the above types into a single
+ // payload.
+ MessageTypeCompound MessageType = 5
+)
+
+var MessageType_name = map[int32]string{
+ 0: "INVALID",
+ 1: "NETWORK_EVENT",
+ 2: "TABLE_EVENT",
+ 3: "PUSH_PULL",
+ 4: "BULK_SYNC",
+ 5: "COMPOUND",
+}
+var MessageType_value = map[string]int32{
+ "INVALID": 0,
+ "NETWORK_EVENT": 1,
+ "TABLE_EVENT": 2,
+ "PUSH_PULL": 3,
+ "BULK_SYNC": 4,
+ "COMPOUND": 5,
+}
+
+func (x MessageType) String() string {
+ return proto.EnumName(MessageType_name, int32(x))
+}
+func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{0} }
+
+type NetworkEvent_Type int32
+
+const (
+ NetworkEventTypeInvalid NetworkEvent_Type = 0
+ // Join event is generated when this node joins a network.
+ NetworkEventTypeJoin NetworkEvent_Type = 1
+ // Leave event is generated when this node leaves a network.
+ NetworkEventTypeLeave NetworkEvent_Type = 2
+)
+
+var NetworkEvent_Type_name = map[int32]string{
+ 0: "INVALID",
+ 1: "JOIN",
+ 2: "LEAVE",
+}
+var NetworkEvent_Type_value = map[string]int32{
+ "INVALID": 0,
+ "JOIN": 1,
+ "LEAVE": 2,
+}
+
+func (x NetworkEvent_Type) String() string {
+ return proto.EnumName(NetworkEvent_Type_name, int32(x))
+}
+func (NetworkEvent_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{1, 0} }
+
+type TableEvent_Type int32
+
+const (
+ TableEventTypeInvalid TableEvent_Type = 0
+ // Create signifies that this table entry was just
+ // created.
+ TableEventTypeCreate TableEvent_Type = 1
+ // Update signifies that this table entry was just
+ // updated.
+ TableEventTypeUpdate TableEvent_Type = 2
+ // Delete signifies that this table entry was just
+ // deleted.
+ TableEventTypeDelete TableEvent_Type = 3
+)
+
+var TableEvent_Type_name = map[int32]string{
+ 0: "INVALID",
+ 1: "CREATE",
+ 2: "UPDATE",
+ 3: "DELETE",
+}
+var TableEvent_Type_value = map[string]int32{
+ "INVALID": 0,
+ "CREATE": 1,
+ "UPDATE": 2,
+ "DELETE": 3,
+}
+
+func (x TableEvent_Type) String() string {
+ return proto.EnumName(TableEvent_Type_name, int32(x))
+}
+func (TableEvent_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{4, 0} }
+
+// GossipMessage is a basic message header used by all messages types.
+type GossipMessage struct {
+ Type MessageType `protobuf:"varint,1,opt,name=type,proto3,enum=networkdb.MessageType" json:"type,omitempty"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *GossipMessage) Reset() { *m = GossipMessage{} }
+func (*GossipMessage) ProtoMessage() {}
+func (*GossipMessage) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{0} }
+
+// NetworkEvent message payload definition.
+type NetworkEvent struct {
+ Type NetworkEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=networkdb.NetworkEvent_Type" json:"type,omitempty"`
+ // Lamport time using a network lamport clock indicating the
+ // time this event was generated on the node where it was
+ // generated.
+ LTime github_com_hashicorp_serf_serf.LamportTime `protobuf:"varint,2,opt,name=l_time,json=lTime,proto3,customtype=github.com/hashicorp/serf/serf.LamportTime" json:"l_time"`
+ // Source node name.
+ NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
+ // ID of the network for which the event is generated.
+ NetworkID string `protobuf:"bytes,4,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"`
+}
+
+func (m *NetworkEvent) Reset() { *m = NetworkEvent{} }
+func (*NetworkEvent) ProtoMessage() {}
+func (*NetworkEvent) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{1} }
+
+// NetworkEntry for push pull of networks.
+type NetworkEntry struct {
+ // ID of the network
+ NetworkID string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"`
+ // Latest lamport time of the network attachment when this
+ // network event was recorded.
+ LTime github_com_hashicorp_serf_serf.LamportTime `protobuf:"varint,2,opt,name=l_time,json=lTime,proto3,customtype=github.com/hashicorp/serf/serf.LamportTime" json:"l_time"`
+ // Source node name where this network attachment happened.
+ NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
+ // Indicates if a leave from this network is in progress.
+ Leaving bool `protobuf:"varint,4,opt,name=leaving,proto3" json:"leaving,omitempty"`
+}
+
+func (m *NetworkEntry) Reset() { *m = NetworkEntry{} }
+func (*NetworkEntry) ProtoMessage() {}
+func (*NetworkEntry) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{2} }
+
+// NetworkPushPull message payload definition.
+type NetworkPushPull struct {
+ // Lamport time when this push pull was initiated.
+ LTime github_com_hashicorp_serf_serf.LamportTime `protobuf:"varint,1,opt,name=l_time,json=lTime,proto3,customtype=github.com/hashicorp/serf/serf.LamportTime" json:"l_time"`
+ Networks []*NetworkEntry `protobuf:"bytes,2,rep,name=networks" json:"networks,omitempty"`
+}
+
+func (m *NetworkPushPull) Reset() { *m = NetworkPushPull{} }
+func (*NetworkPushPull) ProtoMessage() {}
+func (*NetworkPushPull) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{3} }
+
+func (m *NetworkPushPull) GetNetworks() []*NetworkEntry {
+ if m != nil {
+ return m.Networks
+ }
+ return nil
+}
+
+// TableEvent message payload definition.
+type TableEvent struct {
+ Type TableEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=networkdb.TableEvent_Type" json:"type,omitempty"`
+ // Lamport time when this event was generated.
+ LTime github_com_hashicorp_serf_serf.LamportTime `protobuf:"varint,2,opt,name=l_time,json=lTime,proto3,customtype=github.com/hashicorp/serf/serf.LamportTime" json:"l_time"`
+ // Node name where this event originated.
+ NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
+ // ID of the network to which this table entry belongs.
+ NetworkID string `protobuf:"bytes,4,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"`
+ // Name of the table to which this table entry belongs.
+ TableName string `protobuf:"bytes,5,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
+ // Entry key.
+ Key string `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"`
+ // Entry value.
+ Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *TableEvent) Reset() { *m = TableEvent{} }
+func (*TableEvent) ProtoMessage() {}
+func (*TableEvent) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{4} }
+
+// BulkSync message payload definition.
+type BulkSyncMessage struct {
+ // Lamport time when this bulk sync was initiated.
+ LTime github_com_hashicorp_serf_serf.LamportTime `protobuf:"varint,1,opt,name=l_time,json=lTime,proto3,customtype=github.com/hashicorp/serf/serf.LamportTime" json:"l_time"`
+ // Indicates whether this bulk sync is unsolicited, i.e. not sent
+ // in response to a bulk sync request from a peer node.
+ Unsolicited bool `protobuf:"varint,2,opt,name=unsolicited,proto3" json:"unsolicited,omitempty"`
+ // Name of the node which is producing this bulk sync message.
+ NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
+ // List of network names whose table entries are getting
+ // bulksynced as part of the bulksync.
+ Networks []string `protobuf:"bytes,4,rep,name=networks" json:"networks,omitempty"`
+ // Bulksync payload
+ Payload []byte `protobuf:"bytes,5,opt,name=payload,proto3" json:"payload,omitempty"`
+}
+
+func (m *BulkSyncMessage) Reset() { *m = BulkSyncMessage{} }
+func (*BulkSyncMessage) ProtoMessage() {}
+func (*BulkSyncMessage) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{5} }
+
+// Compound message payload definition.
+type CompoundMessage struct {
+ // A list of simple messages.
+ Messages []*CompoundMessage_SimpleMessage `protobuf:"bytes,1,rep,name=messages" json:"messages,omitempty"`
+}
+
+func (m *CompoundMessage) Reset() { *m = CompoundMessage{} }
+func (*CompoundMessage) ProtoMessage() {}
+func (*CompoundMessage) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{6} }
+
+func (m *CompoundMessage) GetMessages() []*CompoundMessage_SimpleMessage {
+ if m != nil {
+ return m.Messages
+ }
+ return nil
+}
+
+type CompoundMessage_SimpleMessage struct {
+ // Bytestring payload of a message constructed using
+ // other message type definitions.
+ Payload []byte `protobuf:"bytes,1,opt,name=Payload,json=payload,proto3" json:"Payload,omitempty"`
+}
+
+func (m *CompoundMessage_SimpleMessage) Reset() { *m = CompoundMessage_SimpleMessage{} }
+func (*CompoundMessage_SimpleMessage) ProtoMessage() {}
+func (*CompoundMessage_SimpleMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptorNetworkdb, []int{6, 0}
+}
+
+func init() {
+ proto.RegisterType((*GossipMessage)(nil), "networkdb.GossipMessage")
+ proto.RegisterType((*NetworkEvent)(nil), "networkdb.NetworkEvent")
+ proto.RegisterType((*NetworkEntry)(nil), "networkdb.NetworkEntry")
+ proto.RegisterType((*NetworkPushPull)(nil), "networkdb.NetworkPushPull")
+ proto.RegisterType((*TableEvent)(nil), "networkdb.TableEvent")
+ proto.RegisterType((*BulkSyncMessage)(nil), "networkdb.BulkSyncMessage")
+ proto.RegisterType((*CompoundMessage)(nil), "networkdb.CompoundMessage")
+ proto.RegisterType((*CompoundMessage_SimpleMessage)(nil), "networkdb.CompoundMessage.SimpleMessage")
+ proto.RegisterEnum("networkdb.MessageType", MessageType_name, MessageType_value)
+ proto.RegisterEnum("networkdb.NetworkEvent_Type", NetworkEvent_Type_name, NetworkEvent_Type_value)
+ proto.RegisterEnum("networkdb.TableEvent_Type", TableEvent_Type_name, TableEvent_Type_value)
+}
+func (this *GossipMessage) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&networkdb.GossipMessage{")
+ s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
+ s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *NetworkEvent) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&networkdb.NetworkEvent{")
+ s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
+ s = append(s, "LTime: "+fmt.Sprintf("%#v", this.LTime)+",\n")
+ s = append(s, "NodeName: "+fmt.Sprintf("%#v", this.NodeName)+",\n")
+ s = append(s, "NetworkID: "+fmt.Sprintf("%#v", this.NetworkID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *NetworkEntry) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&networkdb.NetworkEntry{")
+ s = append(s, "NetworkID: "+fmt.Sprintf("%#v", this.NetworkID)+",\n")
+ s = append(s, "LTime: "+fmt.Sprintf("%#v", this.LTime)+",\n")
+ s = append(s, "NodeName: "+fmt.Sprintf("%#v", this.NodeName)+",\n")
+ s = append(s, "Leaving: "+fmt.Sprintf("%#v", this.Leaving)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *NetworkPushPull) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&networkdb.NetworkPushPull{")
+ s = append(s, "LTime: "+fmt.Sprintf("%#v", this.LTime)+",\n")
+ if this.Networks != nil {
+ s = append(s, "Networks: "+fmt.Sprintf("%#v", this.Networks)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TableEvent) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 11)
+ s = append(s, "&networkdb.TableEvent{")
+ s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
+ s = append(s, "LTime: "+fmt.Sprintf("%#v", this.LTime)+",\n")
+ s = append(s, "NodeName: "+fmt.Sprintf("%#v", this.NodeName)+",\n")
+ s = append(s, "NetworkID: "+fmt.Sprintf("%#v", this.NetworkID)+",\n")
+ s = append(s, "TableName: "+fmt.Sprintf("%#v", this.TableName)+",\n")
+ s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n")
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *BulkSyncMessage) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&networkdb.BulkSyncMessage{")
+ s = append(s, "LTime: "+fmt.Sprintf("%#v", this.LTime)+",\n")
+ s = append(s, "Unsolicited: "+fmt.Sprintf("%#v", this.Unsolicited)+",\n")
+ s = append(s, "NodeName: "+fmt.Sprintf("%#v", this.NodeName)+",\n")
+ s = append(s, "Networks: "+fmt.Sprintf("%#v", this.Networks)+",\n")
+ s = append(s, "Payload: "+fmt.Sprintf("%#v", this.Payload)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *CompoundMessage) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&networkdb.CompoundMessage{")
+ if this.Messages != nil {
+ s = append(s, "Messages: "+fmt.Sprintf("%#v", this.Messages)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *CompoundMessage_SimpleMessage) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&networkdb.CompoundMessage_SimpleMessage{")
+ s = append(s, "Payload: "+fmt.Sprintf("%#v", this.Payload)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringNetworkdb(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringNetworkdb(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+func (m *GossipMessage) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GossipMessage) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Type != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(m.Type))
+ }
+ if len(m.Data) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.Data)))
+ i += copy(data[i:], m.Data)
+ }
+ return i, nil
+}
+
+func (m *NetworkEvent) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NetworkEvent) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Type != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(m.Type))
+ }
+ if m.LTime != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(m.LTime))
+ }
+ if len(m.NodeName) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.NodeName)))
+ i += copy(data[i:], m.NodeName)
+ }
+ if len(m.NetworkID) > 0 {
+ data[i] = 0x22
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.NetworkID)))
+ i += copy(data[i:], m.NetworkID)
+ }
+ return i, nil
+}
+
+func (m *NetworkEntry) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NetworkEntry) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.NetworkID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.NetworkID)))
+ i += copy(data[i:], m.NetworkID)
+ }
+ if m.LTime != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(m.LTime))
+ }
+ if len(m.NodeName) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.NodeName)))
+ i += copy(data[i:], m.NodeName)
+ }
+ if m.Leaving {
+ data[i] = 0x20
+ i++
+ if m.Leaving {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *NetworkPushPull) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NetworkPushPull) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.LTime != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(m.LTime))
+ }
+ if len(m.Networks) > 0 {
+ for _, msg := range m.Networks {
+ data[i] = 0x12
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *TableEvent) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *TableEvent) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Type != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(m.Type))
+ }
+ if m.LTime != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(m.LTime))
+ }
+ if len(m.NodeName) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.NodeName)))
+ i += copy(data[i:], m.NodeName)
+ }
+ if len(m.NetworkID) > 0 {
+ data[i] = 0x22
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.NetworkID)))
+ i += copy(data[i:], m.NetworkID)
+ }
+ if len(m.TableName) > 0 {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.TableName)))
+ i += copy(data[i:], m.TableName)
+ }
+ if len(m.Key) > 0 {
+ data[i] = 0x32
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.Key)))
+ i += copy(data[i:], m.Key)
+ }
+ if len(m.Value) > 0 {
+ data[i] = 0x3a
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.Value)))
+ i += copy(data[i:], m.Value)
+ }
+ return i, nil
+}
+
+func (m *BulkSyncMessage) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *BulkSyncMessage) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.LTime != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(m.LTime))
+ }
+ if m.Unsolicited {
+ data[i] = 0x10
+ i++
+ if m.Unsolicited {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ if len(m.NodeName) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.NodeName)))
+ i += copy(data[i:], m.NodeName)
+ }
+ if len(m.Networks) > 0 {
+ for _, s := range m.Networks {
+ data[i] = 0x22
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Payload) > 0 {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.Payload)))
+ i += copy(data[i:], m.Payload)
+ }
+ return i, nil
+}
+
+func (m *CompoundMessage) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CompoundMessage) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Messages) > 0 {
+ for _, msg := range m.Messages {
+ data[i] = 0xa
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *CompoundMessage_SimpleMessage) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CompoundMessage_SimpleMessage) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Payload) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintNetworkdb(data, i, uint64(len(m.Payload)))
+ i += copy(data[i:], m.Payload)
+ }
+ return i, nil
+}
+
+func encodeFixed64Networkdb(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Networkdb(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintNetworkdb(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
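+
+// encodeVarintNetworkdb above writes protobuf base-128 varints: seven
+// payload bits per byte, least-significant group first, with the high
+// bit set on every byte except the last. A worked example, assuming
+// nothing beyond the function above:
+//
+//	v = 300 = 0b1_0010_1100
+//	byte 0: low 7 bits 0101100 -> 0x2c, continuation bit set -> 0xac
+//	byte 1: remaining bits 10 -> 0x02
+//
+//	buf := make([]byte, 10)
+//	n := encodeVarintNetworkdb(buf, 0, 300)
+//	// n == 2, buf[:n] == []byte{0xac, 0x02}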
+func (m *GossipMessage) Size() (n int) {
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + sovNetworkdb(uint64(m.Type))
+ }
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ return n
+}
+
+func (m *NetworkEvent) Size() (n int) {
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + sovNetworkdb(uint64(m.Type))
+ }
+ if m.LTime != 0 {
+ n += 1 + sovNetworkdb(uint64(m.LTime))
+ }
+ l = len(m.NodeName)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ l = len(m.NetworkID)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ return n
+}
+
+func (m *NetworkEntry) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.NetworkID)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ if m.LTime != 0 {
+ n += 1 + sovNetworkdb(uint64(m.LTime))
+ }
+ l = len(m.NodeName)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ if m.Leaving {
+ n += 2
+ }
+ return n
+}
+
+func (m *NetworkPushPull) Size() (n int) {
+ var l int
+ _ = l
+ if m.LTime != 0 {
+ n += 1 + sovNetworkdb(uint64(m.LTime))
+ }
+ if len(m.Networks) > 0 {
+ for _, e := range m.Networks {
+ l = e.Size()
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *TableEvent) Size() (n int) {
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + sovNetworkdb(uint64(m.Type))
+ }
+ if m.LTime != 0 {
+ n += 1 + sovNetworkdb(uint64(m.LTime))
+ }
+ l = len(m.NodeName)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ l = len(m.NetworkID)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ l = len(m.TableName)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ return n
+}
+
+func (m *BulkSyncMessage) Size() (n int) {
+ var l int
+ _ = l
+ if m.LTime != 0 {
+ n += 1 + sovNetworkdb(uint64(m.LTime))
+ }
+ if m.Unsolicited {
+ n += 2
+ }
+ l = len(m.NodeName)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ if len(m.Networks) > 0 {
+ for _, s := range m.Networks {
+ l = len(s)
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ }
+ l = len(m.Payload)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ return n
+}
+
+func (m *CompoundMessage) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Messages) > 0 {
+ for _, e := range m.Messages {
+ l = e.Size()
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CompoundMessage_SimpleMessage) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Payload)
+ if l > 0 {
+ n += 1 + l + sovNetworkdb(uint64(l))
+ }
+ return n
+}
+
+func sovNetworkdb(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozNetworkdb(x uint64) (n int) {
+ return sovNetworkdb(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *GossipMessage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GossipMessage{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Data:` + fmt.Sprintf("%v", this.Data) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkEvent) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NetworkEvent{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `LTime:` + fmt.Sprintf("%v", this.LTime) + `,`,
+ `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
+ `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkEntry) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NetworkEntry{`,
+ `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`,
+ `LTime:` + fmt.Sprintf("%v", this.LTime) + `,`,
+ `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
+ `Leaving:` + fmt.Sprintf("%v", this.Leaving) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkPushPull) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NetworkPushPull{`,
+ `LTime:` + fmt.Sprintf("%v", this.LTime) + `,`,
+ `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkEntry", "NetworkEntry", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TableEvent) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TableEvent{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `LTime:` + fmt.Sprintf("%v", this.LTime) + `,`,
+ `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
+ `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`,
+ `TableName:` + fmt.Sprintf("%v", this.TableName) + `,`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BulkSyncMessage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BulkSyncMessage{`,
+ `LTime:` + fmt.Sprintf("%v", this.LTime) + `,`,
+ `Unsolicited:` + fmt.Sprintf("%v", this.Unsolicited) + `,`,
+ `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
+ `Networks:` + fmt.Sprintf("%v", this.Networks) + `,`,
+ `Payload:` + fmt.Sprintf("%v", this.Payload) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CompoundMessage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CompoundMessage{`,
+ `Messages:` + strings.Replace(fmt.Sprintf("%v", this.Messages), "CompoundMessage_SimpleMessage", "CompoundMessage_SimpleMessage", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CompoundMessage_SimpleMessage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CompoundMessage_SimpleMessage{`,
+ `Payload:` + fmt.Sprintf("%v", this.Payload) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringNetworkdb(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *GossipMessage) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GossipMessage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GossipMessage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Type |= (MessageType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNetworkdb(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkEvent) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkEvent: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkEvent: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Type |= (NetworkEvent_Type(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LTime", wireType)
+ }
+ m.LTime = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NetworkID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNetworkdb(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkEntry) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkEntry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NetworkID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LTime", wireType)
+ }
+ m.LTime = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leaving", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Leaving = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNetworkdb(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPushPull) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPushPull: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPushPull: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LTime", wireType)
+ }
+ m.LTime = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Networks = append(m.Networks, &NetworkEntry{})
+ if err := m.Networks[len(m.Networks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNetworkdb(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TableEvent) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TableEvent: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TableEvent: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Type |= (TableEvent_Type(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LTime", wireType)
+ }
+ m.LTime = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NetworkID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TableName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], data[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNetworkdb(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BulkSyncMessage) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BulkSyncMessage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BulkSyncMessage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LTime", wireType)
+ }
+ m.LTime = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Unsolicited", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Unsolicited = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Networks = append(m.Networks, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Payload = append(m.Payload[:0], data[iNdEx:postIndex]...)
+ if m.Payload == nil {
+ m.Payload = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNetworkdb(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CompoundMessage) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CompoundMessage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CompoundMessage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Messages", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Messages = append(m.Messages, &CompoundMessage_SimpleMessage{})
+ if err := m.Messages[len(m.Messages)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNetworkdb(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CompoundMessage_SimpleMessage) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SimpleMessage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SimpleMessage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Payload = append(m.Payload[:0], data[iNdEx:postIndex]...)
+ if m.Payload == nil {
+ m.Payload = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNetworkdb(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthNetworkdb
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipNetworkdb(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthNetworkdb
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowNetworkdb
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipNetworkdb(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
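skipNetworkdb advances past fields the decoder does not recognize by dispatching on the wire type: varints are consumed byte by byte, fixed64 and fixed32 advance 8 and 4 bytes, and length-delimited fields read a length varint and jump over the payload. A compile-checkable sketch of the same dispatch, with groups omitted and error handling trimmed (names are illustrative):

    package main

    import "fmt"

    // uvarint decodes a base-128 varint; returns value and bytes read.
    func uvarint(data []byte) (uint64, int) {
    	var v uint64
    	for i, b := range data {
    		v |= uint64(b&0x7F) << (7 * uint(i))
    		if b < 0x80 {
    			return v, i + 1
    		}
    	}
    	return 0, 0
    }

    // skipField reports how many bytes the unknown field at the start
    // of data occupies, for the wire types networkdb messages use.
    func skipField(data []byte) int {
    	key, n := uvarint(data)
    	switch key & 0x7 {
    	case 0: // varint payload
    		_, m := uvarint(data[n:])
    		return n + m
    	case 1: // fixed64
    		return n + 8
    	case 2: // length-delimited: length varint plus payload
    		l, m := uvarint(data[n:])
    		return n + m + int(l)
    	case 5: // fixed32
    		return n + 4
    	}
    	return -1 // start/end group (3/4) omitted in this sketch
    }

    func main() {
    	// Hypothetical bytes: field 2, wire type 2, 3-byte payload "abc".
    	fmt.Println(skipField([]byte{0x12, 0x03, 'a', 'b', 'c'})) // prints 5
    }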
+var (
+ ErrInvalidLengthNetworkdb = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowNetworkdb = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorNetworkdb = []byte{
+ // 812 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x95, 0x4d, 0x6f, 0xe2, 0x46,
+ 0x18, 0xc7, 0x31, 0x18, 0x02, 0x0f, 0xd0, 0x20, 0x27, 0x4d, 0x5c, 0xa7, 0x25, 0x91, 0x9b, 0x46,
+ 0x14, 0x55, 0x4e, 0x95, 0x7c, 0x02, 0x5e, 0xac, 0x96, 0xc4, 0x31, 0xc8, 0x40, 0xaa, 0x9e, 0x90,
+ 0xc1, 0x53, 0xb0, 0x62, 0x6c, 0x0b, 0x9b, 0x54, 0xdc, 0xaa, 0x9e, 0xa2, 0xde, 0x7a, 0xad, 0xd4,
+ 0x53, 0x7b, 0xee, 0x07, 0xe8, 0xa1, 0xe7, 0xa8, 0xa7, 0xf6, 0xb6, 0xda, 0x43, 0xb4, 0xc9, 0x27,
+ 0xd8, 0x8f, 0xb0, 0xe3, 0xc1, 0x86, 0x81, 0x44, 0xb9, 0xec, 0x6a, 0xb5, 0x07, 0xc3, 0xbc, 0xfc,
+ 0xe6, 0xd1, 0xff, 0x79, 0xe6, 0x3f, 0x33, 0xb0, 0x69, 0x23, 0xff, 0x47, 0x67, 0x72, 0x65, 0xf4,
+ 0x25, 0x77, 0xe2, 0xf8, 0x0e, 0x97, 0x59, 0x0c, 0x08, 0xdb, 0x43, 0x67, 0xe8, 0x90, 0xd1, 0xe3,
+ 0xa0, 0x35, 0x07, 0xc4, 0x26, 0xe4, 0xbf, 0x71, 0x3c, 0xcf, 0x74, 0x2f, 0x90, 0xe7, 0xe9, 0x43,
+ 0xc4, 0x95, 0x81, 0xf5, 0x67, 0x2e, 0xe2, 0x99, 0x03, 0xa6, 0xf4, 0xd1, 0xc9, 0x8e, 0xb4, 0x8c,
+ 0x18, 0x12, 0x1d, 0x3c, 0xab, 0x11, 0x86, 0xe3, 0x80, 0x35, 0x74, 0x5f, 0xe7, 0xe3, 0x98, 0xcd,
+ 0x69, 0xa4, 0x2d, 0xde, 0xc7, 0x21, 0xa7, 0xce, 0xd7, 0xc8, 0xd7, 0xc8, 0xf6, 0xb9, 0xaf, 0x57,
+ 0x02, 0x7e, 0x4a, 0x05, 0xa4, 0x31, 0x89, 0x0a, 0xdb, 0x80, 0x94, 0xd5, 0xf3, 0xcd, 0x31, 0x22,
+ 0x81, 0xd9, 0xea, 0xc9, 0xed, 0xdd, 0x7e, 0xec, 0xe5, 0xdd, 0x7e, 0x79, 0x68, 0xfa, 0xa3, 0x69,
+ 0x5f, 0x1a, 0x38, 0xe3, 0xe3, 0x91, 0xee, 0x8d, 0xcc, 0x81, 0x33, 0x71, 0x8f, 0x3d, 0x34, 0xf9,
+ 0x81, 0xfc, 0x48, 0x8a, 0x3e, 0x76, 0x9d, 0x89, 0xdf, 0xc1, 0x2b, 0xb5, 0xa4, 0x15, 0xfc, 0x71,
+ 0x7b, 0x90, 0xb1, 0x1d, 0x03, 0xf5, 0x6c, 0x1d, 0x47, 0x4b, 0xe0, 0x68, 0x19, 0x2d, 0x1d, 0x0c,
+ 0xa8, 0xb8, 0xcf, 0x7d, 0x05, 0x10, 0x8a, 0xe9, 0x99, 0x06, 0xcf, 0x06, 0xb3, 0xd5, 0xfc, 0xc3,
+ 0xdd, 0x7e, 0x26, 0x14, 0xd6, 0xa8, 0x6b, 0x51, 0xfd, 0x1a, 0x86, 0x78, 0xc3, 0x00, 0x1b, 0x88,
+ 0xe4, 0x4a, 0xb0, 0xd1, 0x50, 0x2f, 0x2b, 0x4a, 0xa3, 0x5e, 0x88, 0x09, 0x7b, 0xbf, 0xfc, 0x7e,
+ 0xb0, 0x4b, 0x27, 0x12, 0x20, 0x0d, 0xfb, 0x5a, 0xb7, 0x4c, 0x83, 0x13, 0x81, 0x3d, 0x6b, 0x36,
+ 0xd4, 0x02, 0x23, 0xf0, 0x18, 0xdb, 0x5e, 0xc7, 0xce, 0x1c, 0xd3, 0xe6, 0x0e, 0x21, 0xa9, 0xc8,
+ 0x95, 0x4b, 0xb9, 0x10, 0x17, 0x3e, 0xc1, 0xd0, 0xc7, 0xeb, 0x90, 0x82, 0xf4, 0x6b, 0x24, 0xe4,
+ 0x6e, 0xfe, 0x28, 0xc6, 0xfe, 0xfe, 0xb3, 0x48, 0x14, 0x88, 0xff, 0x30, 0xcb, 0x1a, 0xdb, 0xfe,
+ 0x64, 0xb6, 0x96, 0x09, 0xf3, 0x7c, 0x26, 0xef, 0xad, 0xbe, 0x3c, 0x6c, 0x58, 0x58, 0xbd, 0x69,
+ 0x0f, 0x49, 0x71, 0xd3, 0x5a, 0xd4, 0x15, 0x7f, 0x65, 0x60, 0x33, 0x94, 0xd6, 0x9a, 0x7a, 0xa3,
+ 0xd6, 0xd4, 0xb2, 0x28, 0x55, 0xcc, 0xdb, 0xaa, 0x3a, 0x85, 0x74, 0x98, 0xad, 0x87, 0x53, 0x4c,
+ 0x94, 0xb2, 0x27, 0xbb, 0x4f, 0xd8, 0x2e, 0xa8, 0x9c, 0xb6, 0x00, 0xc5, 0x7f, 0x13, 0x00, 0x1d,
+ 0xbd, 0x6f, 0xa1, 0xb9, 0x6d, 0xa5, 0x15, 0xdb, 0x0a, 0xd4, 0xfa, 0x25, 0xf4, 0xc1, 0x9b, 0x96,
+ 0xfb, 0x0c, 0xc0, 0x0f, 0xe4, 0xce, 0x63, 0x25, 0x49, 0xac, 0x0c, 0x19, 0x21, 0xc1, 0x0a, 0x90,
+ 0xb8, 0x42, 0x33, 0x3e, 0x45, 0xc6, 0x83, 0x26, 0xb7, 0x0d, 0x49, 0xec, 0xdd, 0x29, 0xe2, 0x37,
+ 0xc8, 0x99, 0x9e, 0x77, 0xc4, 0xbf, 0x22, 0xef, 0x1f, 0xd1, 0xde, 0x27, 0x7e, 0x5d, 0x56, 0x83,
+ 0x76, 0xfe, 0x21, 0xa4, 0x6a, 0x9a, 0x5c, 0xe9, 0xc8, 0x91, 0xf7, 0x57, 0xb1, 0xda, 0x04, 0xe9,
+ 0x3e, 0x0a, 0xa8, 0x6e, 0xab, 0x1e, 0x50, 0xf1, 0xa7, 0xa8, 0xae, 0x6b, 0x84, 0x54, 0x5d, 0x56,
+ 0x64, 0x4c, 0x25, 0x9e, 0xa2, 0xea, 0xc8, 0x42, 0xfe, 0xfa, 0x09, 0xf9, 0x1f, 0x1b, 0xac, 0x3a,
+ 0xb5, 0xae, 0xda, 0x33, 0x7b, 0x10, 0xdd, 0x6c, 0xef, 0xd0, 0x60, 0x07, 0x90, 0x9d, 0xda, 0x9e,
+ 0x63, 0x99, 0x03, 0xd3, 0x47, 0x06, 0xd9, 0xf1, 0xb4, 0x46, 0x0f, 0x3d, 0xbf, 0x87, 0x02, 0xe5,
+ 0x4f, 0x16, 0xfb, 0x33, 0xb3, 0xb4, 0x61, 0x70, 0x68, 0x5c, 0x7d, 0x66, 0x39, 0xba, 0x41, 0xb6,
+ 0x2b, 0xa7, 0x45, 0x5d, 0xf1, 0x67, 0x9c, 0x53, 0xcd, 0xc1, 0x5a, 0xa6, 0xb6, 0x11, 0xe5, 0x54,
+ 0x87, 0xf4, 0x78, 0xde, 0xf4, 0x70, 0x56, 0x81, 0xd3, 0x4b, 0x94, 0x53, 0xd7, 0x68, 0xa9, 0x6d,
+ 0x8e, 0x5d, 0x0b, 0x85, 0x3d, 0x6d, 0xb1, 0x52, 0xf8, 0x12, 0xf2, 0x2b, 0x53, 0x81, 0x88, 0x56,
+ 0x28, 0x82, 0x59, 0x11, 0x51, 0xfe, 0x2d, 0x0e, 0x59, 0xea, 0x21, 0xe0, 0x3e, 0xa7, 0x0d, 0xb1,
+ 0x83, 0x77, 0x87, 0xa3, 0x66, 0x23, 0x37, 0x48, 0x90, 0x57, 0xe5, 0xce, 0x77, 0x4d, 0xed, 0xbc,
+ 0x27, 0x5f, 0xca, 0x6a, 0x07, 0x9b, 0x82, 0xdc, 0x9b, 0x14, 0xba, 0xf2, 0x64, 0x94, 0x21, 0xdb,
+ 0xa9, 0x54, 0x15, 0x39, 0xa4, 0xc3, 0x9b, 0x91, 0xa2, 0xa9, 0x73, 0x7a, 0x04, 0x99, 0x56, 0xb7,
+ 0xfd, 0x6d, 0xaf, 0xd5, 0x55, 0x14, 0x6c, 0x90, 0x5d, 0x4c, 0x6e, 0x51, 0xe4, 0xe2, 0x7a, 0xc1,
+ 0x5c, 0xb5, 0xab, 0x9c, 0xf7, 0xda, 0xdf, 0xab, 0xb5, 0x02, 0xfb, 0x88, 0x8b, 0xcc, 0xc2, 0x7d,
+ 0x01, 0xe9, 0x5a, 0xf3, 0xa2, 0xd5, 0xec, 0xaa, 0xf5, 0x42, 0xf2, 0x11, 0x16, 0x55, 0x54, 0xd8,
+ 0x0a, 0xed, 0x46, 0x17, 0xa3, 0xca, 0xbf, 0xb8, 0x2f, 0xc6, 0x5e, 0xdf, 0x17, 0x99, 0x9f, 0x1e,
+ 0x8a, 0xcc, 0x2d, 0xfe, 0xfe, 0xc3, 0xdf, 0x2b, 0xfc, 0xf5, 0x53, 0xe4, 0xb5, 0x3d, 0x7d, 0x13,
+ 0x00, 0x00, 0xff, 0xff, 0x7d, 0x9c, 0x5f, 0x56, 0xa1, 0x07, 0x00, 0x00,
+}
diff --git a/vendor/src/github.com/docker/libnetwork/networkdb/networkdb.proto b/vendor/src/github.com/docker/libnetwork/networkdb/networkdb.proto
new file mode 100644
index 0000000000..dbc7a921d0
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/networkdb/networkdb.proto
@@ -0,0 +1,156 @@
+syntax = "proto3";
+
+import "gogoproto/gogo.proto";
+
+package networkdb;
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.stringer_all) = true;
+option (gogoproto.gostring_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.goproto_stringer_all) = false;
+
+// MessageType enum defines all the core message types that networkdb
+// uses to communicate with peers.
+enum MessageType {
+ option (gogoproto.goproto_enum_prefix) = false;
+ option (gogoproto.enum_customname) = "MessageType";
+
+ INVALID = 0 [(gogoproto.enumvalue_customname) = "MessageTypeInvalid"];
+
+ // NetworkEvent message type is used to communicate network
+ // attachments on the node.
+ NETWORK_EVENT = 1 [(gogoproto.enumvalue_customname) = "MessageTypeNetworkEvent"];
+
+ // TableEvent message type is used to communicate any table
+ // CRUD event that happened on the node.
+ TABLE_EVENT = 2 [(gogoproto.enumvalue_customname) = "MessageTypeTableEvent"];
+
+ // PushPull message type is used to sync up all network
+ // attachments on a peer node either during startup of this
+ // node or with a random peer node periodically thereafter.
+ PUSH_PULL = 3 [(gogoproto.enumvalue_customname) = "MessageTypePushPull"];
+
+ // BulkSync message is used to bulk sync the whole networkdb
+ // state with a peer node during startup of this node or with
+ // a random peer node periodically thereafter.
+ BULK_SYNC = 4 [(gogoproto.enumvalue_customname) = "MessageTypeBulkSync"];
+
+ // Compound message type is used to form a compound message:
+ // a pack of many messages of the above types, carried in a
+ // single payload.
+ COMPOUND = 5 [(gogoproto.enumvalue_customname) = "MessageTypeCompound"];
+}
+
+// GossipMessage is a basic message header used by all message types.
+message GossipMessage {
+ MessageType type = 1; // type defines one of the message types defined above.
+ bytes data = 2; // Payload of the message of any type defined here.
+}
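GossipMessage is the envelope for everything networkdb gossips: the concrete event is marshaled first, then tagged and wrapped. A hedged sketch of that wrapping, assuming the Marshal methods gogoproto generates from this file; the table name, key and value are hypothetical:

    package networkdb

    import serf "github.com/hashicorp/serf/serf"

    // wrapTableEvent marshals a table CRUD event, then tags it inside
    // a GossipMessage so a receiver knows how to decode the payload.
    func wrapTableEvent(lt serf.LamportTime, node, nid string) ([]byte, error) {
    	ev := &TableEvent{
    		Type:      TableEventTypeCreate,
    		LTime:     lt,
    		NodeName:  node,
    		NetworkID: nid,
    		TableName: "endpoint_table", // hypothetical table name
    		Key:       "ep-1",           // hypothetical key
    		Value:     []byte("some value"),
    	}
    	payload, err := ev.Marshal()
    	if err != nil {
    		return nil, err
    	}
    	return (&GossipMessage{Type: MessageTypeTableEvent, Data: payload}).Marshal()
    }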
+
+// NetworkEvent message payload definition.
+message NetworkEvent {
+ enum Type {
+ option (gogoproto.goproto_enum_prefix) = false;
+ option (gogoproto.enum_customname) = "Type";
+
+ INVALID = 0 [(gogoproto.enumvalue_customname) = "NetworkEventTypeInvalid"];
+ // Join event is generated when this node joins a network.
+ JOIN = 1 [(gogoproto.enumvalue_customname) = "NetworkEventTypeJoin"];
+ // Leave event is generated when this node leaves a network.
+ LEAVE = 2 [(gogoproto.enumvalue_customname) = "NetworkEventTypeLeave"];
+ }
+
+ Type type = 1;
+
+ // Lamport time, using a network lamport clock, indicating when
+ // this event was generated on the originating node.
+ uint64 l_time = 2 [(gogoproto.customtype) = "github.com/hashicorp/serf/serf.LamportTime", (gogoproto.nullable) = false];
+ // Source node name.
+ string node_name = 3;
+ // ID of the network for which the event is generated.
+ string network_id = 4 [(gogoproto.customname) = "NetworkID"];
+}
+
+// NetworkEntry for push pull of networks.
+message NetworkEntry {
+ // ID of the network
+ string network_id = 1 [(gogoproto.customname) = "NetworkID"];
+ // Latest lamport time of the network attachment when this
+ // network event was recorded.
+ uint64 l_time = 2 [(gogoproto.customtype) = "github.com/hashicorp/serf/serf.LamportTime", (gogoproto.nullable) = false];
+ // Source node name where this network attachment happened.
+ string node_name = 3;
+ // Indicates if a leave from this network is in progress.
+ bool leaving = 4;
+}
+
+// NetworkPushPull message payload definition.
+message NetworkPushPull {
+ // Lamport time when this push pull was initiated.
+ uint64 l_time = 1 [(gogoproto.customtype) = "github.com/hashicorp/serf/serf.LamportTime", (gogoproto.nullable) = false];
+ repeated NetworkEntry networks = 2;
+}
+
+// TableEvent message payload definition.
+message TableEvent {
+ enum Type {
+ option (gogoproto.goproto_enum_prefix) = false;
+ option (gogoproto.enum_customname) = "Type";
+
+ INVALID = 0 [(gogoproto.enumvalue_customname) = "TableEventTypeInvalid"];
+ // Create signifies that this table entry was just
+ // created.
+ CREATE = 1 [(gogoproto.enumvalue_customname) = "TableEventTypeCreate"];
+ // Update signifies that this table entry was just
+ // updated.
+ UPDATE = 2 [(gogoproto.enumvalue_customname) = "TableEventTypeUpdate"];
+ // Delete signifies that this table entry was just
+ // deleted.
+ DELETE = 3 [(gogoproto.enumvalue_customname) = "TableEventTypeDelete"];
+ }
+
+ Type type = 1;
+ // Lamport time when this event was generated.
+ uint64 l_time = 2 [(gogoproto.customtype) = "github.com/hashicorp/serf/serf.LamportTime", (gogoproto.nullable) = false];
+ // Node name where this event originated.
+ string node_name = 3;
+ // ID of the network to which this table entry belongs.
+ string network_id = 4 [(gogoproto.customname) = "NetworkID"];
+ // Name of the table to which this table entry belongs.
+ string table_name = 5;
+ // Entry key.
+ string key = 6;
+ // Entry value.
+ bytes value = 7;
+}
+
+// BulkSync message payload definition.
+message BulkSyncMessage {
+ // Lamport time when this bulk sync was initiated.
+ uint64 l_time = 1 [(gogoproto.customtype) = "github.com/hashicorp/serf/serf.LamportTime", (gogoproto.nullable) = false];
+ // Indicates if this bulk sync is a response to a bulk sync
+ // request from a peer node.
+ bool unsolicited = 2;
+ // Name of the node which is producing this bulk sync message.
+ string node_name = 3;
+ // List of network names whose table entries are included
+ // in this bulk sync.
+ repeated string networks = 4;
+ // Bulk sync payload.
+ bytes payload = 5;
+}
+
+// Compound message payload definition.
+message CompoundMessage {
+ message SimpleMessage {
+ // Bytestring payload of a message constructed using
+ // other message type definitions.
+ bytes Payload = 1;
+ }
+
+ // A list of simple messages.
+ repeated SimpleMessage messages = 1;
+} \ No newline at end of file
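A compound message lets several small gossip payloads share a single transport packet. A short packing sketch, again assuming the generated networkdb types:

    // Inside package networkdb, with the generated types in scope.
    func packCompound(payloads ...[]byte) ([]byte, error) {
    	c := &CompoundMessage{}
    	for _, p := range payloads {
    		c.Messages = append(c.Messages, &CompoundMessage_SimpleMessage{Payload: p})
    	}
    	return c.Marshal()
    }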
diff --git a/vendor/src/github.com/docker/libnetwork/ns/init_linux.go b/vendor/src/github.com/docker/libnetwork/ns/init_linux.go
index 7595a5da17..ea0cfc0593 100644
--- a/vendor/src/github.com/docker/libnetwork/ns/init_linux.go
+++ b/vendor/src/github.com/docker/libnetwork/ns/init_linux.go
@@ -3,13 +3,19 @@ package ns
import (
"fmt"
"os"
+ "sync"
"syscall"
log "github.com/Sirupsen/logrus"
+ "github.com/vishvananda/netlink"
"github.com/vishvananda/netns"
)
-var initNs netns.NsHandle
+var (
+ initNs netns.NsHandle
+ initNl *netlink.Handle
+ initOnce sync.Once
+)
// Init initializes a new network namespace
func Init() {
@@ -18,6 +24,10 @@ func Init() {
if err != nil {
log.Errorf("could not get initial namespace: %v", err)
}
+ initNl, err = netlink.NewHandle()
+ if err != nil {
+ log.Errorf("could not create netlink handle on initial namespace: %v", err)
+ }
}
// SetNamespace sets the initial namespace handler
@@ -27,17 +37,28 @@ func SetNamespace() error {
if linkErr != nil {
linkInfo = linkErr.Error()
}
-
return fmt.Errorf("failed to set to initial namespace, %v, initns fd %d: %v", linkInfo, initNs, err)
}
return nil
}
-// ParseHandlerInt transforms the namespace handler into a integer
+// ParseHandlerInt transforms the namespace handler into an integer
func ParseHandlerInt() int {
- return int(initNs)
+ return int(getHandler())
+}
+
+// getHandler returns the namespace handler
+func getHandler() netns.NsHandle {
+ initOnce.Do(Init)
+ return initNs
}
func getLink() (string, error) {
return os.Readlink(fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid()))
}
+
+// NlHandle returns the netlink handle
+func NlHandle() *netlink.Handle {
+ initOnce.Do(Init)
+ return initNl
+}
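The hunk above replaces eager package-level state with lazy initialization: every accessor funnels through sync.Once, so the first caller runs Init and later callers reuse the handles without racing it. The same idiom in isolation (names illustrative):

    package main

    import (
    	"fmt"
    	"sync"
    )

    var (
    	handle   int // stands in for the netns/netlink handles
    	initOnce sync.Once
    )

    func initHandle() { handle = 42 }

    // Handle is safe to call from any goroutine at any time: sync.Once
    // guarantees initHandle runs exactly once, before handle is read.
    func Handle() int {
    	initOnce.Do(initHandle)
    	return handle
    }

    func main() { fmt.Println(Handle()) }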
diff --git a/vendor/src/github.com/docker/libnetwork/osl/interface_linux.go b/vendor/src/github.com/docker/libnetwork/osl/interface_linux.go
index 081ff2eda9..63fcf5561d 100644
--- a/vendor/src/github.com/docker/libnetwork/osl/interface_linux.go
+++ b/vendor/src/github.com/docker/libnetwork/osl/interface_linux.go
@@ -3,14 +3,15 @@ package osl
import (
"fmt"
"net"
- "os/exec"
"regexp"
"sync"
"syscall"
log "github.com/Sirupsen/logrus"
+ "github.com/docker/libnetwork/ns"
"github.com/docker/libnetwork/types"
"github.com/vishvananda/netlink"
+ "github.com/vishvananda/netns"
)
// IfaceOption is a function option type to set interface options
@@ -24,6 +25,7 @@ type nwIface struct {
mac net.HardwareAddr
address *net.IPNet
addressIPv6 *net.IPNet
+ llAddrs []*net.IPNet
routes []*net.IPNet
bridge bool
ns *networkNamespace
@@ -86,6 +88,13 @@ func (i *nwIface) AddressIPv6() *net.IPNet {
return types.GetIPNetCopy(i.addressIPv6)
}
+func (i *nwIface) LinkLocalAddresses() []*net.IPNet {
+ i.Lock()
+ defer i.Unlock()
+
+ return i.llAddrs
+}
+
func (i *nwIface) Routes() []*net.IPNet {
i.Lock()
defer i.Unlock()
@@ -118,52 +127,50 @@ func (i *nwIface) Remove() error {
i.Unlock()
n.Lock()
- path := n.path
isDefault := n.isDefault
+ nlh := n.nlHandle
n.Unlock()
- return nsInvoke(path, func(nsFD int) error { return nil }, func(callerFD int) error {
- // Find the network inteerface identified by the DstName attribute.
- iface, err := netlink.LinkByName(i.DstName())
- if err != nil {
- return err
- }
+ // Find the network interface identified by the DstName attribute.
+ iface, err := nlh.LinkByName(i.DstName())
+ if err != nil {
+ return err
+ }
- // Down the interface before configuring
- if err := netlink.LinkSetDown(iface); err != nil {
- return err
- }
+ // Down the interface before configuring
+ if err := nlh.LinkSetDown(iface); err != nil {
+ return err
+ }
- err = netlink.LinkSetName(iface, i.SrcName())
- if err != nil {
- log.Debugf("LinkSetName failed for interface %s: %v", i.SrcName(), err)
- return err
- }
+ err = nlh.LinkSetName(iface, i.SrcName())
+ if err != nil {
+ log.Debugf("LinkSetName failed for interface %s: %v", i.SrcName(), err)
+ return err
+ }
- // if it is a bridge just delete it.
- if i.Bridge() {
- if err := netlink.LinkDel(iface); err != nil {
- return fmt.Errorf("failed deleting bridge %q: %v", i.SrcName(), err)
- }
- } else if !isDefault {
- // Move the network interface to caller namespace.
- if err := netlink.LinkSetNsFd(iface, callerFD); err != nil {
- log.Debugf("LinkSetNsPid failed for interface %s: %v", i.SrcName(), err)
- return err
- }
+ // if it is a bridge just delete it.
+ if i.Bridge() {
+ if err := nlh.LinkDel(iface); err != nil {
+ return fmt.Errorf("failed deleting bridge %q: %v", i.SrcName(), err)
}
+ } else if !isDefault {
+ // Move the network interface to caller namespace.
+ if err := nlh.LinkSetNsFd(iface, ns.ParseHandlerInt()); err != nil {
+ log.Debugf("LinkSetNsPid failed for interface %s: %v", i.SrcName(), err)
+ return err
+ }
+ }
- n.Lock()
- for index, intf := range n.iFaces {
- if intf == i {
- n.iFaces = append(n.iFaces[:index], n.iFaces[index+1:]...)
- break
- }
+ n.Lock()
+ for index, intf := range n.iFaces {
+ if intf == i {
+ n.iFaces = append(n.iFaces[:index], n.iFaces[index+1:]...)
+ break
}
- n.Unlock()
+ }
+ n.Unlock()
- return nil
- })
+ return nil
}
// Returns the sandbox's side veth interface statistics
@@ -172,28 +179,24 @@ func (i *nwIface) Statistics() (*types.InterfaceStatistics, error) {
n := i.ns
i.Unlock()
- n.Lock()
- path := n.path
- n.Unlock()
-
- s := &types.InterfaceStatistics{}
-
- err := nsInvoke(path, func(nsFD int) error { return nil }, func(callerFD int) error {
- // For some reason ioutil.ReadFile(netStatsFile) reads the file in
- // the default netns when this code is invoked from docker.
- // Executing "cat <netStatsFile>" works as expected.
- data, err := exec.Command("cat", netStatsFile).Output()
- if err != nil {
- return fmt.Errorf("failure opening %s: %v", netStatsFile, err)
- }
- return scanInterfaceStats(string(data), i.DstName(), s)
- })
-
+ l, err := n.nlHandle.LinkByName(i.DstName())
if err != nil {
- err = fmt.Errorf("failed to retrieve the statistics for %s in netns %s: %v", i.DstName(), path, err)
+ return nil, fmt.Errorf("failed to retrieve the statistics for %s in netns %s: %v", i.DstName(), n.path, err)
}
- return s, err
+ stats := l.Attrs().Statistics
+ if stats == nil {
+ return nil, fmt.Errorf("no statistics were returned")
+ }
+
+ return &types.InterfaceStatistics{
+ RxBytes: uint64(stats.RxBytes),
+ TxBytes: uint64(stats.TxBytes),
+ RxPackets: uint64(stats.RxPackets),
+ TxPackets: uint64(stats.TxPackets),
+ RxDropped: uint64(stats.RxDropped),
+ TxDropped: uint64(stats.TxDropped),
+ }, nil
}
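The rewritten Statistics no longer shells out inside the namespace to read /proc; the per-sandbox netlink handle returns the kernel counters directly on the link attributes. A hedged usage sketch of that lookup path, using the default-namespace handle and a fixed interface name for brevity:

    package main

    import (
    	"fmt"

    	"github.com/vishvananda/netlink"
    )

    // linkCounters follows the same lookup path as the new Statistics
    // method: resolve the link by name, then read the kernel counters
    // cached on its attributes. The sandbox code goes through its
    // per-namespace n.nlHandle instead of the default handle.
    func linkCounters(name string) (rx, tx uint64, err error) {
    	l, err := netlink.LinkByName(name)
    	if err != nil {
    		return 0, 0, err
    	}
    	s := l.Attrs().Statistics
    	if s == nil {
    		return 0, 0, fmt.Errorf("no statistics were returned")
    	}
    	return uint64(s.RxBytes), uint64(s.TxBytes), nil
    }

    func main() {
    	rx, tx, err := linkCounters("lo")
    	fmt.Println(rx, tx, err)
    }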
func (n *networkNamespace) findDst(srcName string, isBridge bool) string {
@@ -233,17 +236,24 @@ func (n *networkNamespace) AddInterface(srcName, dstPrefix string, options ...If
path := n.path
isDefault := n.isDefault
+ nlh := n.nlHandle
+ nlhHost := ns.NlHandle()
n.Unlock()
- return nsInvoke(path, func(nsFD int) error {
- // If it is a bridge interface we have to create the bridge inside
- // the namespace so don't try to lookup the interface using srcName
- if i.bridge {
- return nil
+ // If it is a bridge interface we have to create the bridge inside
+ // the namespace so don't try to lookup the interface using srcName
+ if i.bridge {
+ link := &netlink.Bridge{
+ LinkAttrs: netlink.LinkAttrs{
+ Name: i.srcName,
+ },
}
-
+ if err := nlh.LinkAdd(link); err != nil {
+ return fmt.Errorf("failed to create bridge %q: %v", i.srcName, err)
+ }
+ } else {
// Find the network interface identified by the SrcName attribute.
- iface, err := netlink.LinkByName(i.srcName)
+ iface, err := nlhHost.LinkByName(i.srcName)
if err != nil {
return fmt.Errorf("failed to get link by name %q: %v", i.srcName, err)
}
@@ -252,63 +262,54 @@ func (n *networkNamespace) AddInterface(srcName, dstPrefix string, options ...If
// namespace only if the namespace is not a default
// type
if !isDefault {
- if err := netlink.LinkSetNsFd(iface, nsFD); err != nil {
- return fmt.Errorf("failed to set namespace on link %q: %v", i.srcName, err)
- }
- }
-
- return nil
- }, func(callerFD int) error {
- if i.bridge {
- link := &netlink.Bridge{
- LinkAttrs: netlink.LinkAttrs{
- Name: i.srcName,
- },
+ newNs, err := netns.GetFromPath(path)
+ if err != nil {
+ return fmt.Errorf("failed get network namespace %q: %v", path, err)
}
-
- if err := netlink.LinkAdd(link); err != nil {
- return fmt.Errorf("failed to create bridge %q: %v", i.srcName, err)
+ defer newNs.Close()
+ if err := nlhHost.LinkSetNsFd(iface, int(newNs)); err != nil {
+ return fmt.Errorf("failed to set namespace on link %q: %v", i.srcName, err)
}
}
+ }
- // Find the network interface identified by the SrcName attribute.
- iface, err := netlink.LinkByName(i.srcName)
- if err != nil {
- return fmt.Errorf("failed to get link by name %q: %v", i.srcName, err)
- }
+ // Find the network interface identified by the SrcName attribute.
+ iface, err := nlh.LinkByName(i.srcName)
+ if err != nil {
+ return fmt.Errorf("failed to get link by name %q: %v", i.srcName, err)
+ }
- // Down the interface before configuring
- if err := netlink.LinkSetDown(iface); err != nil {
- return fmt.Errorf("failed to set link down: %v", err)
- }
+ // Down the interface before configuring
+ if err := nlh.LinkSetDown(iface); err != nil {
+ return fmt.Errorf("failed to set link down: %v", err)
+ }
- // Configure the interface now this is moved in the proper namespace.
- if err := configureInterface(iface, i); err != nil {
- return err
- }
+ // Configure the interface now this is moved in the proper namespace.
+ if err := configureInterface(nlh, iface, i); err != nil {
+ return err
+ }
- // Up the interface.
- if err := netlink.LinkSetUp(iface); err != nil {
- return fmt.Errorf("failed to set link up: %v", err)
- }
+ // Up the interface.
+ if err := nlh.LinkSetUp(iface); err != nil {
+ return fmt.Errorf("failed to set link up: %v", err)
+ }
- // Set the routes on the interface. This can only be done when the interface is up.
- if err := setInterfaceRoutes(iface, i); err != nil {
- return fmt.Errorf("error setting interface %q routes to %q: %v", iface.Attrs().Name, i.Routes(), err)
- }
+ // Set the routes on the interface. This can only be done when the interface is up.
+ if err := setInterfaceRoutes(nlh, iface, i); err != nil {
+ return fmt.Errorf("error setting interface %q routes to %q: %v", iface.Attrs().Name, i.Routes(), err)
+ }
- n.Lock()
- n.iFaces = append(n.iFaces, i)
- n.Unlock()
+ n.Lock()
+ n.iFaces = append(n.iFaces, i)
+ n.Unlock()
- return nil
- })
+ return nil
}
-func configureInterface(iface netlink.Link, i *nwIface) error {
+func configureInterface(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error {
ifaceName := iface.Attrs().Name
ifaceConfigurators := []struct {
- Fn func(netlink.Link, *nwIface) error
+ Fn func(*netlink.Handle, netlink.Link, *nwIface) error
ErrMessage string
}{
{setInterfaceName, fmt.Sprintf("error renaming interface %q to %q", ifaceName, i.DstName())},
@@ -316,56 +317,67 @@ func configureInterface(iface netlink.Link, i *nwIface) error {
{setInterfaceIP, fmt.Sprintf("error setting interface %q IP to %v", ifaceName, i.Address())},
{setInterfaceIPv6, fmt.Sprintf("error setting interface %q IPv6 to %v", ifaceName, i.AddressIPv6())},
{setInterfaceMaster, fmt.Sprintf("error setting interface %q master to %q", ifaceName, i.DstMaster())},
+ {setInterfaceLinkLocalIPs, fmt.Sprintf("error setting interface %q link local IPs to %v", ifaceName, i.LinkLocalAddresses())},
}
for _, config := range ifaceConfigurators {
- if err := config.Fn(iface, i); err != nil {
+ if err := config.Fn(nlh, iface, i); err != nil {
return fmt.Errorf("%s: %v", config.ErrMessage, err)
}
}
return nil
}
-func setInterfaceMaster(iface netlink.Link, i *nwIface) error {
+func setInterfaceMaster(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error {
if i.DstMaster() == "" {
return nil
}
- return netlink.LinkSetMaster(iface, &netlink.Bridge{
+ return nlh.LinkSetMaster(iface, &netlink.Bridge{
LinkAttrs: netlink.LinkAttrs{Name: i.DstMaster()}})
}
-func setInterfaceMAC(iface netlink.Link, i *nwIface) error {
+func setInterfaceMAC(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error {
if i.MacAddress() == nil {
return nil
}
- return netlink.LinkSetHardwareAddr(iface, i.MacAddress())
+ return nlh.LinkSetHardwareAddr(iface, i.MacAddress())
}
-func setInterfaceIP(iface netlink.Link, i *nwIface) error {
+func setInterfaceIP(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error {
if i.Address() == nil {
return nil
}
ipAddr := &netlink.Addr{IPNet: i.Address(), Label: ""}
- return netlink.AddrAdd(iface, ipAddr)
+ return nlh.AddrAdd(iface, ipAddr)
}
-func setInterfaceIPv6(iface netlink.Link, i *nwIface) error {
+func setInterfaceIPv6(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error {
if i.AddressIPv6() == nil {
return nil
}
ipAddr := &netlink.Addr{IPNet: i.AddressIPv6(), Label: "", Flags: syscall.IFA_F_NODAD}
- return netlink.AddrAdd(iface, ipAddr)
+ return nlh.AddrAdd(iface, ipAddr)
+}
+
+func setInterfaceLinkLocalIPs(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error {
+ for _, llIP := range i.LinkLocalAddresses() {
+ ipAddr := &netlink.Addr{IPNet: llIP}
+ if err := nlh.AddrAdd(iface, ipAddr); err != nil {
+ return err
+ }
+ }
+ return nil
}
-func setInterfaceName(iface netlink.Link, i *nwIface) error {
- return netlink.LinkSetName(iface, i.DstName())
+func setInterfaceName(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error {
+ return nlh.LinkSetName(iface, i.DstName())
}
-func setInterfaceRoutes(iface netlink.Link, i *nwIface) error {
+func setInterfaceRoutes(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error {
for _, route := range i.Routes() {
- err := netlink.RouteAdd(&netlink.Route{
+ err := nlh.RouteAdd(&netlink.Route{
Scope: netlink.SCOPE_LINK,
LinkIndex: iface.Attrs().Index,
Dst: route,
diff --git a/vendor/src/github.com/docker/libnetwork/osl/namespace_linux.go b/vendor/src/github.com/docker/libnetwork/osl/namespace_linux.go
index 07b725c290..c804caf783 100644
--- a/vendor/src/github.com/docker/libnetwork/osl/namespace_linux.go
+++ b/vendor/src/github.com/docker/libnetwork/osl/namespace_linux.go
@@ -42,6 +42,7 @@ type networkNamespace struct {
neighbors []*neigh
nextIfIndex int
isDefault bool
+ nlHandle *netlink.Handle
sync.Mutex
}
@@ -147,7 +148,25 @@ func NewSandbox(key string, osCreate bool) (Sandbox, error) {
return nil, err
}
- return &networkNamespace{path: key, isDefault: !osCreate}, nil
+ n := &networkNamespace{path: key, isDefault: !osCreate}
+
+ sboxNs, err := netns.GetFromPath(n.path)
+ if err != nil {
+ return nil, fmt.Errorf("failed get network namespace %q: %v", n.path, err)
+ }
+ defer sboxNs.Close()
+
+ n.nlHandle, err = netlink.NewHandleAt(sboxNs)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create a netlink handle: %v", err)
+ }
+
+ if err = n.loopbackUp(); err != nil {
+ n.nlHandle.Delete()
+ return nil, err
+ }
+
+ return n, nil
}
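NewSandbox now pins one netlink socket inside the namespace up front instead of switching namespaces per operation: open the namespace by path, bind a handle in it, then close the namespace fd, since the handle holds its own reference. A minimal sketch of that lifecycle; the path is hypothetical:

    package main

    import (
    	"fmt"

    	"github.com/vishvananda/netlink"
    	"github.com/vishvananda/netns"
    )

    // openNsHandle opens the namespace by its bind-mount path, binds a
    // netlink socket inside it, and closes the namespace fd again: the
    // returned handle keeps its own reference until Delete is called.
    func openNsHandle(path string) (*netlink.Handle, error) {
    	nsh, err := netns.GetFromPath(path)
    	if err != nil {
    		return nil, fmt.Errorf("failed to get network namespace %q: %v", path, err)
    	}
    	defer nsh.Close()

    	return netlink.NewHandleAt(nsh)
    }

    func main() {
    	h, err := openNsHandle("/var/run/netns/example") // hypothetical path
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	defer h.Delete() // release the socket when done
    	fmt.Println("handle ready")
    }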
func (n *networkNamespace) InterfaceOptions() IfaceOptionSetter {
@@ -159,30 +178,37 @@ func (n *networkNamespace) NeighborOptions() NeighborOptionSetter {
}
func mountNetworkNamespace(basePath string, lnPath string) error {
- if err := syscall.Mount(basePath, lnPath, "bind", syscall.MS_BIND, ""); err != nil {
- return err
- }
-
- if err := loopbackUp(); err != nil {
- return err
- }
- return nil
+ return syscall.Mount(basePath, lnPath, "bind", syscall.MS_BIND, "")
}
// GetSandboxForExternalKey returns sandbox object for the supplied path
func GetSandboxForExternalKey(basePath string, key string) (Sandbox, error) {
- var err error
- if err = createNamespaceFile(key); err != nil {
+ if err := createNamespaceFile(key); err != nil {
return nil, err
}
- n := &networkNamespace{path: basePath}
- n.InvokeFunc(func() {
- err = mountNetworkNamespace(basePath, key)
- })
+
+ if err := mountNetworkNamespace(basePath, key); err != nil {
+ return nil, err
+ }
+ n := &networkNamespace{path: key}
+
+ sboxNs, err := netns.GetFromPath(n.path)
+ if err != nil {
+ return nil, fmt.Errorf("failed get network namespace %q: %v", n.path, err)
+ }
+ defer sboxNs.Close()
+
+ n.nlHandle, err = netlink.NewHandleAt(sboxNs)
if err != nil {
+ return nil, fmt.Errorf("failed to create a netlink handle: %v", err)
+ }
+
+ if err = n.loopbackUp(); err != nil {
+ n.nlHandle.Delete()
return nil, err
}
- return &networkNamespace{path: key}, nil
+
+ return n, nil
}
func reexecCreateNamespace() {
@@ -243,12 +269,12 @@ func createNamespaceFile(path string) (err error) {
return err
}
-func loopbackUp() error {
- iface, err := netlink.LinkByName("lo")
+func (n *networkNamespace) loopbackUp() error {
+ iface, err := n.nlHandle.LinkByName("lo")
if err != nil {
return err
}
- return netlink.LinkSetUp(iface)
+ return n.nlHandle.LinkSetUp(iface)
}
func (n *networkNamespace) InvokeFunc(f func()) error {
@@ -260,33 +286,30 @@ func (n *networkNamespace) InvokeFunc(f func()) error {
// InitOSContext initializes OS context while configuring network resources
func InitOSContext() func() {
- runtime.LockOSThread()
nsOnce.Do(ns.Init)
+ runtime.LockOSThread()
if err := ns.SetNamespace(); err != nil {
log.Error(err)
}
-
return runtime.UnlockOSThread
}
func nsInvoke(path string, prefunc func(nsFD int) error, postfunc func(callerFD int) error) error {
defer InitOSContext()()
- f, err := os.OpenFile(path, os.O_RDONLY, 0)
+ newNs, err := netns.GetFromPath(path)
if err != nil {
return fmt.Errorf("failed get network namespace %q: %v", path, err)
}
- defer f.Close()
-
- nsFD := f.Fd()
+ defer newNs.Close()
// Invoked before the namespace switch happens but after the namespace file
// handle is obtained.
- if err := prefunc(int(nsFD)); err != nil {
+ if err := prefunc(int(newNs)); err != nil {
return fmt.Errorf("failed in prefunc: %v", err)
}
- if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
+ if err = netns.Set(newNs); err != nil {
return err
}
defer ns.SetNamespace()
@@ -311,6 +334,9 @@ func (n *networkNamespace) Key() string {
}
func (n *networkNamespace) Destroy() error {
+ if n.nlHandle != nil {
+ n.nlHandle.Delete()
+ }
// Assuming no running process is executing in this network namespace,
// unmounting is sufficient to destroy it.
if err := syscall.Unmount(n.path, syscall.MNT_DETACH); err != nil {
diff --git a/vendor/src/github.com/docker/libnetwork/osl/neigh_linux.go b/vendor/src/github.com/docker/libnetwork/osl/neigh_linux.go
index a221e712da..c5c6c103d0 100644
--- a/vendor/src/github.com/docker/libnetwork/osl/neigh_linux.go
+++ b/vendor/src/github.com/docker/libnetwork/osl/neigh_linux.go
@@ -33,53 +33,62 @@ func (n *networkNamespace) findNeighbor(dstIP net.IP, dstMac net.HardwareAddr) *
}
func (n *networkNamespace) DeleteNeighbor(dstIP net.IP, dstMac net.HardwareAddr) error {
+ var (
+ iface netlink.Link
+ err error
+ )
+
nh := n.findNeighbor(dstIP, dstMac)
if nh == nil {
return fmt.Errorf("could not find the neighbor entry to delete")
}
- return nsInvoke(n.nsPath(), func(nsFD int) error { return nil }, func(callerFD int) error {
- var iface netlink.Link
-
- if nh.linkDst != "" {
- var err error
- iface, err = netlink.LinkByName(nh.linkDst)
- if err != nil {
- return fmt.Errorf("could not find interface with destination name %s: %v",
- nh.linkDst, err)
- }
+ n.Lock()
+ nlh := n.nlHandle
+ n.Unlock()
+
+ if nh.linkDst != "" {
+ iface, err = nlh.LinkByName(nh.linkDst)
+ if err != nil {
+ return fmt.Errorf("could not find interface with destination name %s: %v",
+ nh.linkDst, err)
}
+ }
- nlnh := &netlink.Neigh{
- IP: dstIP,
- State: netlink.NUD_PERMANENT,
- Family: nh.family,
- }
+ nlnh := &netlink.Neigh{
+ IP: dstIP,
+ State: netlink.NUD_PERMANENT,
+ Family: nh.family,
+ }
- if nlnh.Family > 0 {
- nlnh.HardwareAddr = dstMac
- nlnh.Flags = netlink.NTF_SELF
- }
+ if nlnh.Family > 0 {
+ nlnh.HardwareAddr = dstMac
+ nlnh.Flags = netlink.NTF_SELF
+ }
- if nh.linkDst != "" {
- nlnh.LinkIndex = iface.Attrs().Index
- }
+ if nh.linkDst != "" {
+ nlnh.LinkIndex = iface.Attrs().Index
+ }
- if err := netlink.NeighDel(nlnh); err != nil {
- return fmt.Errorf("could not delete neighbor entry: %v", err)
- }
+ if err := nlh.NeighDel(nlnh); err != nil {
+ return fmt.Errorf("could not delete neighbor entry: %v", err)
+ }
- for i, nh := range n.neighbors {
- if nh.dstIP.Equal(dstIP) && bytes.Equal(nh.dstMac, dstMac) {
- n.neighbors = append(n.neighbors[:i], n.neighbors[i+1:]...)
- }
+ for i, nh := range n.neighbors {
+ if nh.dstIP.Equal(dstIP) && bytes.Equal(nh.dstMac, dstMac) {
+ n.neighbors = append(n.neighbors[:i], n.neighbors[i+1:]...)
}
+ }
- return nil
- })
+ return nil
}
func (n *networkNamespace) AddNeighbor(dstIP net.IP, dstMac net.HardwareAddr, options ...NeighOption) error {
+ var (
+ iface netlink.Link
+ err error
+ )
+
nh := n.findNeighbor(dstIP, dstMac)
if nh != nil {
// If it exists silently return
@@ -100,39 +109,38 @@ func (n *networkNamespace) AddNeighbor(dstIP net.IP, dstMac net.HardwareAddr, op
}
}
- return nsInvoke(n.nsPath(), func(nsFD int) error { return nil }, func(callerFD int) error {
- var iface netlink.Link
-
- if nh.linkDst != "" {
- var err error
- iface, err = netlink.LinkByName(nh.linkDst)
- if err != nil {
- return fmt.Errorf("could not find interface with destination name %s: %v",
- nh.linkDst, err)
- }
+ n.Lock()
+ nlh := n.nlHandle
+ n.Unlock()
+
+ if nh.linkDst != "" {
+ iface, err = nlh.LinkByName(nh.linkDst)
+ if err != nil {
+ return fmt.Errorf("could not find interface with destination name %s: %v",
+ nh.linkDst, err)
}
+ }
- nlnh := &netlink.Neigh{
- IP: dstIP,
- HardwareAddr: dstMac,
- State: netlink.NUD_PERMANENT,
- Family: nh.family,
- }
+ nlnh := &netlink.Neigh{
+ IP: dstIP,
+ HardwareAddr: dstMac,
+ State: netlink.NUD_PERMANENT,
+ Family: nh.family,
+ }
- if nlnh.Family > 0 {
- nlnh.Flags = netlink.NTF_SELF
- }
+ if nlnh.Family > 0 {
+ nlnh.Flags = netlink.NTF_SELF
+ }
- if nh.linkDst != "" {
- nlnh.LinkIndex = iface.Attrs().Index
- }
+ if nh.linkDst != "" {
+ nlnh.LinkIndex = iface.Attrs().Index
+ }
- if err := netlink.NeighSet(nlnh); err != nil {
- return fmt.Errorf("could not add neighbor entry: %v", err)
- }
+ if err := nlh.NeighSet(nlnh); err != nil {
+ return fmt.Errorf("could not add neighbor entry: %v", err)
+ }
- n.neighbors = append(n.neighbors, nh)
+ n.neighbors = append(n.neighbors, nh)
- return nil
- })
+ return nil
}
diff --git a/vendor/src/github.com/docker/libnetwork/osl/options_linux.go b/vendor/src/github.com/docker/libnetwork/osl/options_linux.go
index ea28e8b6be..818669647f 100644
--- a/vendor/src/github.com/docker/libnetwork/osl/options_linux.go
+++ b/vendor/src/github.com/docker/libnetwork/osl/options_linux.go
@@ -60,6 +60,12 @@ func (n *networkNamespace) AddressIPv6(addr *net.IPNet) IfaceOption {
}
}
+func (n *networkNamespace) LinkLocalAddresses(list []*net.IPNet) IfaceOption {
+ return func(i *nwIface) {
+ i.llAddrs = list
+ }
+}
+
func (n *networkNamespace) Routes(routes []*net.IPNet) IfaceOption {
return func(i *nwIface) {
i.routes = routes
diff --git a/vendor/src/github.com/docker/libnetwork/osl/route_linux.go b/vendor/src/github.com/docker/libnetwork/osl/route_linux.go
index 09a0a45f89..78d1f9a14f 100644
--- a/vendor/src/github.com/docker/libnetwork/osl/route_linux.go
+++ b/vendor/src/github.com/docker/libnetwork/osl/route_linux.go
@@ -53,7 +53,7 @@ func (n *networkNamespace) SetGateway(gw net.IP) error {
return nil
}
- err := programGateway(n.nsPath(), gw, true)
+ err := n.programGateway(gw, true)
if err == nil {
n.setGateway(gw)
}
@@ -69,7 +69,7 @@ func (n *networkNamespace) UnsetGateway() error {
return nil
}
- err := programGateway(n.nsPath(), gw, false)
+ err := n.programGateway(gw, false)
if err == nil {
n.setGateway(net.IP{})
}
@@ -77,60 +77,54 @@ func (n *networkNamespace) UnsetGateway() error {
return err
}
-func programGateway(path string, gw net.IP, isAdd bool) error {
- return nsInvoke(path, func(nsFD int) error { return nil }, func(callerFD int) error {
- gwRoutes, err := netlink.RouteGet(gw)
- if err != nil {
- return fmt.Errorf("route for the gateway %s could not be found: %v", gw, err)
- }
-
- if isAdd {
- return netlink.RouteAdd(&netlink.Route{
- Scope: netlink.SCOPE_UNIVERSE,
- LinkIndex: gwRoutes[0].LinkIndex,
- Gw: gw,
- })
- }
+func (n *networkNamespace) programGateway(gw net.IP, isAdd bool) error {
+ gwRoutes, err := n.nlHandle.RouteGet(gw)
+ if err != nil {
+ return fmt.Errorf("route for the gateway %s could not be found: %v", gw, err)
+ }
- return netlink.RouteDel(&netlink.Route{
+ if isAdd {
+ return n.nlHandle.RouteAdd(&netlink.Route{
Scope: netlink.SCOPE_UNIVERSE,
LinkIndex: gwRoutes[0].LinkIndex,
Gw: gw,
})
+ }
+
+ return n.nlHandle.RouteDel(&netlink.Route{
+ Scope: netlink.SCOPE_UNIVERSE,
+ LinkIndex: gwRoutes[0].LinkIndex,
+ Gw: gw,
})
}
// Program a route in to the namespace routing table.
-func programRoute(path string, dest *net.IPNet, nh net.IP) error {
- return nsInvoke(path, func(nsFD int) error { return nil }, func(callerFD int) error {
- gwRoutes, err := netlink.RouteGet(nh)
- if err != nil {
- return fmt.Errorf("route for the next hop %s could not be found: %v", nh, err)
- }
+func (n *networkNamespace) programRoute(path string, dest *net.IPNet, nh net.IP) error {
+ gwRoutes, err := n.nlHandle.RouteGet(nh)
+ if err != nil {
+ return fmt.Errorf("route for the next hop %s could not be found: %v", nh, err)
+ }
- return netlink.RouteAdd(&netlink.Route{
- Scope: netlink.SCOPE_UNIVERSE,
- LinkIndex: gwRoutes[0].LinkIndex,
- Gw: nh,
- Dst: dest,
- })
+ return n.nlHandle.RouteAdd(&netlink.Route{
+ Scope: netlink.SCOPE_UNIVERSE,
+ LinkIndex: gwRoutes[0].LinkIndex,
+ Gw: nh,
+ Dst: dest,
})
}
// Delete a route from the namespace routing table.
-func removeRoute(path string, dest *net.IPNet, nh net.IP) error {
- return nsInvoke(path, func(nsFD int) error { return nil }, func(callerFD int) error {
- gwRoutes, err := netlink.RouteGet(nh)
- if err != nil {
- return fmt.Errorf("route for the next hop could not be found: %v", err)
- }
+func (n *networkNamespace) removeRoute(path string, dest *net.IPNet, nh net.IP) error {
+ gwRoutes, err := n.nlHandle.RouteGet(nh)
+ if err != nil {
+ return fmt.Errorf("route for the next hop could not be found: %v", err)
+ }
- return netlink.RouteDel(&netlink.Route{
- Scope: netlink.SCOPE_UNIVERSE,
- LinkIndex: gwRoutes[0].LinkIndex,
- Gw: nh,
- Dst: dest,
- })
+ return n.nlHandle.RouteDel(&netlink.Route{
+ Scope: netlink.SCOPE_UNIVERSE,
+ LinkIndex: gwRoutes[0].LinkIndex,
+ Gw: nh,
+ Dst: dest,
})
}
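After this refactor, programGateway, programRoute and removeRoute share one shape: ask the kernel which link reaches the next hop via RouteGet, then add or delete a route over that link. A compile-checkable sketch of the add path on the default-namespace handle (the sandbox methods use n.nlHandle instead); it needs CAP_NET_ADMIN to actually succeed:

    package main

    import (
    	"fmt"
    	"net"

    	"github.com/vishvananda/netlink"
    )

    // addViaGateway resolves which link reaches gw, then installs a
    // route to dest through that link.
    func addViaGateway(dest *net.IPNet, gw net.IP) error {
    	gwRoutes, err := netlink.RouteGet(gw)
    	if err != nil {
    		return fmt.Errorf("route for the gateway %s could not be found: %v", gw, err)
    	}
    	return netlink.RouteAdd(&netlink.Route{
    		Scope:     netlink.SCOPE_UNIVERSE,
    		LinkIndex: gwRoutes[0].LinkIndex,
    		Gw:        gw,
    		Dst:       dest,
    	})
    }

    func main() {
    	_, dst, _ := net.ParseCIDR("10.10.0.0/24") // hypothetical destination
    	fmt.Println(addViaGateway(dst, net.ParseIP("10.0.0.1")))
    }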
@@ -140,7 +134,7 @@ func (n *networkNamespace) SetGatewayIPv6(gwv6 net.IP) error {
return nil
}
- err := programGateway(n.nsPath(), gwv6, true)
+ err := n.programGateway(gwv6, true)
if err == nil {
n.setGatewayIPv6(gwv6)
}
@@ -156,7 +150,7 @@ func (n *networkNamespace) UnsetGatewayIPv6() error {
return nil
}
- err := programGateway(n.nsPath(), gwv6, false)
+ err := n.programGateway(gwv6, false)
if err == nil {
n.Lock()
n.gwv6 = net.IP{}
@@ -167,7 +161,7 @@ func (n *networkNamespace) UnsetGatewayIPv6() error {
}
func (n *networkNamespace) AddStaticRoute(r *types.StaticRoute) error {
- err := programRoute(n.nsPath(), r.Destination, r.NextHop)
+ err := n.programRoute(n.nsPath(), r.Destination, r.NextHop)
if err == nil {
n.Lock()
n.staticRoutes = append(n.staticRoutes, r)
@@ -178,7 +172,7 @@ func (n *networkNamespace) AddStaticRoute(r *types.StaticRoute) error {
func (n *networkNamespace) RemoveStaticRoute(r *types.StaticRoute) error {
- err := removeRoute(n.nsPath(), r.Destination, r.NextHop)
+ err := n.removeRoute(n.nsPath(), r.Destination, r.NextHop)
if err == nil {
n.Lock()
lastIndex := len(n.staticRoutes) - 1
diff --git a/vendor/src/github.com/docker/libnetwork/osl/sandbox.go b/vendor/src/github.com/docker/libnetwork/osl/sandbox.go
index db49d43dce..5264b35073 100644
--- a/vendor/src/github.com/docker/libnetwork/osl/sandbox.go
+++ b/vendor/src/github.com/docker/libnetwork/osl/sandbox.go
@@ -60,7 +60,7 @@ type Sandbox interface {
Destroy() error
}
-// NeighborOptionSetter interfaces defines the option setter methods for interface options
+// NeighborOptionSetter interface defines the option setter methods for neighbor options
type NeighborOptionSetter interface {
// LinkName returns an option setter to set the srcName of the link that should
// be used in the neighbor entry
@@ -85,6 +85,9 @@ type IfaceOptionSetter interface {
// Address returns an option setter to set IPv6 address.
AddressIPv6(*net.IPNet) IfaceOption
+ // LinkLocalAddresses returns an option setter to set the link-local IP addresses.
+ LinkLocalAddresses([]*net.IPNet) IfaceOption
+
// Master returns an option setter to set the master interface if any for this
// interface. The master interface name should refer to the srcname of a
// previously added interface of type bridge.
@@ -99,8 +102,8 @@ type IfaceOptionSetter interface {
// interfaces, routes and gateway
type Info interface {
// The collection of Interface previously added with the AddInterface
- // method. Note that this doesn't incude network interfaces added in any
- // other way (such as the default loopback interface which are automatically
+ // method. Note that this doesn't include network interfaces added in any
+ // other way (such as the default loopback interface which is automatically
// created on creation of a sandbox).
Interfaces() []Interface
@@ -138,6 +141,9 @@ type Interface interface {
// IPv6 address for the interface.
AddressIPv6() *net.IPNet
+ // LinkLocalAddresses returns the link-local IP addresses assigned to the interface.
+ LinkLocalAddresses() []*net.IPNet
+
// IP routes for the interface.
Routes() []*net.IPNet
diff --git a/vendor/src/github.com/docker/libnetwork/resolver.go b/vendor/src/github.com/docker/libnetwork/resolver.go
index 08a81eebcc..7566dcbf4e 100644
--- a/vendor/src/github.com/docker/libnetwork/resolver.go
+++ b/vendor/src/github.com/docker/libnetwork/resolver.go
@@ -275,15 +275,48 @@ func (r *resolver) handlePTRQuery(ptr string, query *dns.Msg) (*dns.Msg, error)
return resp, nil
}
+func (r *resolver) handleSRVQuery(svc string, query *dns.Msg) (*dns.Msg, error) {
+ srv, ip, err := r.sb.ResolveService(svc)
+
+ if err != nil {
+ return nil, err
+ }
+ if len(srv) != len(ip) {
+ return nil, fmt.Errorf("invalid reply for SRV query %s", svc)
+ }
+
+ resp := createRespMsg(query)
+
+ for i, r := range srv {
+ rr := new(dns.SRV)
+ rr.Hdr = dns.RR_Header{Name: svc, Rrtype: dns.TypeSRV, Class: dns.ClassINET, Ttl: respTTL}
+ rr.Port = r.Port
+ rr.Target = r.Target
+ resp.Answer = append(resp.Answer, rr)
+
+ rr1 := new(dns.A)
+ rr1.Hdr = dns.RR_Header{Name: r.Target, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: respTTL}
+ rr1.A = ip[i]
+ resp.Extra = append(resp.Extra, rr1)
+ }
+ return resp, nil
+}
+
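With handleSRVQuery wired into ServeDNS below, containers can discover service ports over plain DNS. A hedged client-side sketch using the same miekg/dns library; the service name and resolver address are hypothetical:

    package main

    import (
    	"fmt"

    	"github.com/miekg/dns"
    )

    // querySRV issues the kind of lookup handleSRVQuery now answers.
    func querySRV() error {
    	m := new(dns.Msg)
    	m.SetQuestion("_http._tcp.myservice.", dns.TypeSRV)

    	c := new(dns.Client)
    	resp, _, err := c.Exchange(m, "127.0.0.11:53")
    	if err != nil {
    		return err
    	}
    	for _, rr := range resp.Answer {
    		if srv, ok := rr.(*dns.SRV); ok {
    			fmt.Printf("%s:%d\n", srv.Target, srv.Port) // target host and port
    		}
    	}
    	return nil
    }

    func main() { _ = querySRV() }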
func truncateResp(resp *dns.Msg, maxSize int, isTCP bool) {
if !isTCP {
resp.Truncated = true
}
+ srv := resp.Question[0].Qtype == dns.TypeSRV
// trim the Answer RRs one by one till the whole message fits
// within the reply size
for resp.Len() > maxSize {
resp.Answer = resp.Answer[:len(resp.Answer)-1]
+
+ if srv && len(resp.Extra) > 0 {
+ resp.Extra = resp.Extra[:len(resp.Extra)-1]
+ }
}
}
@@ -299,12 +332,16 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
return
}
name := query.Question[0].Name
- if query.Question[0].Qtype == dns.TypeA {
+
+ switch query.Question[0].Qtype {
+ case dns.TypeA:
resp, err = r.handleIPQuery(name, query, types.IPv4)
- } else if query.Question[0].Qtype == dns.TypeAAAA {
+ case dns.TypeAAAA:
resp, err = r.handleIPQuery(name, query, types.IPv6)
- } else if query.Question[0].Qtype == dns.TypePTR {
+ case dns.TypePTR:
resp, err = r.handlePTRQuery(name, query)
+ case dns.TypeSRV:
+ resp, err = r.handleSRVQuery(name, query)
}
if err != nil {
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox.go b/vendor/src/github.com/docker/libnetwork/sandbox.go
index 4cdb017fb3..05f44809be 100644
--- a/vendor/src/github.com/docker/libnetwork/sandbox.go
+++ b/vendor/src/github.com/docker/libnetwork/sandbox.go
@@ -28,7 +28,7 @@ type Sandbox interface {
Labels() map[string]interface{}
// Statistics retrieves the interfaces' statistics for the sandbox
Statistics() (map[string]*types.InterfaceStatistics, error)
- // Refresh leaves all the endpoints, resets and re-apply the options,
+ // Refresh leaves all the endpoints, resets and re-applies the options,
// re-joins all the endpoints without destroying the osl sandbox
Refresh(options ...SandboxOption) error
// SetKey updates the Sandbox Key
@@ -38,13 +38,16 @@ type Sandbox interface {
// Delete destroys this container after detaching it from all connected endpoints.
Delete() error
// ResolveName resolves a service name to an IPv4 or IPv6 address by searching
- // the networks the sandbox is connected to. For IPv6 queries, second return
+ // the networks the sandbox is connected to. For IPv6 queries, second return
// value will be true if the name exists in docker domain but doesn't have an
- // IPv6 address. Such queries shouldn't be forwarded to external nameservers.
+ // IPv6 address. Such queries shouldn't be forwarded to external nameservers.
ResolveName(name string, iplen int) ([]net.IP, bool)
// ResolveIP returns the service name for the passed in IP. IP is in reverse dotted
// notation; the format used for DNS PTR records
ResolveIP(name string) string
+ // ResolveService returns all the backend details about the containers or hosts
+ // backing a service. Its purpose is to satisfy an SRV query.
+ ResolveService(name string) ([]*net.SRV, []net.IP, error)
// Endpoints returns all the endpoints connected to the sandbox
Endpoints() []Endpoint
}
@@ -81,6 +84,7 @@ type sandbox struct {
dbExists bool
isStub bool
inDelete bool
+ ingress bool
sync.Mutex
}
@@ -241,6 +245,9 @@ func (sb *sandbox) delete(force bool) error {
}
c.Lock()
+ if sb.ingress {
+ c.ingressSandbox = nil
+ }
delete(c.sandboxes, sb.ID())
c.Unlock()
@@ -291,7 +298,7 @@ func (sb *sandbox) Refresh(options ...SandboxOption) error {
return err
}
- // Re -connect to all endpoints
+ // Re-connect to all endpoints
for _, ep := range epList {
if err := ep.Join(sb); err != nil {
log.Warnf("Failed attach sandbox %s to endpoint %s: %v\n", sb.ID(), ep.ID(), err)
@@ -425,6 +432,61 @@ func (sb *sandbox) execFunc(f func()) {
sb.osSbox.InvokeFunc(f)
}
+func (sb *sandbox) ResolveService(name string) ([]*net.SRV, []net.IP, error) {
+ srv := []*net.SRV{}
+ ip := []net.IP{}
+
+ log.Debugf("Service name To resolve: %v", name)
+
+ parts := strings.Split(name, ".")
+ if len(parts) < 3 {
+ return nil, nil, fmt.Errorf("invalid service name, %s", name)
+ }
+
+ portName := parts[0]
+ proto := parts[1]
+ if proto != "_tcp" && proto != "_udp" {
+ return nil, nil, fmt.Errorf("invalid protocol in service, %s", name)
+ }
+ svcName := strings.Join(parts[2:], ".")
+
+ for _, ep := range sb.getConnectedEndpoints() {
+ n := ep.getNetwork()
+
+ sr, ok := n.getController().svcRecords[n.ID()]
+ if !ok {
+ continue
+ }
+
+ svcs, ok := sr.service[svcName]
+ if !ok {
+ continue
+ }
+
+ for _, svc := range svcs {
+ if svc.portName != portName {
+ continue
+ }
+ if svc.proto != proto {
+ continue
+ }
+ for _, t := range svc.target {
+ srv = append(srv,
+ &net.SRV{
+ Target: t.name,
+ Port: t.port,
+ })
+
+ ip = append(ip, t.ip)
+ }
+ }
+ if len(srv) > 0 {
+ break
+ }
+ }
+ return srv, ip, nil
+}
+
func (sb *sandbox) ResolveName(name string, ipType int) ([]net.IP, bool) {
// Embedded server owns the docker network domain. Resolution should work
// for both container_name and container_name.network_name
@@ -663,6 +725,9 @@ func (sb *sandbox) populateNetworkResources(ep *endpoint) error {
if i.addrv6 != nil && i.addrv6.IP.To16() != nil {
ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().AddressIPv6(i.addrv6))
}
+ if len(i.llAddrs) != 0 {
+ ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().LinkLocalAddresses(i.llAddrs))
+ }
if i.mac != nil {
ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().MacAddress(i.mac))
}
@@ -687,6 +752,12 @@ func (sb *sandbox) populateNetworkResources(ep *endpoint) error {
}
}
+ // Populate the load balancer only after updating all the other
+ // information, including the gateway and other routes, so that
+ // loadbalancers are populated only after all the network state is
+ // in place in the sandbox.
+ sb.populateLoadbalancers(ep)
+
// Only update the store if we did not come here as part of
// sandbox delete. If we came here as part of delete then do
// not bother updating the store. The sandbox object will be
@@ -826,7 +897,7 @@ func OptionHostsPath(path string) SandboxOption {
}
// OptionOriginHostsPath function returns an option setter for origin hosts file path
-// tbeo passed to NewSandbox method.
+// to be passed to NewSandbox method.
func OptionOriginHostsPath(path string) SandboxOption {
return func(sb *sandbox) {
sb.config.originHostsPath = path
@@ -949,6 +1020,14 @@ func OptionPortMapping(portBindings []types.PortBinding) SandboxOption {
}
}
+// OptionIngress function returns an option setter for marking a
+// sandbox as the controller's ingress sandbox.
+func OptionIngress() SandboxOption {
+ return func(sb *sandbox) {
+ sb.ingress = true
+ }
+}
+
func (eh epHeap) Len() int { return len(eh) }
func (eh epHeap) Less(i, j int) bool {
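ResolveService, added to the Sandbox interface above, expects names shaped like _portname._proto.service and returns parallel SRV and IP slices. A minimal sketch of consuming it, assuming sb is a connected libnetwork.Sandbox and the service name is hypothetical:

// printBackends lists the SRV backends for one service port as seen from a
// sandbox ("_http._tcp.web" -> portName "_http", proto "_tcp", service "web").
func printBackends(sb libnetwork.Sandbox) error {
	srvRecs, ips, err := sb.ResolveService("_http._tcp.web")
	if err != nil {
		return err
	}
	for i, rec := range srvRecs {
		fmt.Printf("%s:%d -> %s\n", rec.Target, rec.Port, ips[i])
	}
	return nil
}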
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox_dns_unix.go b/vendor/src/github.com/docker/libnetwork/sandbox_dns_unix.go
index 8d59e3d66a..5a3edba498 100644
--- a/vendor/src/github.com/docker/libnetwork/sandbox_dns_unix.go
+++ b/vendor/src/github.com/docker/libnetwork/sandbox_dns_unix.go
@@ -239,7 +239,7 @@ func (sb *sandbox) updateDNS(ipv6Enabled bool) error {
if currHash != "" && currHash != currRC.Hash {
// Seems the user has changed the container resolv.conf since the last time
// we checked so return without doing anything.
- log.Infof("Skipping update of resolv.conf file with ipv6Enabled: %t because file was touched by user", ipv6Enabled)
+ //log.Infof("Skipping update of resolv.conf file with ipv6Enabled: %t because file was touched by user", ipv6Enabled)
return nil
}
diff --git a/vendor/src/github.com/docker/libnetwork/service.go b/vendor/src/github.com/docker/libnetwork/service.go
index 9caed0ae22..cdf18bd499 100644
--- a/vendor/src/github.com/docker/libnetwork/service.go
+++ b/vendor/src/github.com/docker/libnetwork/service.go
@@ -1,80 +1,39 @@
package libnetwork
-import "net"
+import (
+ "net"
+ "sync"
+)
+
+var (
+ // A global monotonic counter to assign firewall marks to
+ // services.
+ fwMarkCtr uint32 = 256
+ fwMarkCtrMu sync.Mutex
+)
type service struct {
- name string
- id string
- backEnds map[string]map[string]net.IP
-}
-
-func newService(name string, id string) *service {
- return &service{
- name: name,
- id: id,
- backEnds: make(map[string]map[string]net.IP),
- }
-}
-
-func (c *controller) addServiceBinding(name, sid, nid, eid string, ip net.IP) error {
- var s *service
-
- n, err := c.NetworkByID(nid)
- if err != nil {
- return err
- }
-
- c.Lock()
- s, ok := c.serviceBindings[sid]
- if !ok {
- s = newService(name, sid)
- }
+ name string // Service Name
+ id string // Service ID
- netBackEnds, ok := s.backEnds[nid]
- if !ok {
- netBackEnds = make(map[string]net.IP)
- s.backEnds[nid] = netBackEnds
- }
+ // Map of loadbalancers for the service one-per attached
+ // network. It is keyed with network ID.
+ loadBalancers map[string]*loadBalancer
- netBackEnds[eid] = ip
- c.serviceBindings[sid] = s
- c.Unlock()
+ // List of ingress ports exposed by the service
+ ingressPorts []*PortConfig
- n.(*network).addSvcRecords(name, ip, nil, false)
- return nil
+ sync.Mutex
}
-func (c *controller) rmServiceBinding(name, sid, nid, eid string, ip net.IP) error {
- n, err := c.NetworkByID(nid)
- if err != nil {
- return err
- }
-
- c.Lock()
- s, ok := c.serviceBindings[sid]
- if !ok {
- c.Unlock()
- return nil
- }
-
- netBackEnds, ok := s.backEnds[nid]
- if !ok {
- c.Unlock()
- return nil
- }
-
- delete(netBackEnds, eid)
-
- if len(netBackEnds) == 0 {
- delete(s.backEnds, nid)
- }
-
- if len(s.backEnds) == 0 {
- delete(c.serviceBindings, sid)
- }
- c.Unlock()
+type loadBalancer struct {
+ vip net.IP
+ fwMark uint32
- n.(*network).deleteSvcRecords(name, ip, nil, false)
+ // Map of backend IPs backing this loadbalancer on this
+ // network. It is keyed with endpoint ID.
+ backEnds map[string]net.IP
- return err
+ // Back pointer to service to which the loadbalancer belongs.
+ service *service
}
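Each load balancer is keyed to an IPVS virtual service by a firewall mark drawn from the fwMarkCtr counter above. A minimal sketch of the allocation pattern, with a hypothetical helper name (addServiceBinding in service_linux.go inlines the same steps):

// nextFWMark allocates a fresh firewall mark, reading and incrementing
// the shared counter entirely under its mutex.
func nextFWMark() uint32 {
	fwMarkCtrMu.Lock()
	defer fwMarkCtrMu.Unlock()
	m := fwMarkCtr
	fwMarkCtr++
	return m
}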
diff --git a/vendor/src/github.com/docker/libnetwork/service_linux.go b/vendor/src/github.com/docker/libnetwork/service_linux.go
new file mode 100644
index 0000000000..204c59d9ff
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/service_linux.go
@@ -0,0 +1,646 @@
+package libnetwork
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/reexec"
+ "github.com/docker/libnetwork/iptables"
+ "github.com/docker/libnetwork/ipvs"
+ "github.com/docker/libnetwork/ns"
+ "github.com/gogo/protobuf/proto"
+ "github.com/vishvananda/netlink/nl"
+ "github.com/vishvananda/netns"
+)
+
+func init() {
+ reexec.Register("fwmarker", fwMarker)
+}
+
+func newService(name string, id string, ingressPorts []*PortConfig) *service {
+ return &service{
+ name: name,
+ id: id,
+ ingressPorts: ingressPorts,
+ loadBalancers: make(map[string]*loadBalancer),
+ }
+}
+
+func (c *controller) addServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, ip net.IP) error {
+ var (
+ s *service
+ addService bool
+ )
+
+ n, err := c.NetworkByID(nid)
+ if err != nil {
+ return err
+ }
+
+ c.Lock()
+ s, ok := c.serviceBindings[sid]
+ if !ok {
+ // Create a new service if we are seeing this service
+ // for the first time.
+ s = newService(name, sid, ingressPorts)
+ c.serviceBindings[sid] = s
+ }
+ c.Unlock()
+
+ // Add endpoint IP to special "tasks.svc_name" so that the
+ // applications have access to DNS RR.
+ n.(*network).addSvcRecords("tasks."+name, ip, nil, false)
+
+ // Add service name to vip in DNS, if vip is valid. Otherwise resort to DNS RR
+ svcIP := vip
+ if len(svcIP) == 0 {
+ svcIP = ip
+ }
+ n.(*network).addSvcRecords(name, svcIP, nil, false)
+
+ s.Lock()
+ defer s.Unlock()
+
+ lb, ok := s.loadBalancers[nid]
+ if !ok {
+ // Create a new load balancer if we are seeing this
+ // network attachment on the service for the first
+ // time.
+ lb = &loadBalancer{
+ vip: vip,
+ fwMark: fwMarkCtr,
+ backEnds: make(map[string]net.IP),
+ service: s,
+ }
+
+ fwMarkCtrMu.Lock()
+ fwMarkCtr++
+ fwMarkCtrMu.Unlock()
+
+ s.loadBalancers[nid] = lb
+
+ // Since we just created this load balancer, make sure
+ // we add a new service entry in the IPVS rules.
+ addService = true
+ }
+
+ lb.backEnds[eid] = ip
+
+ // Add loadbalancer service and backend in all sandboxes in
+ // the network only if vip is valid.
+ if len(vip) != 0 {
+ n.(*network).addLBBackend(ip, vip, lb.fwMark, ingressPorts, addService)
+ }
+
+ return nil
+}
+
+func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, ip net.IP) error {
+ var rmService bool
+
+ n, err := c.NetworkByID(nid)
+ if err != nil {
+ return err
+ }
+
+ c.Lock()
+ s, ok := c.serviceBindings[sid]
+ if !ok {
+ c.Unlock()
+ return nil
+ }
+ c.Unlock()
+
+ // Delete the special "tasks.svc_name" backend record.
+ n.(*network).deleteSvcRecords("tasks."+name, ip, nil, false)
+
+ // Make sure to remove the right IP since if vip is
+ // not valid we would have added a DNS RR record.
+ svcIP := vip
+ if len(svcIP) == 0 {
+ svcIP = ip
+ }
+ n.(*network).deleteSvcRecords(name, svcIP, nil, false)
+
+ s.Lock()
+ defer s.Unlock()
+
+ lb, ok := s.loadBalancers[nid]
+ if !ok {
+ return nil
+ }
+
+ delete(lb.backEnds, eid)
+ if len(lb.backEnds) == 0 {
+ // All the backends for this service have been
+ // removed. Time to remove the load balancer and also
+ // remove the service entry in IPVS.
+ rmService = true
+
+ delete(s.loadBalancers, nid)
+ }
+
+ if len(s.loadBalancers) == 0 {
+ // All loadbalancers for the service removed. Time to
+ // remove the service itself.
+ delete(c.serviceBindings, sid)
+ }
+
+ // Remove the loadbalancer service (if needed) and backend in all
+ // sandboxes in the network, only if the vip is valid.
+ if len(vip) != 0 {
+ n.(*network).rmLBBackend(ip, vip, lb.fwMark, ingressPorts, rmService)
+ }
+
+ return nil
+}
+
+// connectedLoadbalancers returns all the loadbalancers on this network
+// that are currently discovered on this node.
+func (n *network) connectedLoadbalancers() []*loadBalancer {
+ c := n.getController()
+
+ c.Lock()
+ defer c.Unlock()
+
+ var lbs []*loadBalancer
+ for _, s := range c.serviceBindings {
+ if lb, ok := s.loadBalancers[n.ID()]; ok {
+ lbs = append(lbs, lb)
+ }
+ }
+
+ return lbs
+}
+
+// Populate all loadbalancers on the network that the passed endpoint
+// belongs to, into this sandbox.
+func (sb *sandbox) populateLoadbalancers(ep *endpoint) {
+ var gwIP net.IP
+
+ n := ep.getNetwork()
+ eIP := ep.Iface().Address()
+
+ if sb.ingress {
+ // For the ingress sandbox if this is not gateway
+ // endpoint do nothing.
+ if ep != sb.getGatewayEndpoint() {
+ return
+ }
+
+ // This is the gateway endpoint. Now get the ingress
+ // network and plumb the loadbalancers.
+ gwIP = ep.Iface().Address().IP
+ for _, ep := range sb.getConnectedEndpoints() {
+ if !ep.endpointInGWNetwork() {
+ n = ep.getNetwork()
+ eIP = ep.Iface().Address()
+ }
+ }
+ }
+
+ for _, lb := range n.connectedLoadbalancers() {
+ // Skip if vip is not valid.
+ if len(lb.vip) == 0 {
+ continue
+ }
+
+ addService := true
+ for _, ip := range lb.backEnds {
+ sb.addLBBackend(ip, lb.vip, lb.fwMark, lb.service.ingressPorts,
+ eIP, gwIP, addService)
+ addService = false
+ }
+ }
+}
+
+// Add a loadbalancer backend to all sandboxes which have a connection to
+// this network. If needed, add the service as well, as specified by
+// the addService bool.
+func (n *network) addLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, addService bool) {
+ n.WalkEndpoints(func(e Endpoint) bool {
+ ep := e.(*endpoint)
+ if sb, ok := ep.getSandbox(); ok {
+ var gwIP net.IP
+ if ep := sb.getGatewayEndpoint(); ep != nil {
+ gwIP = ep.Iface().Address().IP
+ }
+
+ sb.addLBBackend(ip, vip, fwMark, ingressPorts, ep.Iface().Address(), gwIP, addService)
+ }
+
+ return false
+ })
+}
+
+// Remove a loadbalancer backend from all sandboxes which have a
+// connection to this network. If needed, remove the service entry as
+// well, as specified by the rmService bool.
+func (n *network) rmLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, rmService bool) {
+ n.WalkEndpoints(func(e Endpoint) bool {
+ ep := e.(*endpoint)
+ if sb, ok := ep.getSandbox(); ok {
+ var gwIP net.IP
+ if ep := sb.getGatewayEndpoint(); ep != nil {
+ gwIP = ep.Iface().Address().IP
+ }
+
+ sb.rmLBBackend(ip, vip, fwMark, ingressPorts, ep.Iface().Address(), gwIP, rmService)
+ }
+
+ return false
+ })
+}
+
+// Add loadbalancer backend into one connected sandbox.
+func (sb *sandbox) addLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, eIP *net.IPNet, gwIP net.IP, addService bool) {
+ if sb.osSbox == nil {
+ return
+ }
+
+ i, err := ipvs.New(sb.Key())
+ if err != nil {
+ logrus.Errorf("Failed to create a ipvs handle for sbox %s: %v", sb.Key(), err)
+ return
+ }
+ defer i.Close()
+
+ s := &ipvs.Service{
+ AddressFamily: nl.FAMILY_V4,
+ FWMark: fwMark,
+ SchedName: ipvs.RoundRobin,
+ }
+
+ if addService {
+ var iPorts []*PortConfig
+ if sb.ingress {
+ iPorts = ingressPorts
+ if err := programIngress(gwIP, iPorts, false); err != nil {
+ logrus.Errorf("Failed to add ingress: %v", err)
+ return
+ }
+ }
+
+ logrus.Debugf("Creating service for vip %s fwMark %d ingressPorts %#v", vip, fwMark, iPorts)
+ if err := invokeFWMarker(sb.Key(), vip, fwMark, iPorts, eIP, false); err != nil {
+ logrus.Errorf("Failed to add firewall mark rule in sbox %s: %v", sb.Key(), err)
+ return
+ }
+
+ if err := i.NewService(s); err != nil {
+ logrus.Errorf("Failed to create a new service for vip %s fwmark %d: %v", vip, fwMark, err)
+ return
+ }
+ }
+
+ d := &ipvs.Destination{
+ AddressFamily: nl.FAMILY_V4,
+ Address: ip,
+ Weight: 1,
+ }
+
+ // Remove the sched name before using the service to add a
+ // destination.
+ s.SchedName = ""
+ if err := i.NewDestination(s, d); err != nil && err != syscall.EEXIST {
+ logrus.Errorf("Failed to create real server %s for vip %s fwmark %d in sb %s: %v", ip, vip, fwMark, sb.containerID, err)
+ }
+}
+
+// Remove loadbalancer backend from one connected sandbox.
+func (sb *sandbox) rmLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, eIP *net.IPNet, gwIP net.IP, rmService bool) {
+ if sb.osSbox == nil {
+ return
+ }
+
+ i, err := ipvs.New(sb.Key())
+ if err != nil {
+ logrus.Errorf("Failed to create a ipvs handle for sbox %s: %v", sb.Key(), err)
+ return
+ }
+ defer i.Close()
+
+ s := &ipvs.Service{
+ AddressFamily: nl.FAMILY_V4,
+ FWMark: fwMark,
+ }
+
+ d := &ipvs.Destination{
+ AddressFamily: nl.FAMILY_V4,
+ Address: ip,
+ Weight: 1,
+ }
+
+ if err := i.DelDestination(s, d); err != nil {
+ logrus.Errorf("Failed to delete real server %s for vip %s fwmark %d: %v", ip, vip, fwMark, err)
+ return
+ }
+
+ if rmService {
+ s.SchedName = ipvs.RoundRobin
+ if err := i.DelService(s); err != nil {
+ logrus.Errorf("Failed to create a new service for vip %s fwmark %d: %v", vip, fwMark, err)
+ return
+ }
+
+ var iPorts []*PortConfig
+ if sb.ingress {
+ iPorts = ingressPorts
+ if err := programIngress(gwIP, iPorts, true); err != nil {
+ logrus.Errorf("Failed to delete ingress: %v", err)
+ return
+ }
+ }
+
+ if err := invokeFWMarker(sb.Key(), vip, fwMark, iPorts, eIP, true); err != nil {
+ logrus.Errorf("Failed to add firewall mark rule in sbox %s: %v", sb.Key(), err)
+ return
+ }
+ }
+}
+
+const ingressChain = "DOCKER-INGRESS"
+
+var (
+ ingressOnce sync.Once
+ ingressProxyMu sync.Mutex
+ ingressProxyTbl = make(map[string]io.Closer)
+)
+
+func programIngress(gwIP net.IP, ingressPorts []*PortConfig, isDelete bool) error {
+ addDelOpt := "-I"
+ if isDelete {
+ addDelOpt = "-D"
+ }
+
+ chainExists := iptables.ExistChain(ingressChain, iptables.Nat)
+
+ ingressOnce.Do(func() {
+ if chainExists {
+ // Flush ingress chain rules during init if it
+ // exists. It might contain stale rules from a
+ // previous life.
+ if err := iptables.RawCombinedOutput("-t", "nat", "-F", ingressChain); err != nil {
+ logrus.Errorf("Could not flush ingress chain rules during init: %v", err)
+ }
+ }
+ })
+
+ if !isDelete {
+ if !chainExists {
+ if err := iptables.RawCombinedOutput("-t", "nat", "-N", ingressChain); err != nil {
+ return fmt.Errorf("failed to create ingress chain: %v", err)
+ }
+ }
+
+ if !iptables.Exists(iptables.Nat, ingressChain, "-j", "RETURN") {
+ if err := iptables.RawCombinedOutput("-t", "nat", "-A", ingressChain, "-j", "RETURN"); err != nil {
+ return fmt.Errorf("failed to add return rule in ingress chain: %v", err)
+ }
+ }
+
+ for _, chain := range []string{"OUTPUT", "PREROUTING"} {
+ if !iptables.Exists(iptables.Nat, chain, "-j", ingressChain) {
+ if err := iptables.RawCombinedOutput("-t", "nat", "-I", chain, "-j", ingressChain); err != nil {
+ return fmt.Errorf("failed to add jump rule in %s to ingress chain: %v", chain, err)
+ }
+ }
+ }
+
+ oifName, err := findOIFName(gwIP)
+ if err != nil {
+ return fmt.Errorf("failed to find gateway bridge interface name for %s: %v", gwIP, err)
+ }
+
+ path := filepath.Join("/proc/sys/net/ipv4/conf", oifName, "route_localnet")
+ if err := ioutil.WriteFile(path, []byte{'1', '\n'}, 0644); err != nil {
+ return fmt.Errorf("could not write to %s: %v", path, err)
+ }
+
+ ruleArgs := strings.Fields(fmt.Sprintf("-m addrtype --src-type LOCAL -o %s -j MASQUERADE", oifName))
+ if !iptables.Exists(iptables.Nat, "POSTROUTING", ruleArgs...) {
+ if err := iptables.RawCombinedOutput(append([]string{"-t", "nat", "-I", "POSTROUTING"}, ruleArgs...)...); err != nil {
+ return fmt.Errorf("failed to add ingress localhost POSTROUTING rule for %s: %v", oifName, err)
+ }
+ }
+ }
+
+ for _, iPort := range ingressPorts {
+ if iptables.ExistChain(ingressChain, iptables.Nat) {
+ rule := strings.Fields(fmt.Sprintf("-t nat %s %s -p %s --dport %d -j DNAT --to-destination %s:%d",
+ addDelOpt, ingressChain, strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)]), iPort.PublishedPort, gwIP, iPort.PublishedPort))
+ if err := iptables.RawCombinedOutput(rule...); err != nil {
+ return fmt.Errorf("setting up rule failed, %v: %v", rule, err)
+ }
+ }
+
+ if err := plumbProxy(iPort, isDelete); err != nil {
+ return fmt.Errorf("failed to create proxy for port %d: %v", iPort.PublishedPort, err)
+ }
+ }
+
+ return nil
+}
+
+func findOIFName(ip net.IP) (string, error) {
+ nlh := ns.NlHandle()
+
+ routes, err := nlh.RouteGet(ip)
+ if err != nil {
+ return "", err
+ }
+
+ if len(routes) == 0 {
+ return "", fmt.Errorf("no route to %s", ip)
+ }
+
+ // Pick the first route (typically there is only one route). We
+ // don't support multipath.
+ link, err := nlh.LinkByIndex(routes[0].LinkIndex)
+ if err != nil {
+ return "", err
+ }
+
+ return link.Attrs().Name, nil
+}
+
+func plumbProxy(iPort *PortConfig, isDelete bool) error {
+ var (
+ err error
+ l io.Closer
+ )
+
+ portSpec := fmt.Sprintf("%d/%s", iPort.PublishedPort, strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)]))
+ if isDelete {
+ ingressProxyMu.Lock()
+ if listener, ok := ingressProxyTbl[portSpec]; ok {
+ if listener != nil {
+ listener.Close()
+ }
+ }
+ ingressProxyMu.Unlock()
+
+ return nil
+ }
+
+ switch iPort.Protocol {
+ case ProtocolTCP:
+ l, err = net.ListenTCP("tcp", &net.TCPAddr{Port: int(iPort.PublishedPort)})
+ case ProtocolUDP:
+ l, err = net.ListenUDP("udp", &net.UDPAddr{Port: int(iPort.PublishedPort)})
+ }
+
+ if err != nil {
+ return err
+ }
+
+ ingressProxyMu.Lock()
+ ingressProxyTbl[portSpec] = l
+ ingressProxyMu.Unlock()
+
+ return nil
+}
+
+// invokeFWMarker invokes the fwmarker reexec routine to mark vip-destined
+// packets with the passed firewall mark.
+func invokeFWMarker(path string, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, eIP *net.IPNet, isDelete bool) error {
+ var ingressPortsFile string
+ if len(ingressPorts) != 0 {
+ f, err := ioutil.TempFile("", "port_configs")
+ if err != nil {
+ return err
+ }
+
+ buf, err := proto.Marshal(&EndpointRecord{
+ IngressPorts: ingressPorts,
+ })
+ if err != nil {
+ f.Close()
+ return err
+ }
+
+ n, err := f.Write(buf)
+ if err != nil {
+ f.Close()
+ return err
+ }
+
+ if n < len(buf) {
+ f.Close()
+ return io.ErrShortWrite
+ }
+
+ ingressPortsFile = f.Name()
+ f.Close()
+ }
+
+ addDelOpt := "-A"
+ if isDelete {
+ addDelOpt = "-D"
+ }
+
+ cmd := &exec.Cmd{
+ Path: reexec.Self(),
+ Args: append([]string{"fwmarker"}, path, vip.String(), fmt.Sprintf("%d", fwMark), addDelOpt, ingressPortsFile, eIP.IP.String()),
+ Stdout: os.Stdout,
+ Stderr: os.Stderr,
+ }
+
+ if err := cmd.Run(); err != nil {
+ return fmt.Errorf("reexec failed: %v", err)
+ }
+
+ return nil
+}
+
+// Firewall marker reexec function.
+func fwMarker() {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ if len(os.Args) < 7 {
+ logrus.Error("invalid number of arguments..")
+ os.Exit(1)
+ }
+
+ var ingressPorts []*PortConfig
+ if os.Args[5] != "" {
+ buf, err := ioutil.ReadFile(os.Args[5])
+ if err != nil {
+ logrus.Errorf("Failed to read ports config file: %v", err)
+ os.Exit(6)
+ }
+
+ var epRec EndpointRecord
+ err = proto.Unmarshal(buf, &epRec)
+ if err != nil {
+ logrus.Errorf("Failed to unmarshal ports config data: %v", err)
+ os.Exit(7)
+ }
+
+ ingressPorts = epRec.IngressPorts
+ }
+
+ vip := os.Args[2]
+ fwMark, err := strconv.ParseUint(os.Args[3], 10, 32)
+ if err != nil {
+ logrus.Errorf("bad fwmark value(%s) passed: %v", os.Args[3], err)
+ os.Exit(2)
+ }
+ addDelOpt := os.Args[4]
+
+ rules := [][]string{}
+ for _, iPort := range ingressPorts {
+ rule := strings.Fields(fmt.Sprintf("-t nat %s PREROUTING -p %s --dport %d -j REDIRECT --to-port %d",
+ addDelOpt, strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)]), iPort.PublishedPort, iPort.TargetPort))
+ rules = append(rules, rule)
+
+ rule = strings.Fields(fmt.Sprintf("-t mangle %s PREROUTING -p %s --dport %d -j MARK --set-mark %d",
+ addDelOpt, strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)]), iPort.PublishedPort, fwMark))
+ rules = append(rules, rule)
+ }
+
+ ns, err := netns.GetFromPath(os.Args[1])
+ if err != nil {
+ logrus.Errorf("failed get network namespace %q: %v", os.Args[1], err)
+ os.Exit(3)
+ }
+ defer ns.Close()
+
+ if err := netns.Set(ns); err != nil {
+ logrus.Errorf("setting into container net ns %v failed, %v", os.Args[1], err)
+ os.Exit(4)
+ }
+
+ if len(ingressPorts) != 0 && addDelOpt == "-A" {
+ ruleParams := strings.Fields(fmt.Sprintf("-m ipvs --ipvs -j SNAT --to-source %s", os.Args[6]))
+ if !iptables.Exists("nat", "POSTROUTING", ruleParams...) {
+ rule := append(strings.Fields("-t nat -A POSTROUTING"), ruleParams...)
+ rules = append(rules, rule)
+
+ err := ioutil.WriteFile("/proc/sys/net/ipv4/vs/conntrack", []byte{'1', '\n'}, 0644)
+ if err != nil {
+ logrus.Errorf("Failed to write to /proc/sys/net/ipv4/vs/conntrack: %v", err)
+ os.Exit(8)
+ }
+ }
+ }
+
+ rule := strings.Fields(fmt.Sprintf("-t mangle %s OUTPUT -d %s/32 -j MARK --set-mark %d", addDelOpt, vip, fwMark))
+ rules = append(rules, rule)
+
+ for _, rule := range rules {
+ if err := iptables.RawCombinedOutputNative(rule...); err != nil {
+ logrus.Errorf("setting up rule failed, %v: %v", rule, err)
+ os.Exit(5)
+ }
+ }
+}
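invokeFWMarker and fwMarker above use docker's reexec package: the daemon re-executes its own binary with argv[0] set to a registered name, so the child process runs only the registered function (here, after entering the target network namespace). A self-contained sketch of that pattern, with a hypothetical handler name:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Register a handler under a name, as service_linux.go does for "fwmarker".
	reexec.Register("hello", func() {
		fmt.Println("ran in re-executed child:", os.Args[1:])
	})
}

func main() {
	// Init dispatches on argv[0] and returns true when this process was
	// started under a registered name.
	if reexec.Init() {
		return
	}

	cmd := reexec.Command("hello", "world") // re-executes self with argv[0] = "hello"
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "reexec failed:", err)
	}
}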
diff --git a/vendor/src/github.com/docker/libnetwork/service_unsupported.go b/vendor/src/github.com/docker/libnetwork/service_unsupported.go
new file mode 100644
index 0000000000..67984e2aba
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/service_unsupported.go
@@ -0,0 +1,19 @@
+// +build !linux
+
+package libnetwork
+
+import (
+ "fmt"
+ "net"
+)
+
+func (c *controller) addServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, ip net.IP) error {
+ return fmt.Errorf("not supported")
+}
+
+func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, ip net.IP) error {
+ return fmt.Errorf("not supported")
+}
+
+func (sb *sandbox) populateLoadbalancers(ep *endpoint) {
+}
diff --git a/vendor/src/github.com/docker/libnetwork/types/types.go b/vendor/src/github.com/docker/libnetwork/types/types.go
index c5ab053338..fb686de0b9 100644
--- a/vendor/src/github.com/docker/libnetwork/types/types.go
+++ b/vendor/src/github.com/docker/libnetwork/types/types.go
@@ -16,6 +16,15 @@ const (
IPv6
)
+// EncryptionKey is the libnetwork representation of the key distributed by the lead
+// manager.
+type EncryptionKey struct {
+ Subsystem string
+ Algorithm int32
+ Key []byte
+ LamportTime uint64
+}
+
// UUID represents a globally unique ID of various resources like network and endpoint
type UUID string
@@ -24,7 +33,7 @@ type QosPolicy struct {
MaxEgressBandwidth uint64
}
-// TransportPort represent a local Layer 4 endpoint
+// TransportPort represents a local Layer 4 endpoint
type TransportPort struct {
Proto Protocol
Port uint16
@@ -70,7 +79,7 @@ func (t *TransportPort) FromString(s string) error {
return BadRequestErrorf("invalid format for transport port: %s", s)
}
-// PortBinding represent a port binding between the container and the host
+// PortBinding represents a port binding between the container and the host
type PortBinding struct {
Proto Protocol
IP net.IP
@@ -116,7 +125,7 @@ func (p *PortBinding) GetCopy() PortBinding {
}
}
-// String return the PortBinding structure in string form
+// String returns the PortBinding structure in string form
func (p *PortBinding) String() string {
ret := fmt.Sprintf("%s/", p.Proto)
if p.IP != nil {
diff --git a/vendor/src/github.com/docker/swarmkit/LICENSE b/vendor/src/github.com/docker/swarmkit/LICENSE
new file mode 100644
index 0000000000..8dada3edaf
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/docker/swarmkit/agent/agent.go b/vendor/src/github.com/docker/swarmkit/agent/agent.go
new file mode 100644
index 0000000000..bb77289be9
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/agent.go
@@ -0,0 +1,354 @@
+package agent
+
+import (
+ "fmt"
+ "math/rand"
+ "reflect"
+ "sync"
+ "time"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "golang.org/x/net/context"
+)
+
+const (
+ initialSessionFailureBackoff = time.Second
+ maxSessionFailureBackoff = 8 * time.Second
+)
+
+// Agent implements the primary node functionality for a member of a swarm
+// cluster. The primary functionality is to run and report on the status of
+// tasks assigned to the node.
+type Agent struct {
+ config *Config
+
+ // The latest node object state from manager
+ // for this node known to the agent.
+ node *api.Node
+
+ keys []*api.EncryptionKey
+
+ sessionq chan sessionOperation
+ worker Worker
+
+ started chan struct{}
+ ready chan struct{}
+ stopped chan struct{} // requests shutdown
+ closed chan struct{} // only closed in run
+ err error // read only after closed is closed
+ mu sync.Mutex
+}
+
+// New returns a new agent, ready for task dispatch.
+func New(config *Config) (*Agent, error) {
+ if err := config.validate(); err != nil {
+ return nil, err
+ }
+
+ a := &Agent{
+ config: config,
+ worker: newWorker(config.DB, config.Executor),
+ sessionq: make(chan sessionOperation),
+ started: make(chan struct{}),
+ stopped: make(chan struct{}),
+ closed: make(chan struct{}),
+ ready: make(chan struct{}),
+ }
+
+ return a, nil
+}
+
+// Start begins execution of the agent in the provided context, if not already
+// started.
+func (a *Agent) Start(ctx context.Context) error {
+ select {
+ case <-a.started:
+ select {
+ case <-a.closed:
+ return a.err
+ case <-a.stopped:
+ return errAgentStopped
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ return errAgentStarted
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ close(a.started)
+ go a.run(ctx)
+
+ return nil
+}
+
+// Stop shuts down the agent, blocking until full shutdown. If the agent is
+// not started, Stop returns errAgentNotStarted.
+func (a *Agent) Stop(ctx context.Context) error {
+ select {
+ case <-a.started:
+ select {
+ case <-a.closed:
+ return a.err
+ case <-a.stopped:
+ select {
+ case <-a.closed:
+ return a.err
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ close(a.stopped)
+ // recurse and wait for closure
+ return a.Stop(ctx)
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ return errAgentNotStarted
+ }
+}
+
+// Err returns the error that caused the agent to shut down, or nil. Err blocks
+// until the agent is fully shutdown.
+func (a *Agent) Err(ctx context.Context) error {
+ select {
+ case <-a.closed:
+ return a.err
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+// Ready returns a channel that will be closed when the agent first becomes ready.
+func (a *Agent) Ready() <-chan struct{} {
+ return a.ready
+}
+
+func (a *Agent) run(ctx context.Context) {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ defer close(a.closed) // full shutdown.
+
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("module", "agent"))
+
+ log.G(ctx).Debugf("(*Agent).run")
+ defer log.G(ctx).Debugf("(*Agent).run exited")
+
+ var (
+ backoff time.Duration
+ session = newSession(ctx, a, backoff) // start the initial session
+ registered = session.registered
+ ready = a.ready // first session ready
+ sessionq chan sessionOperation
+ )
+
+ if err := a.worker.Init(ctx); err != nil {
+ log.G(ctx).WithError(err).Error("worker initialization failed")
+ a.err = err
+ return // fatal?
+ }
+
+ // setup a reliable reporter to call back to us.
+ reporter := newStatusReporter(ctx, a)
+ defer reporter.Close()
+
+ a.worker.Listen(ctx, reporter)
+
+ for {
+ select {
+ case operation := <-sessionq:
+ operation.response <- operation.fn(session)
+ case msg := <-session.tasks:
+ if err := a.worker.Assign(ctx, msg.Tasks); err != nil {
+ log.G(ctx).WithError(err).Error("task assignment failed")
+ }
+ case msg := <-session.messages:
+ if err := a.handleSessionMessage(ctx, msg); err != nil {
+ log.G(ctx).WithError(err).Error("session message handler failed")
+ }
+ case <-registered:
+ log.G(ctx).Debugln("agent: registered")
+ if ready != nil {
+ close(ready)
+ }
+ ready = nil
+ registered = nil // we only care about this once per session
+ backoff = 0 // reset backoff
+ sessionq = a.sessionq
+ case err := <-session.errs:
+ // TODO(stevvooe): This may actually block if a session is closed
+ // but no error was sent. Session.close must only be called here
+ // for this to work.
+ if err != nil {
+ log.G(ctx).WithError(err).Error("agent: session failed")
+ backoff = initialSessionFailureBackoff + 2*backoff
+ if backoff > maxSessionFailureBackoff {
+ backoff = maxSessionFailureBackoff
+ }
+ }
+
+ if err := session.close(); err != nil {
+ log.G(ctx).WithError(err).Error("agent: closing session failed")
+ }
+ sessionq = nil
+ case <-session.closed:
+ log.G(ctx).Debugf("agent: rebuild session")
+
+ // select a session registration delay from backoff range.
+ delay := time.Duration(rand.Int63n(int64(backoff)))
+ session = newSession(ctx, a, delay)
+ registered = session.registered
+ sessionq = a.sessionq
+ case <-a.stopped:
+ // TODO(stevvooe): Wait on shutdown and cleanup. May need to pump
+ // this loop a few times.
+ return
+ case <-ctx.Done():
+ if a.err == nil {
+ a.err = ctx.Err()
+ }
+
+ return
+ }
+ }
+}
+
+func (a *Agent) handleSessionMessage(ctx context.Context, message *api.SessionMessage) error {
+ seen := map[api.Peer]struct{}{}
+ for _, manager := range message.Managers {
+ if manager.Peer.Addr == "" {
+ log.G(ctx).WithField("manager.addr", manager.Peer.Addr).
+ Warnf("skipping bad manager address")
+ continue
+ }
+
+ a.config.Managers.Observe(*manager.Peer, int(manager.Weight))
+ seen[*manager.Peer] = struct{}{}
+ }
+
+ if message.Node != nil {
+ if a.node == nil || !nodesEqual(a.node, message.Node) {
+ if a.config.NotifyRoleChange != nil {
+ a.config.NotifyRoleChange <- message.Node.Spec.Role
+ }
+ a.node = message.Node.Copy()
+ if err := a.config.Executor.Configure(ctx, a.node); err != nil {
+ log.G(ctx).WithError(err).Error("node configure failed")
+ }
+ }
+ }
+
+ // prune managers not in list.
+ for peer := range a.config.Managers.Weights() {
+ if _, ok := seen[peer]; !ok {
+ a.config.Managers.Remove(peer)
+ }
+ }
+
+ if message.NetworkBootstrapKeys == nil {
+ return nil
+ }
+
+ for _, key := range message.NetworkBootstrapKeys {
+ same := false
+ for _, agentKey := range a.keys {
+ if agentKey.LamportTime == key.LamportTime {
+ same = true
+ }
+ }
+ if !same {
+ a.keys = message.NetworkBootstrapKeys
+ if err := a.config.Executor.SetNetworkBootstrapKeys(a.keys); err != nil {
+ panic(fmt.Errorf("configuring network key failed"))
+ }
+ }
+ }
+
+ return nil
+}
+
+type sessionOperation struct {
+ fn func(session *session) error
+ response chan error
+}
+
+// withSession runs fn with the current session.
+func (a *Agent) withSession(ctx context.Context, fn func(session *session) error) error {
+ response := make(chan error, 1)
+ select {
+ case a.sessionq <- sessionOperation{
+ fn: fn,
+ response: response,
+ }:
+ select {
+ case err := <-response:
+ return err
+ case <-a.closed:
+ return ErrClosed
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ case <-a.closed:
+ return ErrClosed
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+// UpdateTaskStatus attempts to send a task status update over the current session,
+// blocking until the operation is completed.
+//
+// If an error is returned, the operation should be retried.
+func (a *Agent) UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error {
+ log.G(ctx).WithField("task.id", taskID).Debugf("(*Agent).UpdateTaskStatus")
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ errs := make(chan error, 1)
+ if err := a.withSession(ctx, func(session *session) error {
+ go func() {
+ err := session.sendTaskStatus(ctx, taskID, status)
+ if err != nil {
+ if err == errTaskUnknown {
+ err = nil // dispatcher no longer cares about this task.
+ } else {
+ log.G(ctx).WithError(err).Error("sending task status update failed")
+ }
+ } else {
+ log.G(ctx).Debug("task status reported")
+ }
+
+ errs <- err
+ }()
+
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ select {
+ case err := <-errs:
+ return err
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+// nodesEqual returns true if the node states are functionally equal, ignoring
+// status, version and other superfluous fields.
+//
+// This is used to decide whether or not to propagate a node update to the executor.
+func nodesEqual(a, b *api.Node) bool {
+ a, b = a.Copy(), b.Copy()
+
+ a.Status, b.Status = api.NodeStatus{}, api.NodeStatus{}
+ a.Meta, b.Meta = api.Meta{}, api.Meta{}
+
+ return reflect.DeepEqual(a, b)
+}
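The Agent coordinates its lifecycle through three channels rather than a state enum: started is closed exactly once, stopped requests shutdown, and closed signals that run has fully exited (after which err may be read). A stripped-down sketch of the same idiom, with hypothetical names, a mutex added to keep double-start safe, and the errors and sync imports assumed:

type runner struct {
	mu      sync.Mutex
	started chan struct{}
	stopped chan struct{}
	closed  chan struct{}
	err     error
}

func (r *runner) start() error {
	r.mu.Lock()
	defer r.mu.Unlock()

	select {
	case <-r.started:
		return errors.New("already started")
	default:
	}

	close(r.started)
	go func() {
		defer close(r.closed) // full shutdown; err is safe to read after this
		<-r.stopped           // run until a stop is requested
	}()
	return nil
}

func (r *runner) stop() {
	close(r.stopped)
	<-r.closed // block until full shutdown
}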
diff --git a/vendor/src/github.com/docker/swarmkit/agent/config.go b/vendor/src/github.com/docker/swarmkit/agent/config.go
new file mode 100644
index 0000000000..589b56b6bd
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/config.go
@@ -0,0 +1,49 @@
+package agent
+
+import (
+ "fmt"
+
+ "github.com/boltdb/bolt"
+ "github.com/docker/swarmkit/agent/exec"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/picker"
+ "google.golang.org/grpc"
+)
+
+// Config provides values for an Agent.
+type Config struct {
+ // Hostname is the name of the host for the agent instance.
+ Hostname string
+
+ // Managers provides the manager backend used by the agent. It will be
+ // updated with managers weights as observed by the agent.
+ Managers picker.Remotes
+
+ // Conn specifies the client connection Agent will use
+ Conn *grpc.ClientConn
+
+ // Executor specifies the executor to use for the agent.
+ Executor exec.Executor
+
+ // DB used for task storage. Must be open for the lifetime of the agent.
+ DB *bolt.DB
+
+ // NotifyRoleChange channel receives new roles from session messages.
+ NotifyRoleChange chan<- api.NodeRole
+}
+
+func (c *Config) validate() error {
+ if c.Conn == nil {
+ return fmt.Errorf("agent: Connection is required")
+ }
+
+ if c.Executor == nil {
+ return fmt.Errorf("agent: executor required")
+ }
+
+ if c.DB == nil {
+ return fmt.Errorf("agent: database required")
+ }
+
+ return nil
+}
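For orientation, a hypothetical wiring of the fields validate checks for; the manager address, DB path, and executor are placeholders, and error handling is elided:

db, _ := bolt.Open("/var/lib/example/agent.db", 0600, nil) // placeholder path
conn, _ := grpc.Dial("manager.example.com:4242", grpc.WithInsecure()) // placeholder address

a, _ := agent.New(&agent.Config{
	Hostname: "node-1",
	Conn:     conn,
	Executor: myExecutor, // some exec.Executor implementation (assumed)
	DB:       db,
})
_ = a.Start(context.Background())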
diff --git a/vendor/src/github.com/docker/swarmkit/agent/errors.go b/vendor/src/github.com/docker/swarmkit/agent/errors.go
new file mode 100644
index 0000000000..1089708f51
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/errors.go
@@ -0,0 +1,24 @@
+package agent
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+ // ErrClosed is returned when an operation fails because the resource is closed.
+ ErrClosed = errors.New("agent: closed")
+
+ errNodeNotRegistered = fmt.Errorf("node not registered")
+
+ errAgentNotStarted = errors.New("agent: not started")
+ errAgentStarted = errors.New("agent: already started")
+ errAgentStopped = errors.New("agent: stopped")
+
+ errTaskNoContoller = errors.New("agent: no task controller")
+ errTaskNotAssigned = errors.New("agent: task not assigned")
+ errTaskStatusUpdateNoChange = errors.New("agent: no change in task status")
+ errTaskUnknown = errors.New("agent: task unknown")
+
+ errTaskInvalid = errors.New("task: invalid")
+)
diff --git a/vendor/src/github.com/docker/swarmkit/agent/exec/controller.go b/vendor/src/github.com/docker/swarmkit/agent/exec/controller.go
new file mode 100644
index 0000000000..59b54a1b3e
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/exec/controller.go
@@ -0,0 +1,267 @@
+package exec
+
+import (
+ "fmt"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "golang.org/x/net/context"
+)
+
+// ContainerController controls execution of container tasks.
+type ContainerController interface {
+ // ContainerStatus returns the status of the target container, if
+ // available. When the container is not available, the status will be nil.
+ ContainerStatus(ctx context.Context) (*api.ContainerStatus, error)
+}
+
+// Controller controls execution of a task.
+type Controller interface {
+ // Update the task definition seen by the controller. Will return
+ // ErrTaskUpdateFailed if the provided task definition changes fields that
+ // cannot be changed.
+ //
+ // Will be ignored if the task has exited.
+ Update(ctx context.Context, t *api.Task) error
+
+ // Prepare the task for execution. This should ensure that all resources
+ // are created such that a call to start should execute immediately.
+ Prepare(ctx context.Context) error
+
+ // Start the target and return when it has started successfully.
+ Start(ctx context.Context) error
+
+ // Wait blocks until the target has exited.
+ Wait(ctx context.Context) error
+
+ // Shutdown requests to exit the target gracefully.
+ Shutdown(ctx context.Context) error
+
+ // Terminate the target.
+ Terminate(ctx context.Context) error
+
+ // Remove all resources allocated by the controller.
+ Remove(ctx context.Context) error
+
+ // Close closes any ephemeral resources associated with controller instance.
+ Close() error
+}
+
+// Resolve attempts to get a controller from the executor and reports the
+// correct status depending on the task's current state according to the result.
+//
+// Unlike Do, if an error is returned, the status should still be reported. The
+// error merely reports the failure to resolve a controller.
+func Resolve(ctx context.Context, task *api.Task, executor Executor) (Controller, *api.TaskStatus, error) {
+ status := task.Status.Copy()
+
+ defer func() {
+ logStateChange(ctx, task.DesiredState, task.Status.State, status.State)
+ }()
+
+ ctlr, err := executor.Controller(task)
+
+ // depending on the task's state, a failed controller resolution has varying
+ // impact. The following expresses that impact.
+ if task.Status.State < api.TaskStateStarting {
+ if err != nil {
+ // before the task has been started, we consider it a rejection.
+ status.Message = "resolving controller failed"
+ status.Err = err.Error()
+ status.State = api.TaskStateRejected
+ } else if task.Status.State < api.TaskStateAccepted {
+ // we always want to proceed to accepted when we resolve the controller
+ status.Message = "accepted"
+ status.State = api.TaskStateAccepted
+ }
+ }
+
+ return ctlr, status, err
+}
+
+// Do progresses the task state by performing a single operation on the
+// controller. The returned TaskStatus should be marked as the new state
+// of the task.
+//
+// The returned status should be reported and placed back onto the task
+// before the next call. The operation can be cancelled by cancelling the
+// provided context.
+//
+// Errors from the task controller will be reported on the returned status. Any
+// errors coming from this function should not be reported as related to the
+// individual task.
+//
+// If ErrTaskNoop is returned, it means a second call to Do will result in no
+// change. If ErrTaskDead is returned, calls to Do will no longer result in any
+// action.
+func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus, error) {
+ status := task.Status.Copy()
+
+ // stay in the current state.
+ noop := func(errs ...error) (*api.TaskStatus, error) {
+ return status, ErrTaskNoop
+ }
+
+ retry := func() (*api.TaskStatus, error) {
+ // while we retry on all errors, this allows us to explicitly declare
+ // retry cases.
+ return status, ErrTaskRetry
+ }
+
+ // transition moves the task to the next state.
+ transition := func(state api.TaskState, msg string) (*api.TaskStatus, error) {
+ current := status.State
+ status.State = state
+ status.Message = msg
+
+ if current > state {
+ panic("invalid state transition")
+ }
+ return status, nil
+ }
+
+ // fatal is returned when an error during task execution is fatal. In this
+ // case, we proceed to a terminal error state and set the appropriate fields.
+ //
+ // Common checks for the nature of an error should be included here. If the
+ // error is determined not to be fatal for the task, the operation is retried.
+ fatal := func(err error) (*api.TaskStatus, error) {
+ if err == nil {
+ panic("err must not be nil when fatal")
+ }
+
+ if IsTemporary(err) {
+ switch Cause(err) {
+ case context.DeadlineExceeded, context.Canceled:
+ // no need to set these errors, since these are the more common cases.
+ default:
+ status.Err = err.Error()
+ }
+
+ return retry()
+ }
+
+ if cause := Cause(err); cause == context.DeadlineExceeded || cause == context.Canceled {
+ return retry()
+ }
+
+ log.G(ctx).WithError(err).Error("fatal task error")
+ status.Err = err.Error()
+
+ switch {
+ case status.State < api.TaskStateStarting:
+ status.State = api.TaskStateRejected
+ case status.State > api.TaskStateStarting:
+ status.State = api.TaskStateFailed
+ }
+
+ return status, nil
+ }
+
+ // below, we have several callbacks that are run after the state transition
+ // is completed.
+ defer func() {
+ logStateChange(ctx, task.DesiredState, task.Status.State, status.State)
+ }()
+
+ // extract the container status from the container, if supported.
+ defer func() {
+ // only do this if in an active state
+ if status.State < api.TaskStateStarting {
+ return
+ }
+
+ cctlr, ok := ctlr.(ContainerController)
+ if !ok {
+ return
+ }
+
+ cstatus, err := cctlr.ContainerStatus(ctx)
+ if err != nil {
+ log.G(ctx).WithError(err).Error("container status unavailable")
+ return
+ }
+
+ if cstatus != nil {
+ status.RuntimeStatus = &api.TaskStatus_Container{
+ Container: cstatus,
+ }
+ }
+ }()
+
+ if task.DesiredState == api.TaskStateShutdown {
+ if status.State >= api.TaskStateCompleted {
+ return noop()
+ }
+
+ if err := ctlr.Shutdown(ctx); err != nil {
+ return fatal(err)
+ }
+
+ return transition(api.TaskStateShutdown, "shutdown")
+ }
+
+ if status.State > task.DesiredState {
+ return noop() // way beyond desired state, pause
+ }
+
+ // the following states may proceed past desired state.
+ switch status.State {
+ case api.TaskStatePreparing:
+ if err := ctlr.Prepare(ctx); err != nil && err != ErrTaskPrepared {
+ return fatal(err)
+ }
+
+ return transition(api.TaskStateReady, "prepared")
+ case api.TaskStateStarting:
+ if err := ctlr.Start(ctx); err != nil && err != ErrTaskStarted {
+ return fatal(err)
+ }
+
+ return transition(api.TaskStateRunning, "started")
+ case api.TaskStateRunning:
+ if err := ctlr.Wait(ctx); err != nil {
+ // Wait should only proceed to failed if there is a terminal
+ // error. The only two conditions when this happens are when we
+ // get an exit code or when the container doesn't exist.
+ switch err := err.(type) {
+ case ExitCoder:
+ return transition(api.TaskStateFailed, "failed")
+ default:
+ // pursuant to the above comment, report fatal, but wrap as
+ // temporary.
+ return fatal(MakeTemporary(err))
+ }
+ }
+
+ return transition(api.TaskStateCompleted, "finished")
+ }
+
+ // The following represent "pause" states. We can only proceed when the
+ // desired state is beyond our current state.
+ if status.State >= task.DesiredState {
+ return noop()
+ }
+
+ switch status.State {
+ case api.TaskStateNew, api.TaskStateAllocated, api.TaskStateAssigned:
+ return transition(api.TaskStateAccepted, "accepted")
+ case api.TaskStateAccepted:
+ return transition(api.TaskStatePreparing, "preparing")
+ case api.TaskStateReady:
+ return transition(api.TaskStateStarting, "starting")
+ default: // terminal states
+ return noop()
+ }
+}
+
+func logStateChange(ctx context.Context, desired, previous, next api.TaskState) {
+ if previous != next {
+ fields := logrus.Fields{
+ "state.transition": fmt.Sprintf("%v->%v", previous, next),
+ "state.desired": desired,
+ }
+ log.G(ctx).WithFields(fields).Debug("state changed")
+ }
+}
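+
+// An illustrative sketch (not part of the vendored source) of how a caller
+// might drive Do: call it repeatedly, report each returned status, back off
+// on ErrTaskRetry, and stop on other errors until the task is updated. The
+// taskManager in agent/task.go implements a fuller version of this loop;
+// report below is a hypothetical placeholder for the caller's reporter.
+//
+//	for {
+//		status, err := Do(ctx, task, ctlr)
+//		if status != nil {
+//			report(status) // hypothetical reporter
+//			task.Status = *status
+//		}
+//		switch err {
+//		case nil:
+//			continue // a transition occurred; keep advancing
+//		case ErrTaskRetry:
+//			time.Sleep(time.Second) // simple backoff before retrying
+//		default:
+//			return err // includes ErrTaskNoop: wait for a task update
+//		}
+//	}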
diff --git a/vendor/src/github.com/docker/swarmkit/agent/exec/controller_test.mock.go b/vendor/src/github.com/docker/swarmkit/agent/exec/controller_test.mock.go
new file mode 100644
index 0000000000..7a45c9aa28
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/exec/controller_test.mock.go
@@ -0,0 +1,143 @@
+// Automatically generated by MockGen. DO NOT EDIT!
+// Source: controller.go
+
+package exec
+
+import (
+ api "github.com/docker/swarmkit/api"
+ gomock "github.com/golang/mock/gomock"
+ context "golang.org/x/net/context"
+)
+
+// Mock of ContainerController interface
+type MockContainerController struct {
+ ctrl *gomock.Controller
+ recorder *_MockContainerControllerRecorder
+}
+
+// Recorder for MockContainerController (not exported)
+type _MockContainerControllerRecorder struct {
+ mock *MockContainerController
+}
+
+func NewMockContainerController(ctrl *gomock.Controller) *MockContainerController {
+ mock := &MockContainerController{ctrl: ctrl}
+ mock.recorder = &_MockContainerControllerRecorder{mock}
+ return mock
+}
+
+func (_m *MockContainerController) EXPECT() *_MockContainerControllerRecorder {
+ return _m.recorder
+}
+
+func (_m *MockContainerController) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
+ ret := _m.ctrl.Call(_m, "ContainerStatus", ctx)
+ ret0, _ := ret[0].(*api.ContainerStatus)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+func (_mr *_MockContainerControllerRecorder) ContainerStatus(arg0 interface{}) *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "ContainerStatus", arg0)
+}
+
+// Mock of Controller interface
+type MockController struct {
+ ctrl *gomock.Controller
+ recorder *_MockControllerRecorder
+}
+
+// Recorder for MockController (not exported)
+type _MockControllerRecorder struct {
+ mock *MockController
+}
+
+func NewMockController(ctrl *gomock.Controller) *MockController {
+ mock := &MockController{ctrl: ctrl}
+ mock.recorder = &_MockControllerRecorder{mock}
+ return mock
+}
+
+func (_m *MockController) EXPECT() *_MockControllerRecorder {
+ return _m.recorder
+}
+
+func (_m *MockController) Update(ctx context.Context, t *api.Task) error {
+ ret := _m.ctrl.Call(_m, "Update", ctx, t)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+func (_mr *_MockControllerRecorder) Update(arg0, arg1 interface{}) *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "Update", arg0, arg1)
+}
+
+func (_m *MockController) Prepare(ctx context.Context) error {
+ ret := _m.ctrl.Call(_m, "Prepare", ctx)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+func (_mr *_MockControllerRecorder) Prepare(arg0 interface{}) *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "Prepare", arg0)
+}
+
+func (_m *MockController) Start(ctx context.Context) error {
+ ret := _m.ctrl.Call(_m, "Start", ctx)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+func (_mr *_MockControllerRecorder) Start(arg0 interface{}) *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "Start", arg0)
+}
+
+func (_m *MockController) Wait(ctx context.Context) error {
+ ret := _m.ctrl.Call(_m, "Wait", ctx)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+func (_mr *_MockControllerRecorder) Wait(arg0 interface{}) *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "Wait", arg0)
+}
+
+func (_m *MockController) Shutdown(ctx context.Context) error {
+ ret := _m.ctrl.Call(_m, "Shutdown", ctx)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+func (_mr *_MockControllerRecorder) Shutdown(arg0 interface{}) *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "Shutdown", arg0)
+}
+
+func (_m *MockController) Terminate(ctx context.Context) error {
+ ret := _m.ctrl.Call(_m, "Terminate", ctx)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+func (_mr *_MockControllerRecorder) Terminate(arg0 interface{}) *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "Terminate", arg0)
+}
+
+func (_m *MockController) Remove(ctx context.Context) error {
+ ret := _m.ctrl.Call(_m, "Remove", ctx)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+func (_mr *_MockControllerRecorder) Remove(arg0 interface{}) *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "Remove", arg0)
+}
+
+func (_m *MockController) Close() error {
+ ret := _m.ctrl.Call(_m, "Close")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+func (_mr *_MockControllerRecorder) Close() *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "Close")
+}
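+
+// Illustrative test usage (not part of the generated output; t is a
+// *testing.T): the mocks are driven through gomock's expectation API.
+//
+//	ctrl := gomock.NewController(t)
+//	defer ctrl.Finish()
+//
+//	mock := NewMockController(ctrl)
+//	mock.EXPECT().Prepare(gomock.Any()).Return(nil)
+//	mock.EXPECT().Start(gomock.Any()).Return(nil)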
diff --git a/vendor/src/github.com/docker/swarmkit/agent/exec/errors.go b/vendor/src/github.com/docker/swarmkit/agent/exec/errors.go
new file mode 100644
index 0000000000..db6467c6fc
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/exec/errors.go
@@ -0,0 +1,96 @@
+package exec
+
+import "errors"
+
+var (
+	// ErrRuntimeUnsupported is returned when a task requires a runtime
+	// unsupported by the executor.
+ ErrRuntimeUnsupported = errors.New("exec: unsupported runtime")
+
+	// ErrTaskPrepared is returned if the task is already prepared.
+ ErrTaskPrepared = errors.New("exec: task already prepared")
+
+ // ErrTaskStarted can be returned from any operation that cannot be
+ // performed because the task has already been started. This does not imply
+ // that the task is running but rather that it is no longer valid to call
+ // Start.
+ ErrTaskStarted = errors.New("exec: task already started")
+
+ // ErrTaskUpdateRejected is returned if a task update is rejected by a controller.
+ ErrTaskUpdateRejected = errors.New("exec: task update rejected")
+
+	// ErrControllerClosed is returned when a task controller has been closed.
+ ErrControllerClosed = errors.New("exec: controller closed")
+
+	// ErrTaskRetry is returned by Do when an operation failed but should be
+	// retried. The status should still be reported in this case.
+ ErrTaskRetry = errors.New("exec: task retry")
+
+	// ErrTaskNoop is returned when a subsequent call to Do will not result in
+	// advancing the task. Callers should avoid calling Do until the task has been updated.
+ ErrTaskNoop = errors.New("exec: task noop")
+)
+
+// ExitCoder is implemented by errors that have an exit code.
+type ExitCoder interface {
+ // ExitCode returns the exit code.
+ ExitCode() int
+}
+
+type causal interface {
+ Cause() error
+}
+
+// Cause returns the cause of the error, recursively.
+func Cause(err error) error {
+ for err != nil {
+ if causal, ok := err.(causal); ok {
+ err = causal.Cause()
+ } else {
+ break
+ }
+ }
+
+ return err
+}
+
+// Temporary indicates whether or not the error condition is temporary.
+//
+// If this is encountered in the controller, the failing operation will be
+// retried when this returns true. Otherwise, the operation is considered
+// fatal.
+type Temporary interface {
+ Temporary() bool
+}
+
+// MakeTemporary makes the error temporary.
+func MakeTemporary(err error) error {
+ return &temporary{error: err}
+}
+
+type temporary struct {
+ error
+}
+
+func (t *temporary) Cause() error { return t.error }
+func (t *temporary) Temporary() bool { return true }
+
+// IsTemporary returns true if the error, or any of its recursive causes,
+// reports itself as temporary.
+func IsTemporary(err error) bool {
+ for err != nil {
+ if tmp, ok := err.(Temporary); ok {
+ if tmp.Temporary() {
+ return true
+ }
+ }
+
+ if causal, ok := err.(causal); !ok {
+ break
+ } else {
+ err = causal.Cause()
+ }
+ }
+
+ return false
+}
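+
+// An illustrative sketch (not part of the vendored source) of how these
+// helpers compose: MakeTemporary wraps an error so IsTemporary reports true,
+// while Cause unwinds wrappers back to the root error.
+//
+//	base := errors.New("connection reset")
+//	err := MakeTemporary(base)
+//
+//	IsTemporary(err)  // true: the wrapper reports Temporary() == true
+//	IsTemporary(base) // false: the raw error carries no retry hint
+//	Cause(err)        // base: unwraps the temporary wrapper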
diff --git a/vendor/src/github.com/docker/swarmkit/agent/exec/executor.go b/vendor/src/github.com/docker/swarmkit/agent/exec/executor.go
new file mode 100644
index 0000000000..d4f3da58f8
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/exec/executor.go
@@ -0,0 +1,23 @@
+package exec
+
+import (
+ "github.com/docker/swarmkit/api"
+ "golang.org/x/net/context"
+)
+
+// Executor provides controllers for tasks.
+type Executor interface {
+ // Describe returns the underlying node description.
+ Describe(ctx context.Context) (*api.NodeDescription, error)
+
+	// Configure uses the node object state to propagate node
+	// state to the underlying executor.
+ Configure(ctx context.Context, node *api.Node) error
+
+ // Controller provides a controller for the given task.
+ Controller(t *api.Task) (Controller, error)
+
+ // SetNetworkBootstrapKeys passes the symmetric keys from the
+ // manager to the executor.
+ SetNetworkBootstrapKeys([]*api.EncryptionKey) error
+}
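+
+// A minimal, illustrative Executor stub (not part of the vendored source),
+// e.g. as a starting point for tests; the nilExecutor name is hypothetical.
+//
+//	type nilExecutor struct{}
+//
+//	func (nilExecutor) Describe(ctx context.Context) (*api.NodeDescription, error) {
+//		return &api.NodeDescription{Hostname: "test-node"}, nil
+//	}
+//
+//	func (nilExecutor) Configure(ctx context.Context, node *api.Node) error { return nil }
+//
+//	func (nilExecutor) Controller(t *api.Task) (Controller, error) {
+//		return nil, ErrRuntimeUnsupported
+//	}
+//
+//	func (nilExecutor) SetNetworkBootstrapKeys([]*api.EncryptionKey) error { return nil }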
diff --git a/vendor/src/github.com/docker/swarmkit/agent/helpers.go b/vendor/src/github.com/docker/swarmkit/agent/helpers.go
new file mode 100644
index 0000000000..40b5fdf30e
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/helpers.go
@@ -0,0 +1,13 @@
+package agent
+
+import "golang.org/x/net/context"
+
+// runctx blocks until the function exits, closed is closed, or the context is
+// cancelled. Call as part of a go statement.
+func runctx(ctx context.Context, closed chan struct{}, errs chan error, fn func(ctx context.Context) error) {
+ select {
+ case errs <- fn(ctx):
+ case <-closed:
+ case <-ctx.Done():
+ }
+}
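+
+// Illustrative usage (as in session.go in this package): each long-running
+// component is launched as its own go statement, and its error, if any, is
+// delivered to the shared errs channel at most once, or dropped on shutdown.
+//
+//	go runctx(ctx, s.closed, s.errs, s.heartbeat)
+//	go runctx(ctx, s.closed, s.errs, s.watch)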
diff --git a/vendor/src/github.com/docker/swarmkit/agent/node.go b/vendor/src/github.com/docker/swarmkit/agent/node.go
new file mode 100644
index 0000000000..bfbab2a4c1
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/node.go
@@ -0,0 +1,738 @@
+package agent
+
+import (
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/boltdb/bolt"
+ "github.com/docker/swarmkit/agent/exec"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/ca"
+ "github.com/docker/swarmkit/ioutils"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager"
+ "github.com/docker/swarmkit/picker"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+)
+
+const stateFilename = "state.json"
+
+// NodeConfig provides values for a Node.
+type NodeConfig struct {
+	// Hostname is the hostname for the agent instance.
+ Hostname string
+
+	// JoinAddr specifies the address of a node to use for the initial
+	// connection to another manager in the cluster. It is optional and holds
+	// at most one address; the actual remotes come from the stored state.
+ JoinAddr string
+
+ // StateDir specifies the directory the node uses to keep the state of the
+ // remote managers and certificates.
+ StateDir string
+
+ // CAHash to be used on the first certificate request.
+ CAHash string
+
+ // Secret to be used on the first certificate request.
+ Secret string
+
+ // ForceNewCluster creates a new cluster from current raft state.
+ ForceNewCluster bool
+
+ // ListenControlAPI specifies address the control API should listen on.
+ ListenControlAPI string
+
+ // ListenRemoteAPI specifies the address for the remote API that agents
+ // and raft members connect to.
+ ListenRemoteAPI string
+
+ // Executor specifies the executor to use for the agent.
+ Executor exec.Executor
+
+	// ElectionTick defines the number of ticks that must elapse without
+	// a leader before a new election is triggered.
+ ElectionTick uint32
+
+	// HeartbeatTick defines the number of ticks between each
+	// heartbeat sent to other members for health-check purposes.
+ HeartbeatTick uint32
+
+	// todo: temporary flag to bypass promotion, which is not working yet
+ IsManager bool
+}
+
+// Node implements the primary node functionality for a member of a swarm
+// cluster. Node handles workloads and may also run as a manager.
+type Node struct {
+ sync.RWMutex
+ config *NodeConfig
+ remotes *persistentRemotes
+ role string
+ roleCond *sync.Cond
+ conn *grpc.ClientConn
+ connCond *sync.Cond
+ nodeID string
+ started chan struct{}
+ stopped chan struct{}
+ ready chan struct{}
+ closed chan struct{}
+ err error
+ agent *Agent
+ manager *manager.Manager
+ roleChangeReq chan api.NodeRole
+}
+
+// NewNode returns a new Node instance.
+func NewNode(c *NodeConfig) (*Node, error) {
+ if err := os.MkdirAll(c.StateDir, 0700); err != nil {
+ return nil, err
+ }
+ stateFile := filepath.Join(c.StateDir, stateFilename)
+ dt, err := ioutil.ReadFile(stateFile)
+ var p []api.Peer
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ if err == nil {
+ if err := json.Unmarshal(dt, &p); err != nil {
+ return nil, err
+ }
+ }
+
+ n := &Node{
+ remotes: newPersistentRemotes(stateFile, p...),
+ role: ca.AgentRole,
+ config: c,
+ started: make(chan struct{}),
+ stopped: make(chan struct{}),
+ closed: make(chan struct{}),
+ ready: make(chan struct{}),
+ roleChangeReq: make(chan api.NodeRole, 1),
+ }
+ n.roleCond = sync.NewCond(n.RLocker())
+ n.connCond = sync.NewCond(n.RLocker())
+ if err := n.loadCertificates(); err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+// Start starts a node instance.
+func (n *Node) Start(ctx context.Context) error {
+ select {
+ case <-n.started:
+ select {
+ case <-n.closed:
+ return n.err
+ case <-n.stopped:
+ return errAgentStopped
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ return errAgentStarted
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ close(n.started)
+ go n.run(ctx)
+ return nil
+}
+
+func (n *Node) run(ctx context.Context) (err error) {
+ defer func() {
+ n.err = err
+ close(n.closed)
+ }()
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("module", "node"))
+
+ go func() {
+ select {
+ case <-ctx.Done():
+ case <-n.stopped:
+ cancel()
+ }
+ }()
+
+ if (n.config.JoinAddr == "" && n.nodeID == "") || n.config.ForceNewCluster {
+ if err := n.bootstrapCA(); err != nil {
+ return err
+ }
+ }
+
+ if n.config.JoinAddr != "" || n.config.ForceNewCluster {
+ n.remotes = newPersistentRemotes(filepath.Join(n.config.StateDir, stateFilename), api.Peer{Addr: n.config.JoinAddr})
+ }
+
+ csrRole := n.role
+ if n.config.IsManager { // todo: temporary
+ csrRole = ca.ManagerRole
+ }
+
+ // Obtain new certs and setup TLS certificates renewal for this node:
+ // - We call LoadOrCreateSecurityConfig which blocks until a valid certificate has been issued
+ // - We retrieve the nodeID from LoadOrCreateSecurityConfig through the info channel. This allows
+ // us to display the ID before the certificate gets issued (for potential approval).
+ // - We wait for LoadOrCreateSecurityConfig to finish since we need a certificate to operate.
+ // - Given a valid certificate, spin a renewal go-routine that will ensure that certificates stay
+ // up to date.
+ nodeIDChan := make(chan string, 1)
+ caLoadDone := make(chan struct{})
+ go func() {
+ select {
+ case <-ctx.Done():
+ case <-caLoadDone:
+ case nodeID := <-nodeIDChan:
+ logrus.Debugf("Requesting certificate for NodeID: %v", nodeID)
+ n.Lock()
+ n.nodeID = nodeID
+ n.Unlock()
+ }
+ }()
+
+ certDir := filepath.Join(n.config.StateDir, "certificates")
+ securityConfig, err := ca.LoadOrCreateSecurityConfig(ctx, certDir, n.config.CAHash, n.config.Secret, csrRole, picker.NewPicker(n.remotes), nodeIDChan)
+ close(caLoadDone)
+ if err != nil {
+ return err
+ }
+
+ taskDBPath := filepath.Join(n.config.StateDir, "worker/tasks.db")
+ if err := os.MkdirAll(filepath.Dir(taskDBPath), 0777); err != nil {
+ return err
+ }
+
+ db, err := bolt.Open(taskDBPath, 0666, nil)
+ if err != nil {
+ return err
+ }
+
+ if err := n.loadCertificates(); err != nil {
+ return err
+ }
+
+ forceCertRenewal := make(chan struct{})
+ go func() {
+ n.RLock()
+ lastRole := n.role
+ n.RUnlock()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case apirole := <-n.roleChangeReq:
+ role := ca.AgentRole
+ if apirole == api.NodeRoleManager {
+ role = ca.ManagerRole
+ }
+ if lastRole != role {
+ forceCertRenewal <- struct{}{}
+ }
+ lastRole = role
+ }
+ }
+ }()
+
+ updates := ca.RenewTLSConfig(ctx, securityConfig, certDir, picker.NewPicker(n.remotes), forceCertRenewal)
+ go func() {
+ for {
+ select {
+ case certUpdate := <-updates:
+ if ctx.Err() != nil {
+ return
+ }
+ if certUpdate.Err != nil {
+ logrus.Warnf("error renewing TLS certificate: %v", certUpdate.Err)
+ continue
+ }
+ n.Lock()
+ n.role = certUpdate.Role
+ n.roleCond.Broadcast()
+ n.Unlock()
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+
+ role := n.role
+
+ managerReady := make(chan struct{})
+ agentReady := make(chan struct{})
+ var managerErr error
+ var agentErr error
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ managerErr = n.runManager(ctx, securityConfig, managerReady) // store err and loop
+ wg.Done()
+ cancel()
+ }()
+ go func() {
+ agentErr = n.runAgent(ctx, db, securityConfig.ClientTLSCreds, agentReady)
+ wg.Done()
+ cancel()
+ }()
+
+ go func() {
+ <-agentReady
+ if role == ca.ManagerRole {
+ <-managerReady
+ }
+ close(n.ready)
+ }()
+
+ wg.Wait()
+ if managerErr != nil && managerErr != context.Canceled {
+ return managerErr
+ }
+ if agentErr != nil && agentErr != context.Canceled {
+ return agentErr
+ }
+ return err
+}
+
+// Stop stops node execution
+func (n *Node) Stop(ctx context.Context) error {
+ select {
+ case <-n.started:
+ select {
+ case <-n.closed:
+ return n.err
+ case <-n.stopped:
+ select {
+ case <-n.closed:
+ return n.err
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ close(n.stopped)
+ // recurse and wait for closure
+ return n.Stop(ctx)
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ return errAgentNotStarted
+ }
+}
+
+// Err returns the error that caused the node to shutdown or nil. Err blocks
+// until the node has fully shut down.
+func (n *Node) Err(ctx context.Context) error {
+ select {
+ case <-n.closed:
+ return n.err
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (n *Node) runAgent(ctx context.Context, db *bolt.DB, creds credentials.TransportAuthenticator, ready chan<- struct{}) error {
+ var manager api.Peer
+ select {
+ case <-ctx.Done():
+ case manager = <-n.remotes.WaitSelect(ctx):
+ }
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ conn, err := grpc.Dial(manager.Addr,
+ grpc.WithPicker(picker.NewPicker(n.remotes, manager.Addr)),
+ grpc.WithTransportCredentials(creds),
+ grpc.WithBackoffMaxDelay(maxSessionFailureBackoff))
+ if err != nil {
+ return err
+ }
+
+ agent, err := New(&Config{
+ Hostname: n.config.Hostname,
+ Managers: n.remotes,
+ Executor: n.config.Executor,
+ DB: db,
+ Conn: conn,
+ NotifyRoleChange: n.roleChangeReq,
+ })
+ if err != nil {
+ return err
+ }
+ if err := agent.Start(ctx); err != nil {
+ return err
+ }
+
+ n.Lock()
+ n.agent = agent
+ n.Unlock()
+
+ defer func() {
+ n.Lock()
+ n.agent = nil
+ n.Unlock()
+ }()
+
+ go func() {
+ <-agent.Ready()
+ close(ready)
+ }()
+
+ // todo: manually call stop on context cancellation?
+
+ return agent.Err(context.Background())
+}
+
+// Ready returns a channel that is closed once the node's initialization
+// completes for the first time.
+func (n *Node) Ready(ctx context.Context) <-chan struct{} {
+ return n.ready
+}
+
+func (n *Node) waitRole(ctx context.Context, role string) <-chan struct{} {
+ c := make(chan struct{})
+ n.roleCond.L.Lock()
+ if role == n.role {
+ close(c)
+ n.roleCond.L.Unlock()
+ return c
+ }
+ go func() {
+ select {
+ case <-ctx.Done():
+ n.roleCond.Broadcast()
+ case <-c:
+ }
+ }()
+ go func() {
+ defer n.roleCond.L.Unlock()
+ defer close(c)
+ for role != n.role {
+ n.roleCond.Wait()
+ if ctx.Err() != nil {
+ return
+ }
+ }
+ }()
+ return c
+}
+
+func (n *Node) setControlSocket(conn *grpc.ClientConn) {
+ n.Lock()
+ n.conn = conn
+ n.connCond.Broadcast()
+ n.Unlock()
+}
+
+// ListenControlSocket listens for changes to the connection used for
+// managing the cluster control API.
+func (n *Node) ListenControlSocket(ctx context.Context) <-chan *grpc.ClientConn {
+ c := make(chan *grpc.ClientConn, 1)
+ n.RLock()
+ conn := n.conn
+ c <- conn
+ done := make(chan struct{})
+ go func() {
+ select {
+ case <-ctx.Done():
+ n.connCond.Broadcast()
+ case <-done:
+ }
+ }()
+ go func() {
+ defer close(c)
+ defer close(done)
+ defer n.RUnlock()
+ for {
+ if ctx.Err() != nil {
+ return
+ }
+ if conn == n.conn {
+ n.connCond.Wait()
+ continue
+ }
+ conn = n.conn
+ c <- conn
+ }
+ }()
+ return c
+}
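+
+// Illustrative usage (not part of the vendored source): consumers range over
+// the returned channel and react to each connection value, which may be nil
+// while no manager is reachable. api.NewControlClient is assumed here.
+//
+//	for conn := range n.ListenControlSocket(ctx) {
+//		if conn == nil {
+//			continue // connection lost; wait for the next one
+//		}
+//		client := api.NewControlClient(conn)
+//		_ = client // issue control-API requests here
+//	}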
+
+// NodeID returns the current node's ID. May be empty if not yet set.
+func (n *Node) NodeID() string {
+ n.RLock()
+ defer n.RUnlock()
+ return n.nodeID
+}
+
+// Manager returns the manager instance started by the node. May be nil.
+func (n *Node) Manager() *manager.Manager {
+ n.RLock()
+ defer n.RUnlock()
+ return n.manager
+}
+
+// Agent returns the agent instance started by the node. May be nil.
+func (n *Node) Agent() *Agent {
+ n.RLock()
+ defer n.RUnlock()
+ return n.agent
+}
+
+// Remotes returns the list of peers known to the node.
+func (n *Node) Remotes() []api.Peer {
+ weights := n.remotes.Weights()
+ remotes := make([]api.Peer, 0, len(weights))
+ for p := range weights {
+ remotes = append(remotes, p)
+ }
+ return remotes
+}
+
+func (n *Node) loadCertificates() error {
+ certDir := filepath.Join(n.config.StateDir, "certificates")
+ rootCA, err := ca.GetLocalRootCA(certDir)
+ if err != nil {
+ if err == ca.ErrNoLocalRootCA {
+ return nil
+ }
+ return err
+ }
+ configPaths := ca.NewConfigPaths(certDir)
+ clientTLSCreds, _, err := ca.LoadTLSCreds(rootCA, configPaths.Node)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+
+ return fmt.Errorf("error while loading TLS Certificate in %s: %v", configPaths.Node.Cert, err)
+ }
+ // todo: try csr if no cert or store nodeID/role in some other way
+ n.Lock()
+ n.role = clientTLSCreds.Role()
+ n.nodeID = clientTLSCreds.NodeID()
+ n.roleCond.Broadcast()
+ n.Unlock()
+
+ return nil
+}
+
+func (n *Node) bootstrapCA() error {
+ if err := ca.BootstrapCluster(filepath.Join(n.config.StateDir, "certificates")); err != nil {
+ return err
+ }
+ return n.loadCertificates()
+}
+
+func (n *Node) initManagerConnection(ctx context.Context, ready chan<- struct{}) error {
+ opts := []grpc.DialOption{}
+ insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
+ opts = append(opts, grpc.WithTransportCredentials(insecureCreds))
+ addr := n.config.ListenControlAPI
+ opts = append(opts, grpc.WithDialer(
+ func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }))
+ conn, err := grpc.Dial(addr, opts...)
+ if err != nil {
+ return err
+ }
+ state := grpc.Idle
+ for {
+ s, err := conn.WaitForStateChange(ctx, state)
+ if err != nil {
+ return err
+ }
+ if s == grpc.Ready {
+ n.setControlSocket(conn)
+ if ready != nil {
+ close(ready)
+ }
+ ready = nil
+ } else if state == grpc.Shutdown {
+ n.setControlSocket(nil)
+ }
+ state = s
+ }
+}
+
+func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig, ready chan struct{}) error {
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-n.waitRole(ctx, ca.ManagerRole):
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ remoteAddr, _ := n.remotes.Select(n.nodeID)
+ m, err := manager.New(&manager.Config{
+ ForceNewCluster: n.config.ForceNewCluster,
+ ProtoAddr: map[string]string{
+ "tcp": n.config.ListenRemoteAPI,
+ "unix": n.config.ListenControlAPI,
+ },
+ SecurityConfig: securityConfig,
+ JoinRaft: remoteAddr.Addr,
+ StateDir: n.config.StateDir,
+ HeartbeatTick: n.config.HeartbeatTick,
+ ElectionTick: n.config.ElectionTick,
+ })
+ if err != nil {
+ return err
+ }
+ done := make(chan struct{})
+ go func() {
+ m.Run(context.Background()) // todo: store error
+ close(done)
+ }()
+
+ n.Lock()
+ n.manager = m
+ n.Unlock()
+
+ go n.initManagerConnection(ctx, ready)
+
+ go func() {
+ select {
+ case <-ready:
+ case <-ctx.Done():
+ }
+ if ctx.Err() == nil {
+ n.remotes.Observe(api.Peer{NodeID: n.nodeID, Addr: n.config.ListenRemoteAPI}, 5)
+ }
+ }()
+
+ select {
+ case <-ctx.Done():
+ case <-n.waitRole(ctx, ca.AgentRole):
+ }
+
+ m.Stop(context.Background()) // todo: this should be sync like other components
+ <-done
+
+ ready = nil // ready event happens once, even on multiple starts
+ n.Lock()
+ n.manager = nil
+ n.Unlock()
+
+			if ctx.Err() != nil {
+				return ctx.Err()
+			}
+ }
+ }
+}
+
+type persistentRemotes struct {
+ sync.RWMutex
+ c *sync.Cond
+ picker.Remotes
+ storePath string
+ ch []chan api.Peer
+ lastSavedState []api.Peer
+}
+
+func newPersistentRemotes(f string, remotes ...api.Peer) *persistentRemotes {
+ pr := &persistentRemotes{
+ storePath: f,
+ Remotes: picker.NewRemotes(remotes...),
+ }
+ pr.c = sync.NewCond(pr.RLocker())
+ return pr
+}
+
+func (s *persistentRemotes) Observe(peer api.Peer, weight int) {
+	s.Lock()
+	defer s.Unlock()
+	s.Remotes.Observe(peer, weight)
+	s.c.Broadcast()
+	if err := s.save(); err != nil {
+		logrus.Errorf("error writing cluster state file: %v", err)
+	}
+}
+
+func (s *persistentRemotes) Remove(peers ...api.Peer) {
+	s.Remotes.Remove(peers...)
+	if err := s.save(); err != nil {
+		logrus.Errorf("error writing cluster state file: %v", err)
+	}
+}
+
+func (s *persistentRemotes) save() error {
+ weights := s.Weights()
+ remotes := make([]api.Peer, 0, len(weights))
+ for r := range weights {
+ remotes = append(remotes, r)
+ }
+ sort.Sort(sortablePeers(remotes))
+ if reflect.DeepEqual(remotes, s.lastSavedState) {
+ return nil
+ }
+ dt, err := json.Marshal(remotes)
+ if err != nil {
+ return err
+ }
+ s.lastSavedState = remotes
+ return ioutils.AtomicWriteFile(s.storePath, dt, 0600)
+}
+
+// WaitSelect waits until at least one remote becomes available and then selects one.
+func (s *persistentRemotes) WaitSelect(ctx context.Context) <-chan api.Peer {
+ c := make(chan api.Peer, 1)
+ s.RLock()
+ done := make(chan struct{})
+ go func() {
+ select {
+ case <-ctx.Done():
+ s.c.Broadcast()
+ case <-done:
+ }
+ }()
+ go func() {
+ defer s.RUnlock()
+ defer close(c)
+ defer close(done)
+ for {
+ if ctx.Err() != nil {
+ return
+ }
+ p, err := s.Select()
+ if err == nil {
+ c <- p
+ return
+ }
+ s.c.Wait()
+ }
+ }()
+ return c
+}
+
+// sortablePeers is a sort wrapper for []api.Peer
+type sortablePeers []api.Peer
+
+func (sp sortablePeers) Less(i, j int) bool { return sp[i].NodeID < sp[j].NodeID }
+
+func (sp sortablePeers) Len() int { return len(sp) }
+
+func (sp sortablePeers) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] }
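+
+// An illustrative lifecycle sketch (not part of the vendored source) using
+// only the exported Node API above; the config values are hypothetical.
+//
+//	node, err := NewNode(&NodeConfig{
+//		Hostname: "node-1",
+//		StateDir: "/var/lib/example-node",
+//		Executor: executor, // some exec.Executor implementation
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	if err := node.Start(ctx); err != nil {
+//		return err
+//	}
+//	<-node.Ready(ctx)    // wait for first initialization to complete
+//	defer node.Stop(ctx) // blocks until shutdown finishes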
diff --git a/vendor/src/github.com/docker/swarmkit/agent/reporter.go b/vendor/src/github.com/docker/swarmkit/agent/reporter.go
new file mode 100644
index 0000000000..3f9c462a3b
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/reporter.go
@@ -0,0 +1,124 @@
+package agent
+
+import (
+ "reflect"
+ "sync"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "golang.org/x/net/context"
+)
+
+// StatusReporter receives updates to task status. Methods may be called
+// concurrently, so implementations should be goroutine-safe.
+type StatusReporter interface {
+ UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error
+}
+
+type statusReporterFunc func(ctx context.Context, taskID string, status *api.TaskStatus) error
+
+func (fn statusReporterFunc) UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error {
+ return fn(ctx, taskID, status)
+}
+
+// statusReporter is a reliable StatusReporter that will always succeed.
+// It handles several tasks at once, ensuring all statuses are reported.
+//
+// The reporter will continue reporting the current status until it succeeds.
+type statusReporter struct {
+ reporter StatusReporter
+ statuses map[string]*api.TaskStatus
+ mu sync.Mutex
+ cond sync.Cond
+ closed bool
+}
+
+func newStatusReporter(ctx context.Context, upstream StatusReporter) *statusReporter {
+ r := &statusReporter{
+ reporter: upstream,
+ statuses: make(map[string]*api.TaskStatus),
+ }
+
+ r.cond.L = &r.mu
+
+ go r.run(ctx)
+ return r
+}
+
+func (sr *statusReporter) UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error {
+ sr.mu.Lock()
+ defer sr.mu.Unlock()
+
+ current, ok := sr.statuses[taskID]
+ if ok {
+ if reflect.DeepEqual(current, status) {
+ return nil
+ }
+
+ if current.State > status.State {
+ return nil // ignore old updates
+ }
+ }
+ sr.statuses[taskID] = status
+ sr.cond.Signal()
+
+ return nil
+}
+
+func (sr *statusReporter) Close() error {
+ sr.mu.Lock()
+ defer sr.mu.Unlock()
+
+ sr.closed = true
+ sr.cond.Signal()
+
+ return nil
+}
+
+func (sr *statusReporter) run(ctx context.Context) {
+ done := make(chan struct{})
+ defer close(done)
+
+ go func() {
+ select {
+ case <-ctx.Done():
+ sr.Close()
+ case <-done:
+ return
+ }
+ }()
+
+ sr.mu.Lock() // released during wait, below.
+ defer sr.mu.Unlock()
+
+ for {
+ if len(sr.statuses) == 0 {
+ sr.cond.Wait()
+ }
+
+ for taskID, status := range sr.statuses {
+ if sr.closed {
+ // TODO(stevvooe): Add support here for waiting until all
+ // statuses are flushed before shutting down.
+ return
+ }
+
+ delete(sr.statuses, taskID) // delete the entry, while trying to send.
+
+ sr.mu.Unlock()
+ err := sr.reporter.UpdateTaskStatus(ctx, taskID, status)
+ sr.mu.Lock()
+
+ if err != nil {
+ log.G(ctx).WithError(err).Error("failed reporting status to agent")
+
+				// place the status back in the map, but only if a newer one
+				// didn't arrive while we were sending the last update; the
+				// newer value must not be overwritten.
+ if _, ok := sr.statuses[taskID]; !ok {
+ sr.statuses[taskID] = status
+ }
+ }
+ }
+ }
+}
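+
+// Illustrative usage (not part of the vendored source): statusReporterFunc
+// adapts a plain function to StatusReporter, in the style of
+// http.HandlerFunc. worker.go uses the same adapter to persist statuses.
+//
+//	reporter := statusReporterFunc(func(ctx context.Context, taskID string, status *api.TaskStatus) error {
+//		log.G(ctx).WithField("task.id", taskID).Debugf("state: %v", status.State)
+//		return nil
+//	})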
diff --git a/vendor/src/github.com/docker/swarmkit/agent/session.go b/vendor/src/github.com/docker/swarmkit/agent/session.go
new file mode 100644
index 0000000000..8fde4bdfec
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/session.go
@@ -0,0 +1,265 @@
+package agent
+
+import (
+ "errors"
+ "time"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+var (
+ errSessionDisconnect = errors.New("agent: session disconnect") // instructed to disconnect
+ errSessionClosed = errors.New("agent: session closed")
+)
+
+// session encapsulates one round of registration with the manager. session
+// starts the registration and heartbeat control cycle. Any failure will result
+// in a complete shutdown of the session and it must be reestablished.
+//
+// All communication with the manager is done through session. Changes that
+// flow into the agent, such as task assignment, are called back into the
+// agent through errs, messages and tasks.
+type session struct {
+ agent *Agent
+ nodeID string
+ sessionID string
+ session api.Dispatcher_SessionClient
+ errs chan error
+ messages chan *api.SessionMessage
+ tasks chan *api.TasksMessage
+
+	registered chan struct{} // closed upon registration
+ closed chan struct{}
+}
+
+func newSession(ctx context.Context, agent *Agent, delay time.Duration) *session {
+ s := &session{
+ agent: agent,
+ errs: make(chan error),
+ messages: make(chan *api.SessionMessage),
+ tasks: make(chan *api.TasksMessage),
+ registered: make(chan struct{}),
+ closed: make(chan struct{}),
+ }
+
+ go s.run(ctx, delay)
+ return s
+}
+
+func (s *session) run(ctx context.Context, delay time.Duration) {
+ time.Sleep(delay) // delay before registering.
+
+ if err := s.start(ctx); err != nil {
+ select {
+ case s.errs <- err:
+ case <-s.closed:
+ case <-ctx.Done():
+ }
+ return
+ }
+
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("session.id", s.sessionID))
+
+ go runctx(ctx, s.closed, s.errs, s.heartbeat)
+ go runctx(ctx, s.closed, s.errs, s.watch)
+ go runctx(ctx, s.closed, s.errs, s.listen)
+
+ close(s.registered)
+}
+
+// start begins the session and handles the first SessionMessage.
+func (s *session) start(ctx context.Context) error {
+ log.G(ctx).Debugf("(*session).start")
+
+ client := api.NewDispatcherClient(s.agent.config.Conn)
+
+ description, err := s.agent.config.Executor.Describe(ctx)
+ if err != nil {
+ log.G(ctx).WithError(err).WithField("executor", s.agent.config.Executor).
+ Errorf("node description unavailable")
+ return err
+ }
+ // Override hostname
+ if s.agent.config.Hostname != "" {
+ description.Hostname = s.agent.config.Hostname
+ }
+
+ stream, err := client.Session(ctx, &api.SessionRequest{
+ Description: description,
+ })
+ if err != nil {
+ return err
+ }
+
+ msg, err := stream.Recv()
+ if err != nil {
+ return err
+ }
+
+ s.sessionID = msg.SessionID
+ s.session = stream
+
+ return s.handleSessionMessage(ctx, msg)
+}
+
+func (s *session) heartbeat(ctx context.Context) error {
+ log.G(ctx).Debugf("(*session).heartbeat")
+ client := api.NewDispatcherClient(s.agent.config.Conn)
+ heartbeat := time.NewTimer(1) // send out a heartbeat right away
+ defer heartbeat.Stop()
+
+ for {
+ select {
+ case <-heartbeat.C:
+ resp, err := client.Heartbeat(ctx, &api.HeartbeatRequest{
+ SessionID: s.sessionID,
+ })
+ if err != nil {
+ if grpc.Code(err) == codes.NotFound {
+ err = errNodeNotRegistered
+ }
+
+ return err
+ }
+
+ period, err := ptypes.Duration(&resp.Period)
+ if err != nil {
+ return err
+ }
+
+ heartbeat.Reset(period)
+ case <-s.closed:
+ return errSessionClosed
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+}
+
+func (s *session) listen(ctx context.Context) error {
+ defer s.session.CloseSend()
+ log.G(ctx).Debugf("(*session).listen")
+ for {
+ msg, err := s.session.Recv()
+ if err != nil {
+ return err
+ }
+
+ if err := s.handleSessionMessage(ctx, msg); err != nil {
+ return err
+ }
+ }
+}
+
+func (s *session) handleSessionMessage(ctx context.Context, msg *api.SessionMessage) error {
+ select {
+ case s.messages <- msg:
+ return nil
+ case <-s.closed:
+ return errSessionClosed
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (s *session) watch(ctx context.Context) error {
+ log.G(ctx).Debugf("(*session).watch")
+ client := api.NewDispatcherClient(s.agent.config.Conn)
+ watch, err := client.Tasks(ctx, &api.TasksRequest{
+ SessionID: s.sessionID})
+ if err != nil {
+ return err
+ }
+
+ for {
+ resp, err := watch.Recv()
+ if err != nil {
+ return err
+ }
+
+ select {
+ case s.tasks <- resp:
+ case <-s.closed:
+ return errSessionClosed
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+}
+
+// sendTaskStatus uses the current session to send the status of a single task.
+func (s *session) sendTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error {
+ client := api.NewDispatcherClient(s.agent.config.Conn)
+ if _, err := client.UpdateTaskStatus(ctx, &api.UpdateTaskStatusRequest{
+ SessionID: s.sessionID,
+ Updates: []*api.UpdateTaskStatusRequest_TaskStatusUpdate{
+ {
+ TaskID: taskID,
+ Status: status,
+ },
+ },
+ }); err != nil {
+ // TODO(stevvooe): Dispatcher should not return this error. Status
+ // reports for unknown tasks should be ignored.
+ if grpc.Code(err) == codes.NotFound {
+ return errTaskUnknown
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+func (s *session) sendTaskStatuses(ctx context.Context, updates ...*api.UpdateTaskStatusRequest_TaskStatusUpdate) ([]*api.UpdateTaskStatusRequest_TaskStatusUpdate, error) {
+ if len(updates) < 1 {
+ return nil, nil
+ }
+
+ const batchSize = 1024
+ select {
+ case <-s.registered:
+ select {
+ case <-s.closed:
+ return updates, ErrClosed
+ default:
+ }
+ case <-s.closed:
+ return updates, ErrClosed
+ case <-ctx.Done():
+ return updates, ctx.Err()
+ }
+
+ client := api.NewDispatcherClient(s.agent.config.Conn)
+ n := batchSize
+
+ if len(updates) < n {
+ n = len(updates)
+ }
+
+ if _, err := client.UpdateTaskStatus(ctx, &api.UpdateTaskStatusRequest{
+ SessionID: s.sessionID,
+ Updates: updates[:n],
+ }); err != nil {
+ log.G(ctx).WithError(err).Errorf("failed sending task status batch size of %d", len(updates[:n]))
+ return updates, err
+ }
+
+ return updates[n:], nil
+}
+
+func (s *session) close() error {
+ select {
+ case <-s.closed:
+ return errSessionClosed
+ default:
+ close(s.closed)
+ return nil
+ }
+}
diff --git a/vendor/src/github.com/docker/swarmkit/agent/storage.go b/vendor/src/github.com/docker/swarmkit/agent/storage.go
new file mode 100644
index 0000000000..e9cb19934a
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/storage.go
@@ -0,0 +1,224 @@
+package agent
+
+import (
+ "bytes"
+
+ "github.com/boltdb/bolt"
+ "github.com/docker/swarmkit/api"
+ "github.com/gogo/protobuf/proto"
+)
+
+// Layout:
+//
+// bucket(v1.tasks.<id>) ->
+// data (task protobuf)
+// status (task status protobuf)
+// assigned (key present)
+var (
+ bucketKeyStorageVersion = []byte("v1")
+ bucketKeyTasks = []byte("tasks")
+ bucketKeyAssigned = []byte("assigned")
+ bucketKeyData = []byte("data")
+ bucketKeyStatus = []byte("status")
+)
+
+type bucketKeyPath [][]byte
+
+func (bk bucketKeyPath) String() string {
+ return string(bytes.Join([][]byte(bk), []byte("/")))
+}
+
+// InitDB prepares a database for writing task data.
+//
+// Proper buckets will be created if they don't already exist.
+func InitDB(db *bolt.DB) error {
+ return db.Update(func(tx *bolt.Tx) error {
+ _, err := createBucketIfNotExists(tx, bucketKeyStorageVersion, bucketKeyTasks)
+ return err
+ })
+}
+
+// GetTask retrieves the task with id from the datastore.
+func GetTask(tx *bolt.Tx, id string) (*api.Task, error) {
+ var t api.Task
+
+ if err := withTaskBucket(tx, id, func(bkt *bolt.Bucket) error {
+		p := bkt.Get(bucketKeyData)
+ if p == nil {
+ return errTaskUnknown
+ }
+
+ return proto.Unmarshal(p, &t)
+ }); err != nil {
+ return nil, err
+ }
+
+ return &t, nil
+}
+
+// WalkTasks walks all tasks in the datastore.
+func WalkTasks(tx *bolt.Tx, fn func(task *api.Task) error) error {
+ bkt := getTasksBucket(tx)
+ if bkt == nil {
+ return nil
+ }
+
+ return bkt.ForEach(func(k, v []byte) error {
+ tbkt := bkt.Bucket(k)
+
+ p := tbkt.Get(bucketKeyData)
+ var t api.Task
+ if err := proto.Unmarshal(p, &t); err != nil {
+ return err
+ }
+
+ return fn(&t)
+ })
+}
+
+// TaskAssigned returns true if the task is assigned to the node.
+func TaskAssigned(tx *bolt.Tx, id string) bool {
+ bkt := getTaskBucket(tx, id)
+ if bkt == nil {
+ return false
+ }
+
+ return len(bkt.Get(bucketKeyAssigned)) > 0
+}
+
+// GetTaskStatus returns the current status for the task.
+func GetTaskStatus(tx *bolt.Tx, id string) (*api.TaskStatus, error) {
+ var ts api.TaskStatus
+ if err := withTaskBucket(tx, id, func(bkt *bolt.Bucket) error {
+ p := bkt.Get(bucketKeyStatus)
+ if p == nil {
+ return errTaskUnknown
+ }
+
+ return proto.Unmarshal(p, &ts)
+ }); err != nil {
+ return nil, err
+ }
+
+ return &ts, nil
+}
+
+// WalkTaskStatus calls fn for the status of each task.
+func WalkTaskStatus(tx *bolt.Tx, fn func(id string, status *api.TaskStatus) error) error {
+ bkt := getTasksBucket(tx)
+ if bkt == nil {
+ return nil
+ }
+
+ return bkt.ForEach(func(k, v []byte) error {
+ tbkt := bkt.Bucket(k)
+
+ p := tbkt.Get(bucketKeyStatus)
+ var ts api.TaskStatus
+ if err := proto.Unmarshal(p, &ts); err != nil {
+ return err
+ }
+
+ return fn(string(k), &ts)
+ })
+}
+
+// PutTask places the task into the database.
+func PutTask(tx *bolt.Tx, task *api.Task) error {
+ return withCreateTaskBucketIfNotExists(tx, task.ID, func(bkt *bolt.Bucket) error {
+ task = task.Copy()
+ task.Status = api.TaskStatus{} // blank out the status.
+
+ p, err := proto.Marshal(task)
+ if err != nil {
+ return err
+ }
+ return bkt.Put(bucketKeyData, p)
+ })
+}
+
+// PutTaskStatus updates the status for the task with id.
+func PutTaskStatus(tx *bolt.Tx, id string, status *api.TaskStatus) error {
+ return withCreateTaskBucketIfNotExists(tx, id, func(bkt *bolt.Bucket) error {
+ p, err := proto.Marshal(status)
+ if err != nil {
+ return err
+ }
+		return bkt.Put(bucketKeyStatus, p)
+ })
+}
+
+// DeleteTask completely removes the task from the database.
+func DeleteTask(tx *bolt.Tx, id string) error {
+ bkt := getTasksBucket(tx)
+ if bkt == nil {
+ return nil
+ }
+
+ return bkt.DeleteBucket([]byte(id))
+}
+
+// SetTaskAssignment sets the current assignment state.
+func SetTaskAssignment(tx *bolt.Tx, id string, assigned bool) error {
+ return withTaskBucket(tx, id, func(bkt *bolt.Bucket) error {
+ if assigned {
+			return bkt.Put(bucketKeyAssigned, []byte{0xFF})
+		}
+		return bkt.Delete(bucketKeyAssigned)
+ })
+}
+
+func createBucketIfNotExists(tx *bolt.Tx, keys ...[]byte) (*bolt.Bucket, error) {
+ bkt, err := tx.CreateBucketIfNotExists(keys[0])
+ if err != nil {
+ return nil, err
+ }
+
+ for _, key := range keys[1:] {
+ bkt, err = bkt.CreateBucketIfNotExists(key)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return bkt, nil
+}
+
+func withCreateTaskBucketIfNotExists(tx *bolt.Tx, id string, fn func(bkt *bolt.Bucket) error) error {
+ bkt, err := createBucketIfNotExists(tx, bucketKeyStorageVersion, bucketKeyTasks, []byte(id))
+ if err != nil {
+ return err
+ }
+
+ return fn(bkt)
+}
+
+func withTaskBucket(tx *bolt.Tx, id string, fn func(bkt *bolt.Bucket) error) error {
+ bkt := getTaskBucket(tx, id)
+ if bkt == nil {
+ return errTaskUnknown
+ }
+
+ return fn(bkt)
+}
+
+func getTaskBucket(tx *bolt.Tx, id string) *bolt.Bucket {
+ return getBucket(tx, bucketKeyStorageVersion, bucketKeyTasks, []byte(id))
+}
+
+func getTasksBucket(tx *bolt.Tx) *bolt.Bucket {
+ return getBucket(tx, bucketKeyStorageVersion, bucketKeyTasks)
+}
+
+func getBucket(tx *bolt.Tx, keys ...[]byte) *bolt.Bucket {
+ bkt := tx.Bucket(keys[0])
+
+ for _, key := range keys[1:] {
+ if bkt == nil {
+ break
+ }
+ bkt = bkt.Bucket(key)
+ }
+
+ return bkt
+}
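+
+// An illustrative round trip (not part of the vendored source) through the
+// helpers above, inside a single write transaction. PutTask blanks the
+// status on the stored copy, so the status is written separately.
+//
+//	err := db.Update(func(tx *bolt.Tx) error {
+//		if err := PutTask(tx, task); err != nil {
+//			return err
+//		}
+//		if err := SetTaskAssignment(tx, task.ID, true); err != nil {
+//			return err
+//		}
+//		return PutTaskStatus(tx, task.ID, &task.Status)
+//	})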
diff --git a/vendor/src/github.com/docker/swarmkit/agent/task.go b/vendor/src/github.com/docker/swarmkit/agent/task.go
new file mode 100644
index 0000000000..797f93c769
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/task.go
@@ -0,0 +1,243 @@
+package agent
+
+import (
+ "reflect"
+ "time"
+
+ "github.com/docker/swarmkit/agent/exec"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "golang.org/x/net/context"
+)
+
+// taskManager manages all aspects of task execution and reporting for an agent
+// through state management.
+type taskManager struct {
+ task *api.Task
+ ctlr exec.Controller
+ reporter StatusReporter
+
+ updateq chan *api.Task
+
+ shutdown chan struct{}
+ closed chan struct{}
+}
+
+func newTaskManager(ctx context.Context, task *api.Task, ctlr exec.Controller, reporter StatusReporter) *taskManager {
+ t := &taskManager{
+ task: task.Copy(),
+ ctlr: ctlr,
+ reporter: reporter,
+ updateq: make(chan *api.Task),
+ shutdown: make(chan struct{}),
+ closed: make(chan struct{}),
+ }
+ go t.run(ctx)
+ return t
+}
+
+// Update the task data.
+func (tm *taskManager) Update(ctx context.Context, task *api.Task) error {
+ select {
+ case tm.updateq <- task:
+ return nil
+ case <-tm.closed:
+ return ErrClosed
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+// Close shuts down the task manager, blocking until it is stopped.
+func (tm *taskManager) Close() error {
+ select {
+ case <-tm.closed:
+ return nil
+ case <-tm.shutdown:
+ default:
+ close(tm.shutdown)
+ }
+
+	<-tm.closed
+	return nil
+}
+
+func (tm *taskManager) run(ctx context.Context) {
+ ctx, cancelAll := context.WithCancel(ctx)
+ defer cancelAll() // cancel all child operations on exit.
+
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("module", "taskmanager"))
+
+ var (
+ opctx context.Context
+ cancel context.CancelFunc
+ run = make(chan struct{}, 1)
+ statusq = make(chan *api.TaskStatus)
+ errs = make(chan error)
+ shutdown = tm.shutdown
+ updated bool // true if the task was updated.
+ )
+
+ defer func() {
+ // closure picks up current value of cancel.
+ if cancel != nil {
+ cancel()
+ }
+ }()
+
+ run <- struct{}{} // prime the pump
+ for {
+ select {
+ case <-run:
+ // always check for shutdown before running.
+ select {
+ case <-shutdown:
+ continue // ignore run request and handle shutdown
+ case <-tm.closed:
+ continue
+ default:
+ }
+
+ opctx, cancel = context.WithCancel(ctx)
+
+ // Several variables need to be snapshotted for the closure below.
+ opcancel := cancel // fork for the closure
+ running := tm.task.Copy() // clone the task before dispatch
+ statusqLocal := statusq
+ updatedLocal := updated // capture state of update for goroutine
+ updated = false
+ go runctx(ctx, tm.closed, errs, func(ctx context.Context) error {
+ defer opcancel()
+
+ if updatedLocal {
+ // before we do anything, update the task for the controller.
+ // always update the controller before running.
+ if err := tm.ctlr.Update(opctx, running); err != nil {
+ log.G(ctx).WithError(err).Error("updating task controller failed")
+ return err
+ }
+ }
+
+ status, err := exec.Do(opctx, running, tm.ctlr)
+ if status != nil {
+ // always report the status if we get one back. This
+ // returns to the manager loop, then reports the status
+ // upstream.
+ select {
+ case statusqLocal <- status:
+ case <-ctx.Done(): // not opctx, since that may have been cancelled.
+ }
+
+ if err := tm.reporter.UpdateTaskStatus(ctx, running.ID, status); err != nil {
+ log.G(ctx).WithError(err).Error("failed reporting status to agent")
+ }
+ }
+
+ return err
+ })
+ case err := <-errs:
+			// This branch is always executed when an operation completes. The
+ // goal is to decide whether or not we re-dispatch the operation.
+ cancel = nil
+
+ switch err {
+ case exec.ErrTaskNoop:
+ if !updated {
+					continue // wait until an update pumps the loop again.
+ }
+ case exec.ErrTaskRetry:
+ // TODO(stevvooe): Add exponential backoff with random jitter
+ // here. For now, this backoff is enough to keep the task
+ // manager from running away with the CPU.
+ time.AfterFunc(time.Second, func() {
+ errs <- nil // repump this branch, with no err
+ })
+ continue
+ case nil, context.Canceled, context.DeadlineExceeded:
+ // no log in this case
+ default:
+ log.G(ctx).WithError(err).Error("task operation failed")
+ }
+
+ select {
+ case run <- struct{}{}:
+ default:
+ }
+ case status := <-statusq:
+ tm.task.Status = *status
+ case task := <-tm.updateq:
+ if tasksEqual(task, tm.task) {
+ continue // ignore the update
+ }
+
+ if task.ID != tm.task.ID {
+ log.G(ctx).WithField("task.update.id", task.ID).Error("received update for incorrect task")
+ continue
+ }
+
+ if task.DesiredState < tm.task.DesiredState {
+ log.G(ctx).WithField("task.update.desiredstate", task.DesiredState).
+ Error("ignoring task update with invalid desired state")
+ continue
+ }
+
+ task = task.Copy()
+ task.Status = tm.task.Status // overwrite our status, as it is canonical.
+ tm.task = task
+ updated = true
+
+ // we have accepted the task update
+ if cancel != nil {
+ cancel() // cancel outstanding if necessary.
+ } else {
+				// If this channel op fails, it means there is already a
+				// message in the run queue.
+ select {
+ case run <- struct{}{}:
+ default:
+ }
+ }
+ case <-shutdown:
+ if cancel != nil {
+ // cancel outstanding operation.
+ cancel()
+ }
+
+			// TODO(stevvooe): This should be left for the reaper.
+
+ // make an attempt at removing. this is best effort. any errors will be
+ // retried by the reaper later.
+ if err := tm.ctlr.Remove(ctx); err != nil {
+ log.G(ctx).WithError(err).WithField("task.id", tm.task.ID).Error("remove task failed")
+ }
+
+ if err := tm.ctlr.Close(); err != nil {
+ log.G(ctx).WithError(err).Error("error closing controller")
+ }
+ // disable everything, and prepare for closing.
+ statusq = nil
+ errs = nil
+ shutdown = nil
+ close(tm.closed)
+ case <-tm.closed:
+ return
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+// tasksEqual returns true if the tasks are functionally equal, ignoring status,
+// version and other superfluous fields.
+//
+// This is used to decide whether or not to propagate a task update to a controller.
+func tasksEqual(a, b *api.Task) bool {
+ a, b = a.Copy(), b.Copy()
+
+ a.Status, b.Status = api.TaskStatus{}, api.TaskStatus{}
+ a.Meta, b.Meta = api.Meta{}, api.Meta{}
+
+ return reflect.DeepEqual(a, b)
+}
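+
+// For illustration (not part of the vendored source): given an existing task
+// a, a copy that differs only in status compares as equal, so such an update
+// would be ignored by the task manager.
+//
+//	b := a.Copy()
+//	b.Status.State = api.TaskStateRunning
+//	tasksEqual(a, b) // true: status is ignored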
diff --git a/vendor/src/github.com/docker/swarmkit/agent/worker.go b/vendor/src/github.com/docker/swarmkit/agent/worker.go
new file mode 100644
index 0000000000..b188fe140d
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/agent/worker.go
@@ -0,0 +1,260 @@
+package agent
+
+import (
+ "sync"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/boltdb/bolt"
+ "github.com/docker/swarmkit/agent/exec"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "golang.org/x/net/context"
+)
+
+// Worker implements the core task management logic and persistence. It
+// coordinates the set of assignments with the executor.
+type Worker interface {
+ // Init prepares the worker for task assignment.
+ Init(ctx context.Context) error
+
+ // Assign the set of tasks to the worker. Tasks outside of this set will be
+ // removed.
+ Assign(ctx context.Context, tasks []*api.Task) error
+
+ // Listen to updates about tasks controlled by the worker. When first
+ // called, the reporter will receive all updates for all tasks controlled
+ // by the worker.
+ //
+ // The listener will be removed if the context is cancelled.
+ Listen(ctx context.Context, reporter StatusReporter)
+}
+
+// statusReporterKey protects removal map from panic.
+type statusReporterKey struct {
+ StatusReporter
+}
+
+type worker struct {
+ db *bolt.DB
+ executor exec.Executor
+ listeners map[*statusReporterKey]struct{}
+
+ taskManagers map[string]*taskManager
+ mu sync.RWMutex
+}
+
+func newWorker(db *bolt.DB, executor exec.Executor) *worker {
+ return &worker{
+ db: db,
+ executor: executor,
+ listeners: make(map[*statusReporterKey]struct{}),
+ taskManagers: make(map[string]*taskManager),
+ }
+}
+
+// Init prepares the worker for assignments.
+func (w *worker) Init(ctx context.Context) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("module", "worker"))
+
+ // TODO(stevvooe): Start task cleanup process.
+
+ // read the tasks from the database and start any task managers that may be needed.
+ return w.db.Update(func(tx *bolt.Tx) error {
+ return WalkTasks(tx, func(task *api.Task) error {
+ if !TaskAssigned(tx, task.ID) {
+ // NOTE(stevvooe): If tasks can survive worker restart, we need
+				// to start up the controller and ensure they are removed. For
+ // now, we can simply remove them from the database.
+ if err := DeleteTask(tx, task.ID); err != nil {
+ log.G(ctx).WithError(err).Errorf("error removing task %v", task.ID)
+ }
+ return nil
+ }
+
+ status, err := GetTaskStatus(tx, task.ID)
+ if err != nil {
+				log.G(ctx).WithError(err).Error("unable to read task status")
+ return nil
+ }
+
+ task.Status = *status // merges the status into the task, ensuring we start at the right point.
+ return w.startTask(ctx, tx, task)
+ })
+ })
+}
+
+// Assign the set of tasks to the worker. Any tasks not previously known will
+// be started. Any tasks that are in the task set and already running will be
+// updated, if possible. Any tasks currently running on the
+// worker outside the task set will be terminated.
+func (w *worker) Assign(ctx context.Context, tasks []*api.Task) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ tx, err := w.db.Begin(true)
+ if err != nil {
+ log.G(ctx).WithError(err).Error("failed starting transaction against task database")
+ return err
+ }
+ defer tx.Rollback()
+
+ log.G(ctx).WithField("len(tasks)", len(tasks)).Debug("(*worker).Assign")
+ assigned := map[string]struct{}{}
+
+ for _, task := range tasks {
+ log.G(ctx).WithFields(
+ logrus.Fields{
+ "task.id": task.ID,
+ "task.desiredstate": task.DesiredState}).Debug("assigned")
+ if err := PutTask(tx, task); err != nil {
+ return err
+ }
+
+ if err := SetTaskAssignment(tx, task.ID, true); err != nil {
+ return err
+ }
+
+ if mgr, ok := w.taskManagers[task.ID]; ok {
+ if err := mgr.Update(ctx, task); err != nil && err != ErrClosed {
+ log.G(ctx).WithError(err).Error("failed updating assigned task")
+ }
+ } else {
+ // we may have still seen the task, let's grab the status from
+ // storage and replace it with our status, if we have it.
+ status, err := GetTaskStatus(tx, task.ID)
+ if err != nil {
+ if err != errTaskUnknown {
+ return err
+ }
+
+ // never seen before, register the provided status
+ if err := PutTaskStatus(tx, task.ID, &task.Status); err != nil {
+ return err
+ }
+
+ status = &task.Status
+ } else {
+ task.Status = *status // overwrite the stale manager status with ours.
+ }
+
+ w.startTask(ctx, tx, task)
+ }
+
+ assigned[task.ID] = struct{}{}
+ }
+
+ for id, tm := range w.taskManagers {
+ if _, ok := assigned[id]; ok {
+ continue
+ }
+
+ ctx := log.WithLogger(ctx, log.G(ctx).WithField("task.id", id))
+ if err := SetTaskAssignment(tx, id, false); err != nil {
+ log.G(ctx).WithError(err).Error("error setting task assignment in database")
+ continue
+ }
+
+ delete(w.taskManagers, id)
+
+ go func(tm *taskManager) {
+ // when a task is no longer assigned, we shutdown the task manager for
+ // it and leave cleanup to the sweeper.
+ if err := tm.Close(); err != nil {
+ log.G(ctx).WithError(err).Error("error closing task manager")
+ }
+ }(tm)
+ }
+
+ return tx.Commit()
+}
+
+func (w *worker) Listen(ctx context.Context, reporter StatusReporter) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ key := &statusReporterKey{reporter}
+ w.listeners[key] = struct{}{}
+
+ go func() {
+ <-ctx.Done()
+ w.mu.Lock()
+		defer w.mu.Unlock()
+ delete(w.listeners, key) // remove the listener if the context is closed.
+ }()
+
+ // report the current statuses to the new listener
+ if err := w.db.View(func(tx *bolt.Tx) error {
+ return WalkTaskStatus(tx, func(id string, status *api.TaskStatus) error {
+ return reporter.UpdateTaskStatus(ctx, id, status)
+ })
+ }); err != nil {
+ log.G(ctx).WithError(err).Errorf("failed reporting initial statuses to registered listener %v", reporter)
+ }
+}
+
+func (w *worker) startTask(ctx context.Context, tx *bolt.Tx, task *api.Task) error {
+	_, err := w.taskManager(ctx, tx, task) // called for its side effect: taskManager creation.
+	if err != nil {
+ log.G(ctx).WithError(err).Error("failed to start taskManager")
+ }
+
+ // TODO(stevvooe): Add start method for taskmanager
+ return nil
+}
+
+func (w *worker) taskManager(ctx context.Context, tx *bolt.Tx, task *api.Task) (*taskManager, error) {
+ if tm, ok := w.taskManagers[task.ID]; ok {
+ return tm, nil
+ }
+
+ tm, err := w.newTaskManager(ctx, tx, task)
+ if err != nil {
+ return nil, err
+ }
+ w.taskManagers[task.ID] = tm
+ return tm, nil
+}
+
+func (w *worker) newTaskManager(ctx context.Context, tx *bolt.Tx, task *api.Task) (*taskManager, error) {
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("task.id", task.ID))
+
+ ctlr, status, err := exec.Resolve(ctx, task, w.executor)
+ if err := w.updateTaskStatus(ctx, tx, task.ID, status); err != nil {
+ log.G(ctx).WithError(err).Error("error updating task status after controller resolution")
+ }
+
+ if err != nil {
+		log.G(ctx).WithError(err).Error("controller resolution failed")
+ return nil, err
+ }
+
+ return newTaskManager(ctx, task, ctlr, statusReporterFunc(func(ctx context.Context, taskID string, status *api.TaskStatus) error {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ return w.db.Update(func(tx *bolt.Tx) error {
+ return w.updateTaskStatus(ctx, tx, taskID, status)
+ })
+ })), nil
+}
+
+// updateTaskStatus reports statuses to listeners; the read lock must be held.
+func (w *worker) updateTaskStatus(ctx context.Context, tx *bolt.Tx, taskID string, status *api.TaskStatus) error {
+ if err := PutTaskStatus(tx, taskID, status); err != nil {
+ log.G(ctx).WithError(err).Error("failed writing status to disk")
+ return err
+ }
+
+ // broadcast the task status out.
+ for key := range w.listeners {
+ if err := key.StatusReporter.UpdateTaskStatus(ctx, taskID, status); err != nil {
+ log.G(ctx).WithError(err).Errorf("failed updating status for reporter %v", key.StatusReporter)
+ }
+ }
+
+ return nil
+}
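+
+// An illustrative wiring sketch (not part of the vendored source) for the
+// unexported constructor above; db, executor, reporter and tasks are assumed
+// to exist.
+//
+//	w := newWorker(db, executor)
+//	if err := w.Init(ctx); err != nil {
+//		return err
+//	}
+//	w.Listen(ctx, reporter) // stream status updates to the reporter
+//	if err := w.Assign(ctx, tasks); err != nil {
+//		return err
+//	}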
diff --git a/vendor/src/github.com/docker/swarmkit/api/ca.pb.go b/vendor/src/github.com/docker/swarmkit/api/ca.pb.go
new file mode 100644
index 0000000000..c1c58d07fe
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/ca.pb.go
@@ -0,0 +1,1616 @@
+// Code generated by protoc-gen-gogo.
+// source: ca.proto
+// DO NOT EDIT!
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/docker/swarmkit/protobuf/plugin"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
+import codes "google.golang.org/grpc/codes"
+import metadata "google.golang.org/grpc/metadata"
+import transport "google.golang.org/grpc/transport"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type NodeCertificateStatusRequest struct {
+ NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+}
+
+func (m *NodeCertificateStatusRequest) Reset() { *m = NodeCertificateStatusRequest{} }
+func (*NodeCertificateStatusRequest) ProtoMessage() {}
+func (*NodeCertificateStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{0} }
+
+type NodeCertificateStatusResponse struct {
+ Status *IssuanceStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"`
+ Certificate *Certificate `protobuf:"bytes,2,opt,name=certificate" json:"certificate,omitempty"`
+}
+
+func (m *NodeCertificateStatusResponse) Reset() { *m = NodeCertificateStatusResponse{} }
+func (*NodeCertificateStatusResponse) ProtoMessage() {}
+func (*NodeCertificateStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{1} }
+
+type IssueNodeCertificateRequest struct {
+ Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
+ CSR []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"`
+ // Secret represents a user-provided string that is necessary for new
+ // nodes to join the cluster.
+ Secret string `protobuf:"bytes,3,opt,name=secret,proto3" json:"secret,omitempty"`
+}
+
+func (m *IssueNodeCertificateRequest) Reset() { *m = IssueNodeCertificateRequest{} }
+func (*IssueNodeCertificateRequest) ProtoMessage() {}
+func (*IssueNodeCertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{2} }
+
+type IssueNodeCertificateResponse struct {
+ NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+}
+
+func (m *IssueNodeCertificateResponse) Reset() { *m = IssueNodeCertificateResponse{} }
+func (*IssueNodeCertificateResponse) ProtoMessage() {}
+func (*IssueNodeCertificateResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{3} }
+
+type GetRootCACertificateRequest struct {
+}
+
+func (m *GetRootCACertificateRequest) Reset() { *m = GetRootCACertificateRequest{} }
+func (*GetRootCACertificateRequest) ProtoMessage() {}
+func (*GetRootCACertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{4} }
+
+type GetRootCACertificateResponse struct {
+ Certificate []byte `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate,omitempty"`
+}
+
+func (m *GetRootCACertificateResponse) Reset() { *m = GetRootCACertificateResponse{} }
+func (*GetRootCACertificateResponse) ProtoMessage() {}
+func (*GetRootCACertificateResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{5} }
+
+func init() {
+ proto.RegisterType((*NodeCertificateStatusRequest)(nil), "docker.swarmkit.v1.NodeCertificateStatusRequest")
+ proto.RegisterType((*NodeCertificateStatusResponse)(nil), "docker.swarmkit.v1.NodeCertificateStatusResponse")
+ proto.RegisterType((*IssueNodeCertificateRequest)(nil), "docker.swarmkit.v1.IssueNodeCertificateRequest")
+ proto.RegisterType((*IssueNodeCertificateResponse)(nil), "docker.swarmkit.v1.IssueNodeCertificateResponse")
+ proto.RegisterType((*GetRootCACertificateRequest)(nil), "docker.swarmkit.v1.GetRootCACertificateRequest")
+ proto.RegisterType((*GetRootCACertificateResponse)(nil), "docker.swarmkit.v1.GetRootCACertificateResponse")
+}
+
+type authenticatedWrapperCAServer struct {
+ local CAServer
+ authorize func(context.Context, []string) error
+}
+
+func NewAuthenticatedWrapperCAServer(local CAServer, authorize func(context.Context, []string) error) CAServer {
+ return &authenticatedWrapperCAServer{
+ local: local,
+ authorize: authorize,
+ }
+}
+
+func (p *authenticatedWrapperCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) {
+
+ return p.local.GetRootCACertificate(ctx, r)
+}
+
+type authenticatedWrapperNodeCAServer struct {
+ local NodeCAServer
+ authorize func(context.Context, []string) error
+}
+
+func NewAuthenticatedWrapperNodeCAServer(local NodeCAServer, authorize func(context.Context, []string) error) NodeCAServer {
+ return &authenticatedWrapperNodeCAServer{
+ local: local,
+ authorize: authorize,
+ }
+}
+
+func (p *authenticatedWrapperNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) {
+
+ return p.local.IssueNodeCertificate(ctx, r)
+}
+
+func (p *authenticatedWrapperNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) {
+
+ return p.local.NodeCertificateStatus(ctx, r)
+}
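+
+// Note: the wrapper methods above forward to the local server without calling
+// authorize. The CA and NodeCA RPCs are declared with the tls_authorization
+// option { insecure: true } in ca.proto, since nodes must be able to reach
+// them before they hold a valid certificate.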
+
+func (m *NodeCertificateStatusRequest) Copy() *NodeCertificateStatusRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &NodeCertificateStatusRequest{
+ NodeID: m.NodeID,
+ }
+
+ return o
+}
+
+func (m *NodeCertificateStatusResponse) Copy() *NodeCertificateStatusResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &NodeCertificateStatusResponse{
+ Status: m.Status.Copy(),
+ Certificate: m.Certificate.Copy(),
+ }
+
+ return o
+}
+
+func (m *IssueNodeCertificateRequest) Copy() *IssueNodeCertificateRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &IssueNodeCertificateRequest{
+ Role: m.Role,
+ CSR: m.CSR,
+ Secret: m.Secret,
+ }
+
+ return o
+}
+
+func (m *IssueNodeCertificateResponse) Copy() *IssueNodeCertificateResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &IssueNodeCertificateResponse{
+ NodeID: m.NodeID,
+ }
+
+ return o
+}
+
+func (m *GetRootCACertificateRequest) Copy() *GetRootCACertificateRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &GetRootCACertificateRequest{}
+
+ return o
+}
+
+func (m *GetRootCACertificateResponse) Copy() *GetRootCACertificateResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &GetRootCACertificateResponse{
+ Certificate: m.Certificate,
+ }
+
+ return o
+}
+
+func (this *NodeCertificateStatusRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.NodeCertificateStatusRequest{")
+ s = append(s, "NodeID: "+fmt.Sprintf("%#v", this.NodeID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *NodeCertificateStatusResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.NodeCertificateStatusResponse{")
+ if this.Status != nil {
+ s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n")
+ }
+ if this.Certificate != nil {
+ s = append(s, "Certificate: "+fmt.Sprintf("%#v", this.Certificate)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *IssueNodeCertificateRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.IssueNodeCertificateRequest{")
+ s = append(s, "Role: "+fmt.Sprintf("%#v", this.Role)+",\n")
+ s = append(s, "CSR: "+fmt.Sprintf("%#v", this.CSR)+",\n")
+ s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *IssueNodeCertificateResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.IssueNodeCertificateResponse{")
+ s = append(s, "NodeID: "+fmt.Sprintf("%#v", this.NodeID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetRootCACertificateRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&api.GetRootCACertificateRequest{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetRootCACertificateResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.GetRootCACertificateResponse{")
+ s = append(s, "Certificate: "+fmt.Sprintf("%#v", this.Certificate)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringCa(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringCa(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion2
+
+// Client API for CA service
+
+type CAClient interface {
+ GetRootCACertificate(ctx context.Context, in *GetRootCACertificateRequest, opts ...grpc.CallOption) (*GetRootCACertificateResponse, error)
+}
+
+type cAClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewCAClient(cc *grpc.ClientConn) CAClient {
+ return &cAClient{cc}
+}
+
+func (c *cAClient) GetRootCACertificate(ctx context.Context, in *GetRootCACertificateRequest, opts ...grpc.CallOption) (*GetRootCACertificateResponse, error) {
+ out := new(GetRootCACertificateResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.CA/GetRootCACertificate", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
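+
+// Usage sketch (addr is an illustrative manager address; error handling
+// elided): dial a gRPC connection and fetch the cluster's root CA bundle.
+//
+//	conn, _ := grpc.Dial(addr, grpc.WithInsecure())
+//	defer conn.Close()
+//	resp, _ := NewCAClient(conn).GetRootCACertificate(ctx, &GetRootCACertificateRequest{})
+//	_ = resp.Certificate // root CA certificate bytes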
+
+// Server API for CA service
+
+type CAServer interface {
+ GetRootCACertificate(context.Context, *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error)
+}
+
+func RegisterCAServer(s *grpc.Server, srv CAServer) {
+ s.RegisterService(&_CA_serviceDesc, srv)
+}
+
+func _CA_GetRootCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetRootCACertificateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(CAServer).GetRootCACertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.CA/GetRootCACertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(CAServer).GetRootCACertificate(ctx, req.(*GetRootCACertificateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _CA_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "docker.swarmkit.v1.CA",
+ HandlerType: (*CAServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "GetRootCACertificate",
+ Handler: _CA_GetRootCACertificate_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+}
+
+// Client API for NodeCA service
+
+type NodeCAClient interface {
+ IssueNodeCertificate(ctx context.Context, in *IssueNodeCertificateRequest, opts ...grpc.CallOption) (*IssueNodeCertificateResponse, error)
+ NodeCertificateStatus(ctx context.Context, in *NodeCertificateStatusRequest, opts ...grpc.CallOption) (*NodeCertificateStatusResponse, error)
+}
+
+type nodeCAClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewNodeCAClient(cc *grpc.ClientConn) NodeCAClient {
+ return &nodeCAClient{cc}
+}
+
+func (c *nodeCAClient) IssueNodeCertificate(ctx context.Context, in *IssueNodeCertificateRequest, opts ...grpc.CallOption) (*IssueNodeCertificateResponse, error) {
+ out := new(IssueNodeCertificateResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.NodeCA/IssueNodeCertificate", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *nodeCAClient) NodeCertificateStatus(ctx context.Context, in *NodeCertificateStatusRequest, opts ...grpc.CallOption) (*NodeCertificateStatusResponse, error) {
+ out := new(NodeCertificateStatusResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.NodeCA/NodeCertificateStatus", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for NodeCA service
+
+type NodeCAServer interface {
+ IssueNodeCertificate(context.Context, *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error)
+ NodeCertificateStatus(context.Context, *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error)
+}
+
+func RegisterNodeCAServer(s *grpc.Server, srv NodeCAServer) {
+ s.RegisterService(&_NodeCA_serviceDesc, srv)
+}
+
+func _NodeCA_IssueNodeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(IssueNodeCertificateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NodeCAServer).IssueNodeCertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.NodeCA/IssueNodeCertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NodeCAServer).IssueNodeCertificate(ctx, req.(*IssueNodeCertificateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NodeCA_NodeCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(NodeCertificateStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NodeCAServer).NodeCertificateStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.NodeCA/NodeCertificateStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NodeCAServer).NodeCertificateStatus(ctx, req.(*NodeCertificateStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _NodeCA_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "docker.swarmkit.v1.NodeCA",
+ HandlerType: (*NodeCAServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "IssueNodeCertificate",
+ Handler: _NodeCA_IssueNodeCertificate_Handler,
+ },
+ {
+ MethodName: "NodeCertificateStatus",
+ Handler: _NodeCA_NodeCertificateStatus_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+}
+
+func (m *NodeCertificateStatusRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeCertificateStatusRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.NodeID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintCa(data, i, uint64(len(m.NodeID)))
+ i += copy(data[i:], m.NodeID)
+ }
+ return i, nil
+}
+
+func (m *NodeCertificateStatusResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeCertificateStatusResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Status != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintCa(data, i, uint64(m.Status.Size()))
+ n1, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ if m.Certificate != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintCa(data, i, uint64(m.Certificate.Size()))
+ n2, err := m.Certificate.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ return i, nil
+}
+
+func (m *IssueNodeCertificateRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IssueNodeCertificateRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Role != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintCa(data, i, uint64(m.Role))
+ }
+ if len(m.CSR) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintCa(data, i, uint64(len(m.CSR)))
+ i += copy(data[i:], m.CSR)
+ }
+ if len(m.Secret) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintCa(data, i, uint64(len(m.Secret)))
+ i += copy(data[i:], m.Secret)
+ }
+ return i, nil
+}
+
+func (m *IssueNodeCertificateResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IssueNodeCertificateResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.NodeID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintCa(data, i, uint64(len(m.NodeID)))
+ i += copy(data[i:], m.NodeID)
+ }
+ return i, nil
+}
+
+func (m *GetRootCACertificateRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GetRootCACertificateRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *GetRootCACertificateResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GetRootCACertificateResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Certificate) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintCa(data, i, uint64(len(m.Certificate)))
+ i += copy(data[i:], m.Certificate)
+ }
+ return i, nil
+}
+
+func encodeFixed64Ca(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
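+
+// encodeFixed64Ca (and encodeFixed32Ca below) write v in little-endian order;
+// for example, v = 0x0102030405060708 becomes the bytes 08 07 06 05 04 03 02 01.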
+func encodeFixed32Ca(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintCa(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
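+
+// encodeVarintCa emits the standard protobuf base-128 varint: seven bits per
+// byte, least-significant group first, with the high bit set on every byte
+// except the last. For example, v = 300 (0b1_0010_1100) is written as the two
+// bytes 0xac 0x02.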
+
+type raftProxyCAServer struct {
+ local CAServer
+ connSelector *raftpicker.ConnSelector
+ cluster raftpicker.RaftCluster
+ ctxMods []func(context.Context) (context.Context, error)
+}
+
+func NewRaftProxyCAServer(local CAServer, connSelector *raftpicker.ConnSelector, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) CAServer {
+ redirectChecker := func(ctx context.Context) (context.Context, error) {
+ s, ok := transport.StreamFromContext(ctx)
+ if !ok {
+ return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ }
+ addr := s.ServerTransport().RemoteAddr().String()
+ md, ok := metadata.FromContext(ctx)
+ if ok && len(md["redirect"]) != 0 {
+ return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ }
+ if !ok {
+ md = metadata.New(map[string]string{})
+ }
+ md["redirect"] = append(md["redirect"], addr)
+ return metadata.NewContext(ctx, md), nil
+ }
+ mods := []func(context.Context) (context.Context, error){redirectChecker}
+ mods = append(mods, ctxMod)
+
+ return &raftProxyCAServer{
+ local: local,
+ cluster: cluster,
+ connSelector: connSelector,
+ ctxMods: mods,
+ }
+}
+func (p *raftProxyCAServer) runCtxMods(ctx context.Context) (context.Context, error) {
+ var err error
+ for _, mod := range p.ctxMods {
+ ctx, err = mod(ctx)
+ if err != nil {
+ return ctx, err
+ }
+ }
+ return ctx, nil
+}
+
+func (p *raftProxyCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) {
+
+ if p.cluster.IsLeader() {
+ return p.local.GetRootCACertificate(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewCAClient(conn).GetRootCACertificate(ctx, r)
+}
+
+type raftProxyNodeCAServer struct {
+ local NodeCAServer
+ connSelector *raftpicker.ConnSelector
+ cluster raftpicker.RaftCluster
+ ctxMods []func(context.Context) (context.Context, error)
+}
+
+func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector *raftpicker.ConnSelector, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) NodeCAServer {
+ redirectChecker := func(ctx context.Context) (context.Context, error) {
+ s, ok := transport.StreamFromContext(ctx)
+ if !ok {
+ return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ }
+ addr := s.ServerTransport().RemoteAddr().String()
+ md, ok := metadata.FromContext(ctx)
+ if ok && len(md["redirect"]) != 0 {
+ return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ }
+ if !ok {
+ md = metadata.New(map[string]string{})
+ }
+ md["redirect"] = append(md["redirect"], addr)
+ return metadata.NewContext(ctx, md), nil
+ }
+ mods := []func(context.Context) (context.Context, error){redirectChecker}
+ mods = append(mods, ctxMod)
+
+ return &raftProxyNodeCAServer{
+ local: local,
+ cluster: cluster,
+ connSelector: connSelector,
+ ctxMods: mods,
+ }
+}
+func (p *raftProxyNodeCAServer) runCtxMods(ctx context.Context) (context.Context, error) {
+ var err error
+ for _, mod := range p.ctxMods {
+ ctx, err = mod(ctx)
+ if err != nil {
+ return ctx, err
+ }
+ }
+ return ctx, nil
+}
+
+func (p *raftProxyNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) {
+
+ if p.cluster.IsLeader() {
+ return p.local.IssueNodeCertificate(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewNodeCAClient(conn).IssueNodeCertificate(ctx, r)
+}
+
+func (p *raftProxyNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) {
+
+ if p.cluster.IsLeader() {
+ return p.local.NodeCertificateStatus(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewNodeCAClient(conn).NodeCertificateStatus(ctx, r)
+}
+
+func (m *NodeCertificateStatusRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.NodeID)
+ if l > 0 {
+ n += 1 + l + sovCa(uint64(l))
+ }
+ return n
+}
+
+func (m *NodeCertificateStatusResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Status != nil {
+ l = m.Status.Size()
+ n += 1 + l + sovCa(uint64(l))
+ }
+ if m.Certificate != nil {
+ l = m.Certificate.Size()
+ n += 1 + l + sovCa(uint64(l))
+ }
+ return n
+}
+
+func (m *IssueNodeCertificateRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Role != 0 {
+ n += 1 + sovCa(uint64(m.Role))
+ }
+ l = len(m.CSR)
+ if l > 0 {
+ n += 1 + l + sovCa(uint64(l))
+ }
+ l = len(m.Secret)
+ if l > 0 {
+ n += 1 + l + sovCa(uint64(l))
+ }
+ return n
+}
+
+func (m *IssueNodeCertificateResponse) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.NodeID)
+ if l > 0 {
+ n += 1 + l + sovCa(uint64(l))
+ }
+ return n
+}
+
+func (m *GetRootCACertificateRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *GetRootCACertificateResponse) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Certificate)
+ if l > 0 {
+ n += 1 + l + sovCa(uint64(l))
+ }
+ return n
+}
+
+func sovCa(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozCa(x uint64) (n int) {
+ return sovCa(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
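+
+// sozCa sizes a zigzag-encoded signed varint: (x << 1) ^ (x >> 63) maps
+// values of small magnitude to small unsigned integers (0 -> 0, -1 -> 1,
+// 1 -> 2, -2 -> 3), so negative numbers still encode compactly as varints.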
+func (this *NodeCertificateStatusRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NodeCertificateStatusRequest{`,
+ `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NodeCertificateStatusResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NodeCertificateStatusResponse{`,
+ `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "IssuanceStatus", "IssuanceStatus", 1) + `,`,
+ `Certificate:` + strings.Replace(fmt.Sprintf("%v", this.Certificate), "Certificate", "Certificate", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IssueNodeCertificateRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IssueNodeCertificateRequest{`,
+ `Role:` + fmt.Sprintf("%v", this.Role) + `,`,
+ `CSR:` + fmt.Sprintf("%v", this.CSR) + `,`,
+ `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IssueNodeCertificateResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IssueNodeCertificateResponse{`,
+ `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetRootCACertificateRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetRootCACertificateRequest{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetRootCACertificateResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetRootCACertificateResponse{`,
+ `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringCa(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *NodeCertificateStatusRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeCertificateStatusRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeCertificateStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCa
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCa(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCa
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeCertificateStatusResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeCertificateStatusResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeCertificateStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCa
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Status == nil {
+ m.Status = &IssuanceStatus{}
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCa
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Certificate == nil {
+ m.Certificate = &Certificate{}
+ }
+ if err := m.Certificate.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCa(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCa
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IssueNodeCertificateRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IssueNodeCertificateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IssueNodeCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ m.Role = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Role |= (NodeRole(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CSR", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthCa
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CSR = append(m.CSR[:0], data[iNdEx:postIndex]...)
+ if m.CSR == nil {
+ m.CSR = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCa
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Secret = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCa(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCa
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IssueNodeCertificateResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IssueNodeCertificateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IssueNodeCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCa
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCa(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCa
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetRootCACertificateRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetRootCACertificateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetRootCACertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCa(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCa
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetRootCACertificateResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetRootCACertificateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetRootCACertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthCa
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Certificate = append(m.Certificate[:0], data[iNdEx:postIndex]...)
+ if m.Certificate == nil {
+ m.Certificate = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCa(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCa
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipCa(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthCa
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCa
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipCa(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthCa = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowCa = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorCa = []byte{
+ // 442 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x48, 0x4e, 0xd4, 0x2b,
+ 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xc9, 0x4f, 0xce, 0x4e, 0x2d, 0xd2, 0x2b, 0x2e, 0x4f,
+ 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0x94, 0xe2, 0x2e, 0xa9, 0x2c, 0x48, 0x2d, 0x86,
+ 0x28, 0x90, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x33, 0xf5, 0x41, 0x2c, 0xa8, 0xa8, 0x70, 0x41,
+ 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x3e, 0x84, 0x82, 0x08, 0x2a, 0x39, 0x73, 0xc9, 0xf8, 0xe5, 0xa7,
+ 0xa4, 0x3a, 0xa7, 0x16, 0x95, 0x64, 0xa6, 0x65, 0x26, 0x27, 0x96, 0xa4, 0x06, 0x97, 0x24, 0x96,
+ 0x94, 0x16, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x29, 0x73, 0xb1, 0xe7, 0x01, 0xe5,
+ 0xe3, 0x33, 0x53, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x9d, 0xb8, 0x1e, 0xdd, 0x93, 0x67, 0x03,
+ 0x69, 0xf1, 0x74, 0x09, 0x62, 0x03, 0x49, 0x79, 0xa6, 0x28, 0xcd, 0x63, 0xe4, 0x92, 0xc5, 0x61,
+ 0x4a, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x90, 0x15, 0x17, 0x5b, 0x31, 0x58, 0x04, 0x6c, 0x0a,
+ 0xb7, 0x91, 0x92, 0x1e, 0xa6, 0x1f, 0xf4, 0x3c, 0x8b, 0x8b, 0x4b, 0x13, 0xf3, 0x92, 0x61, 0x7a,
+ 0xa1, 0x3a, 0x84, 0x1c, 0xb9, 0xb8, 0x93, 0x11, 0x06, 0x4b, 0x30, 0x81, 0x0d, 0x90, 0xc7, 0x66,
+ 0x00, 0x92, 0xfd, 0x41, 0xc8, 0x7a, 0x94, 0x9a, 0x18, 0xb9, 0xa4, 0x41, 0xa6, 0xa7, 0xa2, 0xb9,
+ 0x12, 0xe6, 0x4b, 0x03, 0x2e, 0x96, 0xa2, 0xfc, 0x9c, 0x54, 0xb0, 0xe3, 0xf8, 0x8c, 0x64, 0xb0,
+ 0x99, 0x0d, 0xd2, 0x19, 0x04, 0x54, 0x13, 0x04, 0x56, 0x29, 0x24, 0xc9, 0xc5, 0x9c, 0x5c, 0x5c,
+ 0x04, 0x76, 0x0c, 0x8f, 0x13, 0x3b, 0x30, 0x4c, 0x98, 0x9d, 0x83, 0x83, 0x82, 0x40, 0x62, 0x42,
+ 0x62, 0x40, 0xbf, 0xa6, 0x26, 0x17, 0xa5, 0x96, 0x48, 0x30, 0x83, 0x42, 0x2c, 0x08, 0xca, 0x03,
+ 0x05, 0x35, 0x76, 0x37, 0x40, 0xc3, 0x88, 0xa8, 0xa0, 0x96, 0xe5, 0x92, 0x76, 0x4f, 0x2d, 0x09,
+ 0xca, 0xcf, 0x2f, 0x71, 0x76, 0xc4, 0xf4, 0x88, 0x92, 0x03, 0x97, 0x0c, 0x76, 0x69, 0xa8, 0x1d,
+ 0x0a, 0xa8, 0x61, 0x09, 0xb2, 0x87, 0x07, 0x25, 0xa8, 0x8c, 0xba, 0x18, 0xb9, 0x98, 0x9c, 0x1d,
+ 0x85, 0x9a, 0x19, 0xb9, 0x44, 0xb0, 0x99, 0x24, 0xa4, 0x8f, 0x2d, 0x70, 0xf0, 0x38, 0x49, 0xca,
+ 0x80, 0x78, 0x0d, 0x10, 0x47, 0x2a, 0x71, 0x9c, 0x5a, 0xf7, 0x6e, 0x06, 0x13, 0x93, 0x00, 0xa3,
+ 0xd1, 0x74, 0x26, 0x2e, 0x70, 0x00, 0x40, 0x1d, 0x84, 0x2d, 0xf8, 0xb0, 0x3b, 0x08, 0x4f, 0x64,
+ 0x63, 0x77, 0x10, 0xbe, 0x98, 0x41, 0x38, 0x48, 0xa8, 0x8d, 0x91, 0x4b, 0x14, 0x6b, 0x4a, 0x17,
+ 0x32, 0xc0, 0x95, 0x68, 0x70, 0x65, 0x2d, 0x29, 0x43, 0x12, 0x74, 0xa0, 0x3b, 0xc4, 0x49, 0xe6,
+ 0xc4, 0x43, 0x39, 0x86, 0x1b, 0x40, 0xfc, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x13,
+ 0x40, 0x7c, 0x01, 0x88, 0x1f, 0x00, 0x71, 0x12, 0x1b, 0x38, 0x73, 0x1b, 0x03, 0x02, 0x00, 0x00,
+ 0xff, 0xff, 0x42, 0x13, 0xc9, 0x2a, 0x34, 0x04, 0x00, 0x00,
+}
diff --git a/vendor/src/github.com/docker/swarmkit/api/ca.proto b/vendor/src/github.com/docker/swarmkit/api/ca.proto
new file mode 100644
index 0000000000..149a8a1ac8
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/ca.proto
@@ -0,0 +1,51 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "types.proto";
+import "gogoproto/gogo.proto";
+import "plugin/plugin.proto";
+
+// CA defines the RPC methods for requesting certificates from a CA.
+service CA {
+ rpc GetRootCACertificate(GetRootCACertificateRequest) returns (GetRootCACertificateResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { insecure: true };
+ };
+}
+
+service NodeCA {
+ rpc IssueNodeCertificate(IssueNodeCertificateRequest) returns (IssueNodeCertificateResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { insecure: true };
+ };
+ rpc NodeCertificateStatus(NodeCertificateStatusRequest) returns (NodeCertificateStatusResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { insecure: true };
+ };
+}
+
+message NodeCertificateStatusRequest {
+ string node_id = 1 [(gogoproto.customname) = "NodeID"];
+}
+
+message NodeCertificateStatusResponse {
+ IssuanceStatus status = 1;
+ Certificate certificate = 2;
+}
+
+message IssueNodeCertificateRequest {
+ NodeRole role = 1;
+ bytes csr = 2 [(gogoproto.customname) = "CSR"];
+ // Secret represents a user-provided string that is necessary for new
+ // nodes to join the cluster.
+ string secret = 3;
+}
+
+message IssueNodeCertificateResponse {
+ string node_id = 1 [(gogoproto.customname) = "NodeID"];
+}
+
+message GetRootCACertificateRequest {}
+
+message GetRootCACertificateResponse {
+ bytes certificate = 1;
+}
diff --git a/vendor/src/github.com/docker/swarmkit/api/control.pb.go b/vendor/src/github.com/docker/swarmkit/api/control.pb.go
new file mode 100644
index 0000000000..653089936d
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/control.pb.go
@@ -0,0 +1,10185 @@
+// Code generated by protoc-gen-gogo.
+// source: control.proto
+// DO NOT EDIT!
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/docker/swarmkit/protobuf/plugin"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
+import codes "google.golang.org/grpc/codes"
+import metadata "google.golang.org/grpc/metadata"
+import transport "google.golang.org/grpc/transport"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type GetNodeRequest struct {
+ NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+}
+
+func (m *GetNodeRequest) Reset() { *m = GetNodeRequest{} }
+func (*GetNodeRequest) ProtoMessage() {}
+func (*GetNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{0} }
+
+type GetNodeResponse struct {
+ Node *Node `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"`
+}
+
+func (m *GetNodeResponse) Reset() { *m = GetNodeResponse{} }
+func (*GetNodeResponse) ProtoMessage() {}
+func (*GetNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{1} }
+
+type ListNodesRequest struct {
+ Filters *ListNodesRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"`
+}
+
+func (m *ListNodesRequest) Reset() { *m = ListNodesRequest{} }
+func (*ListNodesRequest) ProtoMessage() {}
+func (*ListNodesRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{2} }
+
+type ListNodesRequest_Filters struct {
+ Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"`
+ IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"`
+ Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Memberships []NodeSpec_Membership `protobuf:"varint,4,rep,name=memberships,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"memberships,omitempty"`
+ Roles []NodeRole `protobuf:"varint,5,rep,name=roles,enum=docker.swarmkit.v1.NodeRole" json:"roles,omitempty"`
+}
+
+func (m *ListNodesRequest_Filters) Reset() { *m = ListNodesRequest_Filters{} }
+func (*ListNodesRequest_Filters) ProtoMessage() {}
+func (*ListNodesRequest_Filters) Descriptor() ([]byte, []int) {
+ return fileDescriptorControl, []int{2, 0}
+}
+
+type ListNodesResponse struct {
+ Nodes []*Node `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"`
+}
+
+func (m *ListNodesResponse) Reset() { *m = ListNodesResponse{} }
+func (*ListNodesResponse) ProtoMessage() {}
+func (*ListNodesResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{3} }
+
+// UpdateNodeRequest requests an update to the specified node. This may be used
+// to request a new availability for a node, such as PAUSE. Invalid updates
+// will be denied and cause an error.
+type UpdateNodeRequest struct {
+ NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ NodeVersion *Version `protobuf:"bytes,2,opt,name=node_version,json=nodeVersion" json:"node_version,omitempty"`
+ Spec *NodeSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"`
+}
+
+func (m *UpdateNodeRequest) Reset() { *m = UpdateNodeRequest{} }
+func (*UpdateNodeRequest) ProtoMessage() {}
+func (*UpdateNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{4} }
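+
+// Usage sketch (illustrative values; assumes the NodeAvailabilityPause
+// constant generated from types.proto for the PAUSE availability mentioned
+// above):
+//
+//	spec := node.Spec.Copy()
+//	spec.Availability = NodeAvailabilityPause
+//	req := &UpdateNodeRequest{
+//		NodeID:      node.ID,
+//		NodeVersion: &node.Meta.Version,
+//		Spec:        spec,
+//	}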
+
+type UpdateNodeResponse struct {
+ Node *Node `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"`
+}
+
+func (m *UpdateNodeResponse) Reset() { *m = UpdateNodeResponse{} }
+func (*UpdateNodeResponse) ProtoMessage() {}
+func (*UpdateNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{5} }
+
+// RemoveNodeRequest requests to delete the specified node from store.
+type RemoveNodeRequest struct {
+ NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+}
+
+func (m *RemoveNodeRequest) Reset() { *m = RemoveNodeRequest{} }
+func (*RemoveNodeRequest) ProtoMessage() {}
+func (*RemoveNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{6} }
+
+type RemoveNodeResponse struct {
+}
+
+func (m *RemoveNodeResponse) Reset() { *m = RemoveNodeResponse{} }
+func (*RemoveNodeResponse) ProtoMessage() {}
+func (*RemoveNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{7} }
+
+type GetTaskRequest struct {
+ TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
+}
+
+func (m *GetTaskRequest) Reset() { *m = GetTaskRequest{} }
+func (*GetTaskRequest) ProtoMessage() {}
+func (*GetTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{8} }
+
+type GetTaskResponse struct {
+ Task *Task `protobuf:"bytes,1,opt,name=task" json:"task,omitempty"`
+}
+
+func (m *GetTaskResponse) Reset() { *m = GetTaskResponse{} }
+func (*GetTaskResponse) ProtoMessage() {}
+func (*GetTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{9} }
+
+type RemoveTaskRequest struct {
+ TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
+}
+
+func (m *RemoveTaskRequest) Reset() { *m = RemoveTaskRequest{} }
+func (*RemoveTaskRequest) ProtoMessage() {}
+func (*RemoveTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{10} }
+
+type RemoveTaskResponse struct {
+}
+
+func (m *RemoveTaskResponse) Reset() { *m = RemoveTaskResponse{} }
+func (*RemoveTaskResponse) ProtoMessage() {}
+func (*RemoveTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{11} }
+
+type ListTasksRequest struct {
+ Filters *ListTasksRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"`
+}
+
+func (m *ListTasksRequest) Reset() { *m = ListTasksRequest{} }
+func (*ListTasksRequest) ProtoMessage() {}
+func (*ListTasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{12} }
+
+type ListTasksRequest_Filters struct {
+ Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"`
+ IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"`
+ Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ ServiceIDs []string `protobuf:"bytes,4,rep,name=service_ids,json=serviceIds" json:"service_ids,omitempty"`
+ NodeIDs []string `protobuf:"bytes,5,rep,name=node_ids,json=nodeIds" json:"node_ids,omitempty"`
+ DesiredStates []TaskState `protobuf:"varint,6,rep,name=desired_states,json=desiredStates,enum=docker.swarmkit.v1.TaskState" json:"desired_states,omitempty"`
+}
+
+func (m *ListTasksRequest_Filters) Reset() { *m = ListTasksRequest_Filters{} }
+func (*ListTasksRequest_Filters) ProtoMessage() {}
+func (*ListTasksRequest_Filters) Descriptor() ([]byte, []int) {
+ return fileDescriptorControl, []int{12, 0}
+}
+
+type ListTasksResponse struct {
+ Tasks []*Task `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"`
+}
+
+func (m *ListTasksResponse) Reset() { *m = ListTasksResponse{} }
+func (*ListTasksResponse) ProtoMessage() {}
+func (*ListTasksResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{13} }
+
+type CreateServiceRequest struct {
+ Spec *ServiceSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"`
+}
+
+func (m *CreateServiceRequest) Reset() { *m = CreateServiceRequest{} }
+func (*CreateServiceRequest) ProtoMessage() {}
+func (*CreateServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{14} }
+
+type CreateServiceResponse struct {
+ Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"`
+}
+
+func (m *CreateServiceResponse) Reset() { *m = CreateServiceResponse{} }
+func (*CreateServiceResponse) ProtoMessage() {}
+func (*CreateServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{15} }
+
+type GetServiceRequest struct {
+ ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+}
+
+func (m *GetServiceRequest) Reset() { *m = GetServiceRequest{} }
+func (*GetServiceRequest) ProtoMessage() {}
+func (*GetServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{16} }
+
+type GetServiceResponse struct {
+ Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"`
+}
+
+func (m *GetServiceResponse) Reset() { *m = GetServiceResponse{} }
+func (*GetServiceResponse) ProtoMessage() {}
+func (*GetServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{17} }
+
+type UpdateServiceRequest struct {
+ ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ ServiceVersion *Version `protobuf:"bytes,2,opt,name=service_version,json=serviceVersion" json:"service_version,omitempty"`
+ Spec *ServiceSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"`
+}
+
+func (m *UpdateServiceRequest) Reset() { *m = UpdateServiceRequest{} }
+func (*UpdateServiceRequest) ProtoMessage() {}
+func (*UpdateServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{18} }
+
+type UpdateServiceResponse struct {
+ Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"`
+}
+
+func (m *UpdateServiceResponse) Reset() { *m = UpdateServiceResponse{} }
+func (*UpdateServiceResponse) ProtoMessage() {}
+func (*UpdateServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{19} }
+
+type RemoveServiceRequest struct {
+ ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+}
+
+func (m *RemoveServiceRequest) Reset() { *m = RemoveServiceRequest{} }
+func (*RemoveServiceRequest) ProtoMessage() {}
+func (*RemoveServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{20} }
+
+type RemoveServiceResponse struct {
+}
+
+func (m *RemoveServiceResponse) Reset() { *m = RemoveServiceResponse{} }
+func (*RemoveServiceResponse) ProtoMessage() {}
+func (*RemoveServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{21} }
+
+type ListServicesRequest struct {
+ Filters *ListServicesRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"`
+}
+
+func (m *ListServicesRequest) Reset() { *m = ListServicesRequest{} }
+func (*ListServicesRequest) ProtoMessage() {}
+func (*ListServicesRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{22} }
+
+type ListServicesRequest_Filters struct {
+ Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"`
+ IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"`
+ Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *ListServicesRequest_Filters) Reset() { *m = ListServicesRequest_Filters{} }
+func (*ListServicesRequest_Filters) ProtoMessage() {}
+func (*ListServicesRequest_Filters) Descriptor() ([]byte, []int) {
+ return fileDescriptorControl, []int{22, 0}
+}
+
+type ListServicesResponse struct {
+ Services []*Service `protobuf:"bytes,1,rep,name=services" json:"services,omitempty"`
+}
+
+func (m *ListServicesResponse) Reset() { *m = ListServicesResponse{} }
+func (*ListServicesResponse) ProtoMessage() {}
+func (*ListServicesResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{23} }
+
+type CreateNetworkRequest struct {
+ Spec *NetworkSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"`
+}
+
+func (m *CreateNetworkRequest) Reset() { *m = CreateNetworkRequest{} }
+func (*CreateNetworkRequest) ProtoMessage() {}
+func (*CreateNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{24} }
+
+type CreateNetworkResponse struct {
+ Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"`
+}
+
+func (m *CreateNetworkResponse) Reset() { *m = CreateNetworkResponse{} }
+func (*CreateNetworkResponse) ProtoMessage() {}
+func (*CreateNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{25} }
+
+type GetNetworkRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ NetworkID string `protobuf:"bytes,2,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"`
+}
+
+func (m *GetNetworkRequest) Reset() { *m = GetNetworkRequest{} }
+func (*GetNetworkRequest) ProtoMessage() {}
+func (*GetNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{26} }
+
+type GetNetworkResponse struct {
+ Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"`
+}
+
+func (m *GetNetworkResponse) Reset() { *m = GetNetworkResponse{} }
+func (*GetNetworkResponse) ProtoMessage() {}
+func (*GetNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{27} }
+
+type RemoveNetworkRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ NetworkID string `protobuf:"bytes,2,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"`
+}
+
+func (m *RemoveNetworkRequest) Reset() { *m = RemoveNetworkRequest{} }
+func (*RemoveNetworkRequest) ProtoMessage() {}
+func (*RemoveNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{28} }
+
+type RemoveNetworkResponse struct {
+}
+
+func (m *RemoveNetworkResponse) Reset() { *m = RemoveNetworkResponse{} }
+func (*RemoveNetworkResponse) ProtoMessage() {}
+func (*RemoveNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{29} }
+
+type ListNetworksRequest struct {
+ Filters *ListNetworksRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"`
+}
+
+func (m *ListNetworksRequest) Reset() { *m = ListNetworksRequest{} }
+func (*ListNetworksRequest) ProtoMessage() {}
+func (*ListNetworksRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{30} }
+
+type ListNetworksRequest_Filters struct {
+ Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"`
+ IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"`
+ Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *ListNetworksRequest_Filters) Reset() { *m = ListNetworksRequest_Filters{} }
+func (*ListNetworksRequest_Filters) ProtoMessage() {}
+func (*ListNetworksRequest_Filters) Descriptor() ([]byte, []int) {
+ return fileDescriptorControl, []int{30, 0}
+}
+
+type ListNetworksResponse struct {
+ Networks []*Network `protobuf:"bytes,1,rep,name=networks" json:"networks,omitempty"`
+}
+
+func (m *ListNetworksResponse) Reset() { *m = ListNetworksResponse{} }
+func (*ListNetworksResponse) ProtoMessage() {}
+func (*ListNetworksResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{31} }
+
+type RemoveManagerResponse struct {
+}
+
+func (m *RemoveManagerResponse) Reset() { *m = RemoveManagerResponse{} }
+func (*RemoveManagerResponse) ProtoMessage() {}
+func (*RemoveManagerResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{32} }
+
+type GetClusterRequest struct {
+ ClusterID string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+}
+
+func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} }
+func (*GetClusterRequest) ProtoMessage() {}
+func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{33} }
+
+type GetClusterResponse struct {
+ Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"`
+}
+
+func (m *GetClusterResponse) Reset() { *m = GetClusterResponse{} }
+func (*GetClusterResponse) ProtoMessage() {}
+func (*GetClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{34} }
+
+type ListClustersRequest struct {
+ Filters *ListClustersRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"`
+}
+
+func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} }
+func (*ListClustersRequest) ProtoMessage() {}
+func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{35} }
+
+type ListClustersRequest_Filters struct {
+ Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"`
+ IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"`
+ Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *ListClustersRequest_Filters) Reset() { *m = ListClustersRequest_Filters{} }
+func (*ListClustersRequest_Filters) ProtoMessage() {}
+func (*ListClustersRequest_Filters) Descriptor() ([]byte, []int) {
+ return fileDescriptorControl, []int{35, 0}
+}
+
+type ListClustersResponse struct {
+ Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"`
+}
+
+func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} }
+func (*ListClustersResponse) ProtoMessage() {}
+func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{36} }
+
+type UpdateClusterRequest struct {
+ // ClusterID is the cluster ID to update.
+ ClusterID string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ // ClusterVersion is the version of the cluster being updated.
+ ClusterVersion *Version `protobuf:"bytes,2,opt,name=cluster_version,json=clusterVersion" json:"cluster_version,omitempty"`
+ // Spec is the new spec to apply to the cluster.
+ Spec *ClusterSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"`
+}
+
+func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} }
+func (*UpdateClusterRequest) ProtoMessage() {}
+func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{37} }
+
+type UpdateClusterResponse struct {
+ Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"`
+}
+
+func (m *UpdateClusterResponse) Reset() { *m = UpdateClusterResponse{} }
+func (*UpdateClusterResponse) ProtoMessage() {}
+func (*UpdateClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{38} }
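+
+// Illustrative sketch, not emitted by the generator: updates follow a
+// read-modify-write pattern in which the version returned by a Get call is
+// echoed back in the *Version field, so the manager can reject writes made
+// against a stale object. Assumes a ControlClient `client`, a service ID
+// `id`, the swarmkit object layout (Service.Meta.Version,
+// ServiceSpec.Annotations.Labels), and an initialized Labels map; error
+// handling elided.
+//
+//	get, _ := client.GetService(ctx, &GetServiceRequest{ServiceID: id})
+//	spec := get.Service.Spec.Copy()
+//	spec.Annotations.Labels["tier"] = "web"
+//	_, err := client.UpdateService(ctx, &UpdateServiceRequest{
+//		ServiceID:      id,
+//		ServiceVersion: &get.Service.Meta.Version,
+//		Spec:           spec,
+//	})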
+
+func init() {
+ proto.RegisterType((*GetNodeRequest)(nil), "docker.swarmkit.v1.GetNodeRequest")
+ proto.RegisterType((*GetNodeResponse)(nil), "docker.swarmkit.v1.GetNodeResponse")
+ proto.RegisterType((*ListNodesRequest)(nil), "docker.swarmkit.v1.ListNodesRequest")
+ proto.RegisterType((*ListNodesRequest_Filters)(nil), "docker.swarmkit.v1.ListNodesRequest.Filters")
+ proto.RegisterType((*ListNodesResponse)(nil), "docker.swarmkit.v1.ListNodesResponse")
+ proto.RegisterType((*UpdateNodeRequest)(nil), "docker.swarmkit.v1.UpdateNodeRequest")
+ proto.RegisterType((*UpdateNodeResponse)(nil), "docker.swarmkit.v1.UpdateNodeResponse")
+ proto.RegisterType((*RemoveNodeRequest)(nil), "docker.swarmkit.v1.RemoveNodeRequest")
+ proto.RegisterType((*RemoveNodeResponse)(nil), "docker.swarmkit.v1.RemoveNodeResponse")
+ proto.RegisterType((*GetTaskRequest)(nil), "docker.swarmkit.v1.GetTaskRequest")
+ proto.RegisterType((*GetTaskResponse)(nil), "docker.swarmkit.v1.GetTaskResponse")
+ proto.RegisterType((*RemoveTaskRequest)(nil), "docker.swarmkit.v1.RemoveTaskRequest")
+ proto.RegisterType((*RemoveTaskResponse)(nil), "docker.swarmkit.v1.RemoveTaskResponse")
+ proto.RegisterType((*ListTasksRequest)(nil), "docker.swarmkit.v1.ListTasksRequest")
+ proto.RegisterType((*ListTasksRequest_Filters)(nil), "docker.swarmkit.v1.ListTasksRequest.Filters")
+ proto.RegisterType((*ListTasksResponse)(nil), "docker.swarmkit.v1.ListTasksResponse")
+ proto.RegisterType((*CreateServiceRequest)(nil), "docker.swarmkit.v1.CreateServiceRequest")
+ proto.RegisterType((*CreateServiceResponse)(nil), "docker.swarmkit.v1.CreateServiceResponse")
+ proto.RegisterType((*GetServiceRequest)(nil), "docker.swarmkit.v1.GetServiceRequest")
+ proto.RegisterType((*GetServiceResponse)(nil), "docker.swarmkit.v1.GetServiceResponse")
+ proto.RegisterType((*UpdateServiceRequest)(nil), "docker.swarmkit.v1.UpdateServiceRequest")
+ proto.RegisterType((*UpdateServiceResponse)(nil), "docker.swarmkit.v1.UpdateServiceResponse")
+ proto.RegisterType((*RemoveServiceRequest)(nil), "docker.swarmkit.v1.RemoveServiceRequest")
+ proto.RegisterType((*RemoveServiceResponse)(nil), "docker.swarmkit.v1.RemoveServiceResponse")
+ proto.RegisterType((*ListServicesRequest)(nil), "docker.swarmkit.v1.ListServicesRequest")
+ proto.RegisterType((*ListServicesRequest_Filters)(nil), "docker.swarmkit.v1.ListServicesRequest.Filters")
+ proto.RegisterType((*ListServicesResponse)(nil), "docker.swarmkit.v1.ListServicesResponse")
+ proto.RegisterType((*CreateNetworkRequest)(nil), "docker.swarmkit.v1.CreateNetworkRequest")
+ proto.RegisterType((*CreateNetworkResponse)(nil), "docker.swarmkit.v1.CreateNetworkResponse")
+ proto.RegisterType((*GetNetworkRequest)(nil), "docker.swarmkit.v1.GetNetworkRequest")
+ proto.RegisterType((*GetNetworkResponse)(nil), "docker.swarmkit.v1.GetNetworkResponse")
+ proto.RegisterType((*RemoveNetworkRequest)(nil), "docker.swarmkit.v1.RemoveNetworkRequest")
+ proto.RegisterType((*RemoveNetworkResponse)(nil), "docker.swarmkit.v1.RemoveNetworkResponse")
+ proto.RegisterType((*ListNetworksRequest)(nil), "docker.swarmkit.v1.ListNetworksRequest")
+ proto.RegisterType((*ListNetworksRequest_Filters)(nil), "docker.swarmkit.v1.ListNetworksRequest.Filters")
+ proto.RegisterType((*ListNetworksResponse)(nil), "docker.swarmkit.v1.ListNetworksResponse")
+ proto.RegisterType((*RemoveManagerResponse)(nil), "docker.swarmkit.v1.RemoveManagerResponse")
+ proto.RegisterType((*GetClusterRequest)(nil), "docker.swarmkit.v1.GetClusterRequest")
+ proto.RegisterType((*GetClusterResponse)(nil), "docker.swarmkit.v1.GetClusterResponse")
+ proto.RegisterType((*ListClustersRequest)(nil), "docker.swarmkit.v1.ListClustersRequest")
+ proto.RegisterType((*ListClustersRequest_Filters)(nil), "docker.swarmkit.v1.ListClustersRequest.Filters")
+ proto.RegisterType((*ListClustersResponse)(nil), "docker.swarmkit.v1.ListClustersResponse")
+ proto.RegisterType((*UpdateClusterRequest)(nil), "docker.swarmkit.v1.UpdateClusterRequest")
+ proto.RegisterType((*UpdateClusterResponse)(nil), "docker.swarmkit.v1.UpdateClusterResponse")
+}
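+
+// The registrations above make every Control message resolvable by its fully
+// qualified name through the gogo/protobuf registry. Illustrative lookup
+// (reflect is already imported by this file):
+//
+//	t := proto.MessageType("docker.swarmkit.v1.GetNodeRequest")
+//	msg := reflect.New(t.Elem()).Interface().(proto.Message)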
+
+type authenticatedWrapperControlServer struct {
+ local ControlServer
+ authorize func(context.Context, []string) error
+}
+
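+// NewAuthenticatedWrapperControlServer wraps local so that every Control RPC
+// first passes the request context to authorize with the required role set
+// (always "swarm-manager" here) and fails closed when authorization errors.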
+func NewAuthenticatedWrapperControlServer(local ControlServer, authorize func(context.Context, []string) error) ControlServer {
+ return &authenticatedWrapperControlServer{
+ local: local,
+ authorize: authorize,
+ }
+}
+
+func (p *authenticatedWrapperControlServer) GetNode(ctx context.Context, r *GetNodeRequest) (*GetNodeResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.GetNode(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) ListNodes(ctx context.Context, r *ListNodesRequest) (*ListNodesResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.ListNodes(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) UpdateNode(ctx context.Context, r *UpdateNodeRequest) (*UpdateNodeResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.UpdateNode(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) RemoveNode(ctx context.Context, r *RemoveNodeRequest) (*RemoveNodeResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.RemoveNode(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) GetTask(ctx context.Context, r *GetTaskRequest) (*GetTaskResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.GetTask(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) ListTasks(ctx context.Context, r *ListTasksRequest) (*ListTasksResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.ListTasks(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) RemoveTask(ctx context.Context, r *RemoveTaskRequest) (*RemoveTaskResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.RemoveTask(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) GetService(ctx context.Context, r *GetServiceRequest) (*GetServiceResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.GetService(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) ListServices(ctx context.Context, r *ListServicesRequest) (*ListServicesResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.ListServices(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) CreateService(ctx context.Context, r *CreateServiceRequest) (*CreateServiceResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.CreateService(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) UpdateService(ctx context.Context, r *UpdateServiceRequest) (*UpdateServiceResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.UpdateService(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) RemoveService(ctx context.Context, r *RemoveServiceRequest) (*RemoveServiceResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.RemoveService(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) GetNetwork(ctx context.Context, r *GetNetworkRequest) (*GetNetworkResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.GetNetwork(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) ListNetworks(ctx context.Context, r *ListNetworksRequest) (*ListNetworksResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.ListNetworks(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) CreateNetwork(ctx context.Context, r *CreateNetworkRequest) (*CreateNetworkResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.CreateNetwork(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) RemoveNetwork(ctx context.Context, r *RemoveNetworkRequest) (*RemoveNetworkResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.RemoveNetwork(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) GetCluster(ctx context.Context, r *GetClusterRequest) (*GetClusterResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.GetCluster(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) ListClusters(ctx context.Context, r *ListClustersRequest) (*ListClustersResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.ListClusters(ctx, r)
+}
+
+func (p *authenticatedWrapperControlServer) UpdateCluster(ctx context.Context, r *UpdateClusterRequest) (*UpdateClusterResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.UpdateCluster(ctx, r)
+}
+
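+// The Copy methods below produce deep copies: slices and maps are
+// reallocated and nested messages are copied recursively, so mutating a copy
+// never aliases the original. A nil receiver copies to nil.
+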
+func (m *GetNodeRequest) Copy() *GetNodeRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &GetNodeRequest{
+ NodeID: m.NodeID,
+ }
+
+ return o
+}
+
+func (m *GetNodeResponse) Copy() *GetNodeResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &GetNodeResponse{
+ Node: m.Node.Copy(),
+ }
+
+ return o
+}
+
+func (m *ListNodesRequest) Copy() *ListNodesRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListNodesRequest{
+ Filters: m.Filters.Copy(),
+ }
+
+ return o
+}
+
+func (m *ListNodesRequest_Filters) Copy() *ListNodesRequest_Filters {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListNodesRequest_Filters{}
+
+ if m.Names != nil {
+ o.Names = make([]string, 0, len(m.Names))
+ for _, v := range m.Names {
+ o.Names = append(o.Names, v)
+ }
+ }
+
+ if m.IDPrefixes != nil {
+ o.IDPrefixes = make([]string, 0, len(m.IDPrefixes))
+ for _, v := range m.IDPrefixes {
+ o.IDPrefixes = append(o.IDPrefixes, v)
+ }
+ }
+
+ if m.Labels != nil {
+ o.Labels = make(map[string]string)
+ for k, v := range m.Labels {
+ o.Labels[k] = v
+ }
+ }
+
+ if m.Memberships != nil {
+ o.Memberships = make([]NodeSpec_Membership, 0, len(m.Memberships))
+ for _, v := range m.Memberships {
+ o.Memberships = append(o.Memberships, v)
+ }
+ }
+
+ if m.Roles != nil {
+ o.Roles = make([]NodeRole, 0, len(m.Roles))
+ for _, v := range m.Roles {
+ o.Roles = append(o.Roles, v)
+ }
+ }
+
+ return o
+}
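+
+// Illustrative use of the deep copy above (assumes filters.Labels is
+// non-nil):
+//
+//	dup := filters.Copy()
+//	dup.Labels["env"] = "prod" // filters.Labels is unchanged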
+
+func (m *ListNodesResponse) Copy() *ListNodesResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListNodesResponse{}
+
+ if m.Nodes != nil {
+ o.Nodes = make([]*Node, 0, len(m.Nodes))
+ for _, v := range m.Nodes {
+ o.Nodes = append(o.Nodes, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *UpdateNodeRequest) Copy() *UpdateNodeRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &UpdateNodeRequest{
+ NodeID: m.NodeID,
+ NodeVersion: m.NodeVersion.Copy(),
+ Spec: m.Spec.Copy(),
+ }
+
+ return o
+}
+
+func (m *UpdateNodeResponse) Copy() *UpdateNodeResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &UpdateNodeResponse{
+ Node: m.Node.Copy(),
+ }
+
+ return o
+}
+
+func (m *RemoveNodeRequest) Copy() *RemoveNodeRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &RemoveNodeRequest{
+ NodeID: m.NodeID,
+ }
+
+ return o
+}
+
+func (m *RemoveNodeResponse) Copy() *RemoveNodeResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &RemoveNodeResponse{}
+
+ return o
+}
+
+func (m *GetTaskRequest) Copy() *GetTaskRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &GetTaskRequest{
+ TaskID: m.TaskID,
+ }
+
+ return o
+}
+
+func (m *GetTaskResponse) Copy() *GetTaskResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &GetTaskResponse{
+ Task: m.Task.Copy(),
+ }
+
+ return o
+}
+
+func (m *RemoveTaskRequest) Copy() *RemoveTaskRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &RemoveTaskRequest{
+ TaskID: m.TaskID,
+ }
+
+ return o
+}
+
+func (m *RemoveTaskResponse) Copy() *RemoveTaskResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &RemoveTaskResponse{}
+
+ return o
+}
+
+func (m *ListTasksRequest) Copy() *ListTasksRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListTasksRequest{
+ Filters: m.Filters.Copy(),
+ }
+
+ return o
+}
+
+func (m *ListTasksRequest_Filters) Copy() *ListTasksRequest_Filters {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListTasksRequest_Filters{}
+
+ if m.Names != nil {
+ o.Names = make([]string, 0, len(m.Names))
+ for _, v := range m.Names {
+ o.Names = append(o.Names, v)
+ }
+ }
+
+ if m.IDPrefixes != nil {
+ o.IDPrefixes = make([]string, 0, len(m.IDPrefixes))
+ for _, v := range m.IDPrefixes {
+ o.IDPrefixes = append(o.IDPrefixes, v)
+ }
+ }
+
+ if m.Labels != nil {
+ o.Labels = make(map[string]string)
+ for k, v := range m.Labels {
+ o.Labels[k] = v
+ }
+ }
+
+ if m.ServiceIDs != nil {
+ o.ServiceIDs = make([]string, 0, len(m.ServiceIDs))
+ for _, v := range m.ServiceIDs {
+ o.ServiceIDs = append(o.ServiceIDs, v)
+ }
+ }
+
+ if m.NodeIDs != nil {
+ o.NodeIDs = make([]string, 0, len(m.NodeIDs))
+ for _, v := range m.NodeIDs {
+ o.NodeIDs = append(o.NodeIDs, v)
+ }
+ }
+
+ if m.DesiredStates != nil {
+ o.DesiredStates = make([]TaskState, 0, len(m.DesiredStates))
+ for _, v := range m.DesiredStates {
+ o.DesiredStates = append(o.DesiredStates, v)
+ }
+ }
+
+ return o
+}
+
+func (m *ListTasksResponse) Copy() *ListTasksResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListTasksResponse{}
+
+ if m.Tasks != nil {
+ o.Tasks = make([]*Task, 0, len(m.Tasks))
+ for _, v := range m.Tasks {
+ o.Tasks = append(o.Tasks, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *CreateServiceRequest) Copy() *CreateServiceRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &CreateServiceRequest{
+ Spec: m.Spec.Copy(),
+ }
+
+ return o
+}
+
+func (m *CreateServiceResponse) Copy() *CreateServiceResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &CreateServiceResponse{
+ Service: m.Service.Copy(),
+ }
+
+ return o
+}
+
+func (m *GetServiceRequest) Copy() *GetServiceRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &GetServiceRequest{
+ ServiceID: m.ServiceID,
+ }
+
+ return o
+}
+
+func (m *GetServiceResponse) Copy() *GetServiceResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &GetServiceResponse{
+ Service: m.Service.Copy(),
+ }
+
+ return o
+}
+
+func (m *UpdateServiceRequest) Copy() *UpdateServiceRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &UpdateServiceRequest{
+ ServiceID: m.ServiceID,
+ ServiceVersion: m.ServiceVersion.Copy(),
+ Spec: m.Spec.Copy(),
+ }
+
+ return o
+}
+
+func (m *UpdateServiceResponse) Copy() *UpdateServiceResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &UpdateServiceResponse{
+ Service: m.Service.Copy(),
+ }
+
+ return o
+}
+
+func (m *RemoveServiceRequest) Copy() *RemoveServiceRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &RemoveServiceRequest{
+ ServiceID: m.ServiceID,
+ }
+
+ return o
+}
+
+func (m *RemoveServiceResponse) Copy() *RemoveServiceResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &RemoveServiceResponse{}
+
+ return o
+}
+
+func (m *ListServicesRequest) Copy() *ListServicesRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListServicesRequest{
+ Filters: m.Filters.Copy(),
+ }
+
+ return o
+}
+
+func (m *ListServicesRequest_Filters) Copy() *ListServicesRequest_Filters {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListServicesRequest_Filters{}
+
+ if m.Names != nil {
+ o.Names = make([]string, 0, len(m.Names))
+ for _, v := range m.Names {
+ o.Names = append(o.Names, v)
+ }
+ }
+
+ if m.IDPrefixes != nil {
+ o.IDPrefixes = make([]string, 0, len(m.IDPrefixes))
+ for _, v := range m.IDPrefixes {
+ o.IDPrefixes = append(o.IDPrefixes, v)
+ }
+ }
+
+ if m.Labels != nil {
+ o.Labels = make(map[string]string)
+ for k, v := range m.Labels {
+ o.Labels[k] = v
+ }
+ }
+
+ return o
+}
+
+func (m *ListServicesResponse) Copy() *ListServicesResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListServicesResponse{}
+
+ if m.Services != nil {
+ o.Services = make([]*Service, 0, len(m.Services))
+ for _, v := range m.Services {
+ o.Services = append(o.Services, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *CreateNetworkRequest) Copy() *CreateNetworkRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &CreateNetworkRequest{
+ Spec: m.Spec.Copy(),
+ }
+
+ return o
+}
+
+func (m *CreateNetworkResponse) Copy() *CreateNetworkResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &CreateNetworkResponse{
+ Network: m.Network.Copy(),
+ }
+
+ return o
+}
+
+func (m *GetNetworkRequest) Copy() *GetNetworkRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &GetNetworkRequest{
+ Name: m.Name,
+ NetworkID: m.NetworkID,
+ }
+
+ return o
+}
+
+func (m *GetNetworkResponse) Copy() *GetNetworkResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &GetNetworkResponse{
+ Network: m.Network.Copy(),
+ }
+
+ return o
+}
+
+func (m *RemoveNetworkRequest) Copy() *RemoveNetworkRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &RemoveNetworkRequest{
+ Name: m.Name,
+ NetworkID: m.NetworkID,
+ }
+
+ return o
+}
+
+func (m *RemoveNetworkResponse) Copy() *RemoveNetworkResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &RemoveNetworkResponse{}
+
+ return o
+}
+
+func (m *ListNetworksRequest) Copy() *ListNetworksRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListNetworksRequest{
+ Filters: m.Filters.Copy(),
+ }
+
+ return o
+}
+
+func (m *ListNetworksRequest_Filters) Copy() *ListNetworksRequest_Filters {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListNetworksRequest_Filters{}
+
+ if m.Names != nil {
+ o.Names = make([]string, 0, len(m.Names))
+ for _, v := range m.Names {
+ o.Names = append(o.Names, v)
+ }
+ }
+
+ if m.IDPrefixes != nil {
+ o.IDPrefixes = make([]string, 0, len(m.IDPrefixes))
+ for _, v := range m.IDPrefixes {
+ o.IDPrefixes = append(o.IDPrefixes, v)
+ }
+ }
+
+ if m.Labels != nil {
+ o.Labels = make(map[string]string)
+ for k, v := range m.Labels {
+ o.Labels[k] = v
+ }
+ }
+
+ return o
+}
+
+func (m *ListNetworksResponse) Copy() *ListNetworksResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListNetworksResponse{}
+
+ if m.Networks != nil {
+ o.Networks = make([]*Network, 0, len(m.Networks))
+ for _, v := range m.Networks {
+ o.Networks = append(o.Networks, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *RemoveManagerResponse) Copy() *RemoveManagerResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &RemoveManagerResponse{}
+
+ return o
+}
+
+func (m *GetClusterRequest) Copy() *GetClusterRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &GetClusterRequest{
+ ClusterID: m.ClusterID,
+ }
+
+ return o
+}
+
+func (m *GetClusterResponse) Copy() *GetClusterResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &GetClusterResponse{
+ Cluster: m.Cluster.Copy(),
+ }
+
+ return o
+}
+
+func (m *ListClustersRequest) Copy() *ListClustersRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListClustersRequest{
+ Filters: m.Filters.Copy(),
+ }
+
+ return o
+}
+
+func (m *ListClustersRequest_Filters) Copy() *ListClustersRequest_Filters {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListClustersRequest_Filters{}
+
+ if m.Names != nil {
+ o.Names = make([]string, 0, len(m.Names))
+ for _, v := range m.Names {
+ o.Names = append(o.Names, v)
+ }
+ }
+
+ if m.IDPrefixes != nil {
+ o.IDPrefixes = make([]string, 0, len(m.IDPrefixes))
+ for _, v := range m.IDPrefixes {
+ o.IDPrefixes = append(o.IDPrefixes, v)
+ }
+ }
+
+ if m.Labels != nil {
+ o.Labels = make(map[string]string)
+ for k, v := range m.Labels {
+ o.Labels[k] = v
+ }
+ }
+
+ return o
+}
+
+func (m *ListClustersResponse) Copy() *ListClustersResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &ListClustersResponse{}
+
+ if m.Clusters != nil {
+ o.Clusters = make([]*Cluster, 0, len(m.Clusters))
+ for _, v := range m.Clusters {
+ o.Clusters = append(o.Clusters, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *UpdateClusterRequest) Copy() *UpdateClusterRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &UpdateClusterRequest{
+ ClusterID: m.ClusterID,
+ ClusterVersion: m.ClusterVersion.Copy(),
+ Spec: m.Spec.Copy(),
+ }
+
+ return o
+}
+
+func (m *UpdateClusterResponse) Copy() *UpdateClusterResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &UpdateClusterResponse{
+ Cluster: m.Cluster.Copy(),
+ }
+
+ return o
+}
+
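+// The GoString methods below back fmt's %#v verb, rendering each message as
+// Go source with map keys sorted for deterministic output, e.g.:
+//
+//	fmt.Printf("%#v\n", &GetNodeRequest{NodeID: "n1"})
+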
+func (this *GetNodeRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.GetNodeRequest{")
+ s = append(s, "NodeID: "+fmt.Sprintf("%#v", this.NodeID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetNodeResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.GetNodeResponse{")
+ if this.Node != nil {
+ s = append(s, "Node: "+fmt.Sprintf("%#v", this.Node)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListNodesRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ListNodesRequest{")
+ if this.Filters != nil {
+ s = append(s, "Filters: "+fmt.Sprintf("%#v", this.Filters)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListNodesRequest_Filters) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&api.ListNodesRequest_Filters{")
+ s = append(s, "Names: "+fmt.Sprintf("%#v", this.Names)+",\n")
+ s = append(s, "IDPrefixes: "+fmt.Sprintf("%#v", this.IDPrefixes)+",\n")
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ if this.Labels != nil {
+ s = append(s, "Labels: "+mapStringForLabels+",\n")
+ }
+ s = append(s, "Memberships: "+fmt.Sprintf("%#v", this.Memberships)+",\n")
+ s = append(s, "Roles: "+fmt.Sprintf("%#v", this.Roles)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListNodesResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ListNodesResponse{")
+ if this.Nodes != nil {
+ s = append(s, "Nodes: "+fmt.Sprintf("%#v", this.Nodes)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UpdateNodeRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.UpdateNodeRequest{")
+ s = append(s, "NodeID: "+fmt.Sprintf("%#v", this.NodeID)+",\n")
+ if this.NodeVersion != nil {
+ s = append(s, "NodeVersion: "+fmt.Sprintf("%#v", this.NodeVersion)+",\n")
+ }
+ if this.Spec != nil {
+ s = append(s, "Spec: "+fmt.Sprintf("%#v", this.Spec)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UpdateNodeResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.UpdateNodeResponse{")
+ if this.Node != nil {
+ s = append(s, "Node: "+fmt.Sprintf("%#v", this.Node)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RemoveNodeRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.RemoveNodeRequest{")
+ s = append(s, "NodeID: "+fmt.Sprintf("%#v", this.NodeID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RemoveNodeResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&api.RemoveNodeResponse{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetTaskRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.GetTaskRequest{")
+ s = append(s, "TaskID: "+fmt.Sprintf("%#v", this.TaskID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetTaskResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.GetTaskResponse{")
+ if this.Task != nil {
+ s = append(s, "Task: "+fmt.Sprintf("%#v", this.Task)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RemoveTaskRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.RemoveTaskRequest{")
+ s = append(s, "TaskID: "+fmt.Sprintf("%#v", this.TaskID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RemoveTaskResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&api.RemoveTaskResponse{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListTasksRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ListTasksRequest{")
+ if this.Filters != nil {
+ s = append(s, "Filters: "+fmt.Sprintf("%#v", this.Filters)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListTasksRequest_Filters) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 10)
+ s = append(s, "&api.ListTasksRequest_Filters{")
+ s = append(s, "Names: "+fmt.Sprintf("%#v", this.Names)+",\n")
+ s = append(s, "IDPrefixes: "+fmt.Sprintf("%#v", this.IDPrefixes)+",\n")
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ if this.Labels != nil {
+ s = append(s, "Labels: "+mapStringForLabels+",\n")
+ }
+ s = append(s, "ServiceIDs: "+fmt.Sprintf("%#v", this.ServiceIDs)+",\n")
+ s = append(s, "NodeIDs: "+fmt.Sprintf("%#v", this.NodeIDs)+",\n")
+ s = append(s, "DesiredStates: "+fmt.Sprintf("%#v", this.DesiredStates)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListTasksResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ListTasksResponse{")
+ if this.Tasks != nil {
+ s = append(s, "Tasks: "+fmt.Sprintf("%#v", this.Tasks)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *CreateServiceRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.CreateServiceRequest{")
+ if this.Spec != nil {
+ s = append(s, "Spec: "+fmt.Sprintf("%#v", this.Spec)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *CreateServiceResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.CreateServiceResponse{")
+ if this.Service != nil {
+ s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetServiceRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.GetServiceRequest{")
+ s = append(s, "ServiceID: "+fmt.Sprintf("%#v", this.ServiceID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetServiceResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.GetServiceResponse{")
+ if this.Service != nil {
+ s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UpdateServiceRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.UpdateServiceRequest{")
+ s = append(s, "ServiceID: "+fmt.Sprintf("%#v", this.ServiceID)+",\n")
+ if this.ServiceVersion != nil {
+ s = append(s, "ServiceVersion: "+fmt.Sprintf("%#v", this.ServiceVersion)+",\n")
+ }
+ if this.Spec != nil {
+ s = append(s, "Spec: "+fmt.Sprintf("%#v", this.Spec)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UpdateServiceResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.UpdateServiceResponse{")
+ if this.Service != nil {
+ s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RemoveServiceRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.RemoveServiceRequest{")
+ s = append(s, "ServiceID: "+fmt.Sprintf("%#v", this.ServiceID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RemoveServiceResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&api.RemoveServiceResponse{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListServicesRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ListServicesRequest{")
+ if this.Filters != nil {
+ s = append(s, "Filters: "+fmt.Sprintf("%#v", this.Filters)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListServicesRequest_Filters) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.ListServicesRequest_Filters{")
+ s = append(s, "Names: "+fmt.Sprintf("%#v", this.Names)+",\n")
+ s = append(s, "IDPrefixes: "+fmt.Sprintf("%#v", this.IDPrefixes)+",\n")
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ if this.Labels != nil {
+ s = append(s, "Labels: "+mapStringForLabels+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListServicesResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ListServicesResponse{")
+ if this.Services != nil {
+ s = append(s, "Services: "+fmt.Sprintf("%#v", this.Services)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *CreateNetworkRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.CreateNetworkRequest{")
+ if this.Spec != nil {
+ s = append(s, "Spec: "+fmt.Sprintf("%#v", this.Spec)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *CreateNetworkResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.CreateNetworkResponse{")
+ if this.Network != nil {
+ s = append(s, "Network: "+fmt.Sprintf("%#v", this.Network)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetNetworkRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.GetNetworkRequest{")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ s = append(s, "NetworkID: "+fmt.Sprintf("%#v", this.NetworkID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetNetworkResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.GetNetworkResponse{")
+ if this.Network != nil {
+ s = append(s, "Network: "+fmt.Sprintf("%#v", this.Network)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RemoveNetworkRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.RemoveNetworkRequest{")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ s = append(s, "NetworkID: "+fmt.Sprintf("%#v", this.NetworkID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RemoveNetworkResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&api.RemoveNetworkResponse{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListNetworksRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ListNetworksRequest{")
+ if this.Filters != nil {
+ s = append(s, "Filters: "+fmt.Sprintf("%#v", this.Filters)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListNetworksRequest_Filters) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.ListNetworksRequest_Filters{")
+ s = append(s, "Names: "+fmt.Sprintf("%#v", this.Names)+",\n")
+ s = append(s, "IDPrefixes: "+fmt.Sprintf("%#v", this.IDPrefixes)+",\n")
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ if this.Labels != nil {
+ s = append(s, "Labels: "+mapStringForLabels+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListNetworksResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ListNetworksResponse{")
+ if this.Networks != nil {
+ s = append(s, "Networks: "+fmt.Sprintf("%#v", this.Networks)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RemoveManagerResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&api.RemoveManagerResponse{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetClusterRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.GetClusterRequest{")
+ s = append(s, "ClusterID: "+fmt.Sprintf("%#v", this.ClusterID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetClusterResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.GetClusterResponse{")
+ if this.Cluster != nil {
+ s = append(s, "Cluster: "+fmt.Sprintf("%#v", this.Cluster)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListClustersRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ListClustersRequest{")
+ if this.Filters != nil {
+ s = append(s, "Filters: "+fmt.Sprintf("%#v", this.Filters)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListClustersRequest_Filters) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.ListClustersRequest_Filters{")
+ s = append(s, "Names: "+fmt.Sprintf("%#v", this.Names)+",\n")
+ s = append(s, "IDPrefixes: "+fmt.Sprintf("%#v", this.IDPrefixes)+",\n")
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ if this.Labels != nil {
+ s = append(s, "Labels: "+mapStringForLabels+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ListClustersResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ListClustersResponse{")
+ if this.Clusters != nil {
+ s = append(s, "Clusters: "+fmt.Sprintf("%#v", this.Clusters)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UpdateClusterRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.UpdateClusterRequest{")
+ s = append(s, "ClusterID: "+fmt.Sprintf("%#v", this.ClusterID)+",\n")
+ if this.ClusterVersion != nil {
+ s = append(s, "ClusterVersion: "+fmt.Sprintf("%#v", this.ClusterVersion)+",\n")
+ }
+ if this.Spec != nil {
+ s = append(s, "Spec: "+fmt.Sprintf("%#v", this.Spec)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UpdateClusterResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.UpdateClusterResponse{")
+ if this.Cluster != nil {
+ s = append(s, "Cluster: "+fmt.Sprintf("%#v", this.Cluster)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringControl(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringControl(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion2
+
+// Client API for Control service
+
+type ControlClient interface {
+ GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error)
+ ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error)
+ UpdateNode(ctx context.Context, in *UpdateNodeRequest, opts ...grpc.CallOption) (*UpdateNodeResponse, error)
+ RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error)
+ GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error)
+ ListTasks(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error)
+ RemoveTask(ctx context.Context, in *RemoveTaskRequest, opts ...grpc.CallOption) (*RemoveTaskResponse, error)
+ GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*GetServiceResponse, error)
+ ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error)
+ CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*CreateServiceResponse, error)
+ UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*UpdateServiceResponse, error)
+ RemoveService(ctx context.Context, in *RemoveServiceRequest, opts ...grpc.CallOption) (*RemoveServiceResponse, error)
+ GetNetwork(ctx context.Context, in *GetNetworkRequest, opts ...grpc.CallOption) (*GetNetworkResponse, error)
+ ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error)
+ CreateNetwork(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*CreateNetworkResponse, error)
+ RemoveNetwork(ctx context.Context, in *RemoveNetworkRequest, opts ...grpc.CallOption) (*RemoveNetworkResponse, error)
+ GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*GetClusterResponse, error)
+ ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
+ UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*UpdateClusterResponse, error)
+}
+
+type controlClient struct {
+ cc *grpc.ClientConn
+}
+
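+// NewControlClient returns a ControlClient backed by the given connection.
+// Illustrative dial sequence (the address is hypothetical and WithInsecure
+// is for brevity only; swarm managers normally require mutual TLS):
+//
+//	conn, err := grpc.Dial("manager:4242", grpc.WithInsecure())
+//	// handle err
+//	client := NewControlClient(conn)
+//	nodes, err := client.ListNodes(context.Background(), &ListNodesRequest{})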
+func NewControlClient(cc *grpc.ClientConn) ControlClient {
+ return &controlClient{cc}
+}
+
+func (c *controlClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) {
+ out := new(GetNodeResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetNode", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) {
+ out := new(ListNodesResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListNodes", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) UpdateNode(ctx context.Context, in *UpdateNodeRequest, opts ...grpc.CallOption) (*UpdateNodeResponse, error) {
+ out := new(UpdateNodeResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateNode", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) {
+ out := new(RemoveNodeResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveNode", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) {
+ out := new(GetTaskResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetTask", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) ListTasks(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) {
+ out := new(ListTasksResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListTasks", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) RemoveTask(ctx context.Context, in *RemoveTaskRequest, opts ...grpc.CallOption) (*RemoveTaskResponse, error) {
+ out := new(RemoveTaskResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveTask", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*GetServiceResponse, error) {
+ out := new(GetServiceResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetService", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) {
+ out := new(ListServicesResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListServices", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*CreateServiceResponse, error) {
+ out := new(CreateServiceResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateService", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*UpdateServiceResponse, error) {
+ out := new(UpdateServiceResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateService", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) RemoveService(ctx context.Context, in *RemoveServiceRequest, opts ...grpc.CallOption) (*RemoveServiceResponse, error) {
+ out := new(RemoveServiceResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveService", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) GetNetwork(ctx context.Context, in *GetNetworkRequest, opts ...grpc.CallOption) (*GetNetworkResponse, error) {
+ out := new(GetNetworkResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetNetwork", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) {
+ out := new(ListNetworksResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListNetworks", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) CreateNetwork(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*CreateNetworkResponse, error) {
+ out := new(CreateNetworkResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateNetwork", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) RemoveNetwork(ctx context.Context, in *RemoveNetworkRequest, opts ...grpc.CallOption) (*RemoveNetworkResponse, error) {
+ out := new(RemoveNetworkResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveNetwork", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*GetClusterResponse, error) {
+ out := new(GetClusterResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetCluster", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) {
+ out := new(ListClustersResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListClusters", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*UpdateClusterResponse, error) {
+ out := new(UpdateClusterResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateCluster", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Control service
+
+type ControlServer interface {
+ GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error)
+ ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error)
+ UpdateNode(context.Context, *UpdateNodeRequest) (*UpdateNodeResponse, error)
+ RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error)
+ GetTask(context.Context, *GetTaskRequest) (*GetTaskResponse, error)
+ ListTasks(context.Context, *ListTasksRequest) (*ListTasksResponse, error)
+ RemoveTask(context.Context, *RemoveTaskRequest) (*RemoveTaskResponse, error)
+ GetService(context.Context, *GetServiceRequest) (*GetServiceResponse, error)
+ ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error)
+ CreateService(context.Context, *CreateServiceRequest) (*CreateServiceResponse, error)
+ UpdateService(context.Context, *UpdateServiceRequest) (*UpdateServiceResponse, error)
+ RemoveService(context.Context, *RemoveServiceRequest) (*RemoveServiceResponse, error)
+ GetNetwork(context.Context, *GetNetworkRequest) (*GetNetworkResponse, error)
+ ListNetworks(context.Context, *ListNetworksRequest) (*ListNetworksResponse, error)
+ CreateNetwork(context.Context, *CreateNetworkRequest) (*CreateNetworkResponse, error)
+ RemoveNetwork(context.Context, *RemoveNetworkRequest) (*RemoveNetworkResponse, error)
+ GetCluster(context.Context, *GetClusterRequest) (*GetClusterResponse, error)
+ ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
+ UpdateCluster(context.Context, *UpdateClusterRequest) (*UpdateClusterResponse, error)
+}
+
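+// RegisterControlServer attaches a ControlServer implementation to a gRPC
+// server. Illustrative wiring (`ctrl`, `authorize`, and `lis` are assumed,
+// not defined in this file):
+//
+//	s := grpc.NewServer()
+//	RegisterControlServer(s, NewAuthenticatedWrapperControlServer(ctrl, authorize))
+//	go s.Serve(lis)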
+func RegisterControlServer(s *grpc.Server, srv ControlServer) {
+ s.RegisterService(&_Control_serviceDesc, srv)
+}
+
+func _Control_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNodeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).GetNode(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/GetNode",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).GetNode(ctx, req.(*GetNodeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
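+
+// Every _Control_*_Handler follows the same shape: decode the request, then
+// either call the server directly or route through the configured unary
+// interceptor. Illustrative logging interceptor (standard grpc-go
+// signature):
+//
+//	logging := func(ctx context.Context, req interface{},
+//		info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+//		log.Printf("control rpc: %s", info.FullMethod)
+//		return handler(ctx, req)
+//	}
+//	s := grpc.NewServer(grpc.UnaryInterceptor(logging))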
+
+func _Control_ListNodes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNodesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).ListNodes(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/ListNodes",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).ListNodes(ctx, req.(*ListNodesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_UpdateNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateNodeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).UpdateNode(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/UpdateNode",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).UpdateNode(ctx, req.(*UpdateNodeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_RemoveNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RemoveNodeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).RemoveNode(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/RemoveNode",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).RemoveNode(ctx, req.(*RemoveNodeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_GetTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetTaskRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).GetTask(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/GetTask",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).GetTask(ctx, req.(*GetTaskRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_ListTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListTasksRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).ListTasks(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/ListTasks",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).ListTasks(ctx, req.(*ListTasksRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_RemoveTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RemoveTaskRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).RemoveTask(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/RemoveTask",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).RemoveTask(ctx, req.(*RemoveTaskRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).GetService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/GetService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).GetService(ctx, req.(*GetServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListServicesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).ListServices(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/ListServices",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).ListServices(ctx, req.(*ListServicesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).CreateService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/CreateService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).CreateService(ctx, req.(*CreateServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).UpdateService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/UpdateService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).UpdateService(ctx, req.(*UpdateServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_RemoveService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RemoveServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).RemoveService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/RemoveService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).RemoveService(ctx, req.(*RemoveServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_GetNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNetworkRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).GetNetwork(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/GetNetwork",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).GetNetwork(ctx, req.(*GetNetworkRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_ListNetworks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNetworksRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).ListNetworks(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/ListNetworks",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).ListNetworks(ctx, req.(*ListNetworksRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_CreateNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateNetworkRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).CreateNetwork(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/CreateNetwork",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).CreateNetwork(ctx, req.(*CreateNetworkRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_RemoveNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RemoveNetworkRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).RemoveNetwork(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/RemoveNetwork",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).RemoveNetwork(ctx, req.(*RemoveNetworkRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetClusterRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).GetCluster(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/GetCluster",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).GetCluster(ctx, req.(*GetClusterRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListClustersRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).ListClusters(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/ListClusters",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).ListClusters(ctx, req.(*ListClustersRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateClusterRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).UpdateCluster(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Control/UpdateCluster",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).UpdateCluster(ctx, req.(*UpdateClusterRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Control_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "docker.swarmkit.v1.Control",
+ HandlerType: (*ControlServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "GetNode",
+ Handler: _Control_GetNode_Handler,
+ },
+ {
+ MethodName: "ListNodes",
+ Handler: _Control_ListNodes_Handler,
+ },
+ {
+ MethodName: "UpdateNode",
+ Handler: _Control_UpdateNode_Handler,
+ },
+ {
+ MethodName: "RemoveNode",
+ Handler: _Control_RemoveNode_Handler,
+ },
+ {
+ MethodName: "GetTask",
+ Handler: _Control_GetTask_Handler,
+ },
+ {
+ MethodName: "ListTasks",
+ Handler: _Control_ListTasks_Handler,
+ },
+ {
+ MethodName: "RemoveTask",
+ Handler: _Control_RemoveTask_Handler,
+ },
+ {
+ MethodName: "GetService",
+ Handler: _Control_GetService_Handler,
+ },
+ {
+ MethodName: "ListServices",
+ Handler: _Control_ListServices_Handler,
+ },
+ {
+ MethodName: "CreateService",
+ Handler: _Control_CreateService_Handler,
+ },
+ {
+ MethodName: "UpdateService",
+ Handler: _Control_UpdateService_Handler,
+ },
+ {
+ MethodName: "RemoveService",
+ Handler: _Control_RemoveService_Handler,
+ },
+ {
+ MethodName: "GetNetwork",
+ Handler: _Control_GetNetwork_Handler,
+ },
+ {
+ MethodName: "ListNetworks",
+ Handler: _Control_ListNetworks_Handler,
+ },
+ {
+ MethodName: "CreateNetwork",
+ Handler: _Control_CreateNetwork_Handler,
+ },
+ {
+ MethodName: "RemoveNetwork",
+ Handler: _Control_RemoveNetwork_Handler,
+ },
+ {
+ MethodName: "GetCluster",
+ Handler: _Control_GetCluster_Handler,
+ },
+ {
+ MethodName: "ListClusters",
+ Handler: _Control_ListClusters_Handler,
+ },
+ {
+ MethodName: "UpdateCluster",
+ Handler: _Control_UpdateCluster_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+}
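+
+// _Control_serviceDesc is what RegisterControlServer hands to the grpc.Server:
+// it maps each method name of docker.swarmkit.v1.Control to its handler above.
+// The Control service is entirely unary, so Streams stays empty.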
+
+func (m *GetNodeRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
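+
+// Each Marshal below sizes the message first, allocates exactly that many
+// bytes, and delegates to MarshalTo, returning only the n bytes written.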
+
+func (m *GetNodeRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.NodeID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.NodeID)))
+ i += copy(data[i:], m.NodeID)
+ }
+ return i, nil
+}
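+
+// Throughout these MarshalTo methods the literal tag bytes encode
+// (field_number << 3) | wire_type: 0xa, 0x12, and 0x1a are length-delimited
+// (wire type 2) fields 1-3, 0x22 and 0x2a are length-delimited fields 4 and 5,
+// and 0x20, 0x28, and 0x30 are varint (wire type 0) fields 4-6.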
+
+func (m *GetNodeResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GetNodeResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Node != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Node.Size()))
+ n1, err := m.Node.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ return i, nil
+}
+
+func (m *ListNodesRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListNodesRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Filters != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Filters.Size()))
+ n2, err := m.Filters.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ return i, nil
+}
+
+func (m *ListNodesRequest_Filters) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListNodesRequest_Filters) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Names) > 0 {
+ for _, s := range m.Names {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.IDPrefixes) > 0 {
+ for _, s := range m.IDPrefixes {
+ data[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Labels) > 0 {
+ for k := range m.Labels {
+ data[i] = 0x1a
+ i++
+ v := m.Labels[k]
+ mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ i = encodeVarintControl(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintControl(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ if len(m.Memberships) > 0 {
+ for _, num := range m.Memberships {
+ data[i] = 0x20
+ i++
+ i = encodeVarintControl(data, i, uint64(num))
+ }
+ }
+ if len(m.Roles) > 0 {
+ for _, num := range m.Roles {
+ data[i] = 0x28
+ i++
+ i = encodeVarintControl(data, i, uint64(num))
+ }
+ }
+ return i, nil
+}
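+
+// Repeated string fields above open-code the same base-128 varint loop that
+// encodeVarintControl implements further down. Map fields such as Labels are
+// encoded as one embedded message per entry: mapSize covers a key submessage
+// (tag 0xa) and a value submessage (tag 0x12), each carrying its own varint
+// length prefix.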
+
+func (m *ListNodesResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListNodesResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Nodes) > 0 {
+ for _, msg := range m.Nodes {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *UpdateNodeRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *UpdateNodeRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.NodeID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.NodeID)))
+ i += copy(data[i:], m.NodeID)
+ }
+ if m.NodeVersion != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintControl(data, i, uint64(m.NodeVersion.Size()))
+ n3, err := m.NodeVersion.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
+ if m.Spec != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Spec.Size()))
+ n4, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
+ return i, nil
+}
+
+func (m *UpdateNodeResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *UpdateNodeResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Node != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Node.Size()))
+ n5, err := m.Node.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ }
+ return i, nil
+}
+
+func (m *RemoveNodeRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RemoveNodeRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.NodeID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.NodeID)))
+ i += copy(data[i:], m.NodeID)
+ }
+ return i, nil
+}
+
+func (m *RemoveNodeResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RemoveNodeResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
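+
+// Empty responses such as RemoveNodeResponse have no fields, so MarshalTo
+// writes nothing and reports zero bytes.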
+
+func (m *GetTaskRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GetTaskRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.TaskID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.TaskID)))
+ i += copy(data[i:], m.TaskID)
+ }
+ return i, nil
+}
+
+func (m *GetTaskResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GetTaskResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Task != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Task.Size()))
+ n6, err := m.Task.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ return i, nil
+}
+
+func (m *RemoveTaskRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RemoveTaskRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.TaskID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.TaskID)))
+ i += copy(data[i:], m.TaskID)
+ }
+ return i, nil
+}
+
+func (m *RemoveTaskResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RemoveTaskResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *ListTasksRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListTasksRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Filters != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Filters.Size()))
+ n7, err := m.Filters.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ return i, nil
+}
+
+func (m *ListTasksRequest_Filters) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListTasksRequest_Filters) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Names) > 0 {
+ for _, s := range m.Names {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.IDPrefixes) > 0 {
+ for _, s := range m.IDPrefixes {
+ data[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Labels) > 0 {
+ for k := range m.Labels {
+ data[i] = 0x1a
+ i++
+ v := m.Labels[k]
+ mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ i = encodeVarintControl(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintControl(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ if len(m.ServiceIDs) > 0 {
+ for _, s := range m.ServiceIDs {
+ data[i] = 0x22
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.NodeIDs) > 0 {
+ for _, s := range m.NodeIDs {
+ data[i] = 0x2a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.DesiredStates) > 0 {
+ for _, num := range m.DesiredStates {
+ data[i] = 0x30
+ i++
+ i = encodeVarintControl(data, i, uint64(num))
+ }
+ }
+ return i, nil
+}
+
+func (m *ListTasksResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListTasksResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Tasks) > 0 {
+ for _, msg := range m.Tasks {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *CreateServiceRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CreateServiceRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Spec != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Spec.Size()))
+ n8, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ }
+ return i, nil
+}
+
+func (m *CreateServiceResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CreateServiceResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Service != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Service.Size()))
+ n9, err := m.Service.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ }
+ return i, nil
+}
+
+func (m *GetServiceRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GetServiceRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ServiceID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.ServiceID)))
+ i += copy(data[i:], m.ServiceID)
+ }
+ return i, nil
+}
+
+func (m *GetServiceResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GetServiceResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Service != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Service.Size()))
+ n10, err := m.Service.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ }
+ return i, nil
+}
+
+func (m *UpdateServiceRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *UpdateServiceRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ServiceID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.ServiceID)))
+ i += copy(data[i:], m.ServiceID)
+ }
+ if m.ServiceVersion != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintControl(data, i, uint64(m.ServiceVersion.Size()))
+ n11, err := m.ServiceVersion.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ }
+ if m.Spec != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Spec.Size()))
+ n12, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ }
+ return i, nil
+}
+
+func (m *UpdateServiceResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *UpdateServiceResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Service != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Service.Size()))
+ n13, err := m.Service.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ }
+ return i, nil
+}
+
+func (m *RemoveServiceRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RemoveServiceRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ServiceID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.ServiceID)))
+ i += copy(data[i:], m.ServiceID)
+ }
+ return i, nil
+}
+
+func (m *RemoveServiceResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RemoveServiceResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *ListServicesRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListServicesRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Filters != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Filters.Size()))
+ n14, err := m.Filters.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ }
+ return i, nil
+}
+
+func (m *ListServicesRequest_Filters) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListServicesRequest_Filters) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Names) > 0 {
+ for _, s := range m.Names {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.IDPrefixes) > 0 {
+ for _, s := range m.IDPrefixes {
+ data[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Labels) > 0 {
+ for k := range m.Labels {
+ data[i] = 0x1a
+ i++
+ v := m.Labels[k]
+ mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ i = encodeVarintControl(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintControl(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ return i, nil
+}
+
+func (m *ListServicesResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListServicesResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Services) > 0 {
+ for _, msg := range m.Services {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *CreateNetworkRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CreateNetworkRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Spec != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Spec.Size()))
+ n15, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n15
+ }
+ return i, nil
+}
+
+func (m *CreateNetworkResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CreateNetworkResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Network != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Network.Size()))
+ n16, err := m.Network.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ }
+ return i, nil
+}
+
+func (m *GetNetworkRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GetNetworkRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ }
+ if len(m.NetworkID) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.NetworkID)))
+ i += copy(data[i:], m.NetworkID)
+ }
+ return i, nil
+}
+
+func (m *GetNetworkResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GetNetworkResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Network != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Network.Size()))
+ n17, err := m.Network.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n17
+ }
+ return i, nil
+}
+
+func (m *RemoveNetworkRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RemoveNetworkRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ }
+ if len(m.NetworkID) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.NetworkID)))
+ i += copy(data[i:], m.NetworkID)
+ }
+ return i, nil
+}
+
+func (m *RemoveNetworkResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RemoveNetworkResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *ListNetworksRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListNetworksRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Filters != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Filters.Size()))
+ n18, err := m.Filters.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n18
+ }
+ return i, nil
+}
+
+func (m *ListNetworksRequest_Filters) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListNetworksRequest_Filters) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Names) > 0 {
+ for _, s := range m.Names {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.IDPrefixes) > 0 {
+ for _, s := range m.IDPrefixes {
+ data[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Labels) > 0 {
+ for k := range m.Labels {
+ data[i] = 0x1a
+ i++
+ v := m.Labels[k]
+ mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ i = encodeVarintControl(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintControl(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ return i, nil
+}
+
+func (m *ListNetworksResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListNetworksResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Networks) > 0 {
+ for _, msg := range m.Networks {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *RemoveManagerResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RemoveManagerResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *GetClusterRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GetClusterRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ClusterID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.ClusterID)))
+ i += copy(data[i:], m.ClusterID)
+ }
+ return i, nil
+}
+
+func (m *GetClusterResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GetClusterResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Cluster != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Cluster.Size()))
+ n19, err := m.Cluster.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n19
+ }
+ return i, nil
+}
+
+func (m *ListClustersRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListClustersRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Filters != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Filters.Size()))
+ n20, err := m.Filters.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n20
+ }
+ return i, nil
+}
+
+func (m *ListClustersRequest_Filters) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListClustersRequest_Filters) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Names) > 0 {
+ for _, s := range m.Names {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.IDPrefixes) > 0 {
+ for _, s := range m.IDPrefixes {
+ data[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Labels) > 0 {
+ for k := range m.Labels {
+ data[i] = 0x1a
+ i++
+ v := m.Labels[k]
+ mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ i = encodeVarintControl(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintControl(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ return i, nil
+}
+
+func (m *ListClustersResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListClustersResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Clusters) > 0 {
+ for _, msg := range m.Clusters {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *UpdateClusterRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *UpdateClusterRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ClusterID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(len(m.ClusterID)))
+ i += copy(data[i:], m.ClusterID)
+ }
+ if m.ClusterVersion != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintControl(data, i, uint64(m.ClusterVersion.Size()))
+ n21, err := m.ClusterVersion.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n21
+ }
+ if m.Spec != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Spec.Size()))
+ n22, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n22
+ }
+ return i, nil
+}
+
+func (m *UpdateClusterResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *UpdateClusterResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Cluster != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintControl(data, i, uint64(m.Cluster.Size()))
+ n23, err := m.Cluster.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n23
+ }
+ return i, nil
+}
+
+func encodeFixed64Control(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Control(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintControl(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
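+
+// encodeVarintControl emits protobuf base-128 varints: seven payload bits per
+// byte, least-significant group first, with the continuation bit set on every
+// byte but the last. For example, 300 (0b1_0010_1100) encodes as 0xac, 0x02.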
+
+type raftProxyControlServer struct {
+ local ControlServer
+ connSelector *raftpicker.ConnSelector
+ cluster raftpicker.RaftCluster
+ ctxMods []func(context.Context) (context.Context, error)
+}
+
+func NewRaftProxyControlServer(local ControlServer, connSelector *raftpicker.ConnSelector, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) ControlServer {
+ redirectChecker := func(ctx context.Context) (context.Context, error) {
+ s, ok := transport.StreamFromContext(ctx)
+ if !ok {
+ return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ }
+ addr := s.ServerTransport().RemoteAddr().String()
+ md, ok := metadata.FromContext(ctx)
+ if ok && len(md["redirect"]) != 0 {
+ return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ }
+ if !ok {
+ md = metadata.New(map[string]string{})
+ }
+ md["redirect"] = append(md["redirect"], addr)
+ return metadata.NewContext(ctx, md), nil
+ }
+ mods := []func(context.Context) (context.Context, error){redirectChecker}
+ // A nil ctxMod would be invoked by runCtxMods and panic on the first
+ // proxied call, so only append it when one is provided.
+ if ctxMod != nil {
+ mods = append(mods, ctxMod)
+ }
+
+ return &raftProxyControlServer{
+ local: local,
+ cluster: cluster,
+ connSelector: connSelector,
+ ctxMods: mods,
+ }
+}
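+
+// redirectChecker rejects a request that already carries "redirect" metadata,
+// so a Control call is forwarded to the leader at most once and cannot bounce
+// between managers; otherwise it stamps the caller's address into the
+// forwarded metadata.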
+func (p *raftProxyControlServer) runCtxMods(ctx context.Context) (context.Context, error) {
+ var err error
+ for _, mod := range p.ctxMods {
+ ctx, err = mod(ctx)
+ if err != nil {
+ return ctx, err
+ }
+ }
+ return ctx, nil
+}
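+
+// raftProxyControlServer lets every manager accept Control RPCs: each method
+// below serves the request locally when this node is the raft leader, and
+// otherwise runs the context mods and forwards the call to the leader over
+// the connection picked by connSelector.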
+
+func (p *raftProxyControlServer) GetNode(ctx context.Context, r *GetNodeRequest) (*GetNodeResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.GetNode(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).GetNode(ctx, r)
+}
+
+func (p *raftProxyControlServer) ListNodes(ctx context.Context, r *ListNodesRequest) (*ListNodesResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.ListNodes(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).ListNodes(ctx, r)
+}
+
+func (p *raftProxyControlServer) UpdateNode(ctx context.Context, r *UpdateNodeRequest) (*UpdateNodeResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.UpdateNode(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).UpdateNode(ctx, r)
+}
+
+func (p *raftProxyControlServer) RemoveNode(ctx context.Context, r *RemoveNodeRequest) (*RemoveNodeResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.RemoveNode(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).RemoveNode(ctx, r)
+}
+
+func (p *raftProxyControlServer) GetTask(ctx context.Context, r *GetTaskRequest) (*GetTaskResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.GetTask(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).GetTask(ctx, r)
+}
+
+func (p *raftProxyControlServer) ListTasks(ctx context.Context, r *ListTasksRequest) (*ListTasksResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.ListTasks(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).ListTasks(ctx, r)
+}
+
+func (p *raftProxyControlServer) RemoveTask(ctx context.Context, r *RemoveTaskRequest) (*RemoveTaskResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.RemoveTask(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).RemoveTask(ctx, r)
+}
+
+func (p *raftProxyControlServer) GetService(ctx context.Context, r *GetServiceRequest) (*GetServiceResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.GetService(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).GetService(ctx, r)
+}
+
+func (p *raftProxyControlServer) ListServices(ctx context.Context, r *ListServicesRequest) (*ListServicesResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.ListServices(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).ListServices(ctx, r)
+}
+
+func (p *raftProxyControlServer) CreateService(ctx context.Context, r *CreateServiceRequest) (*CreateServiceResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.CreateService(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).CreateService(ctx, r)
+}
+
+func (p *raftProxyControlServer) UpdateService(ctx context.Context, r *UpdateServiceRequest) (*UpdateServiceResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.UpdateService(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).UpdateService(ctx, r)
+}
+
+func (p *raftProxyControlServer) RemoveService(ctx context.Context, r *RemoveServiceRequest) (*RemoveServiceResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.RemoveService(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).RemoveService(ctx, r)
+}
+
+func (p *raftProxyControlServer) GetNetwork(ctx context.Context, r *GetNetworkRequest) (*GetNetworkResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.GetNetwork(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).GetNetwork(ctx, r)
+}
+
+func (p *raftProxyControlServer) ListNetworks(ctx context.Context, r *ListNetworksRequest) (*ListNetworksResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.ListNetworks(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).ListNetworks(ctx, r)
+}
+
+func (p *raftProxyControlServer) CreateNetwork(ctx context.Context, r *CreateNetworkRequest) (*CreateNetworkResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.CreateNetwork(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).CreateNetwork(ctx, r)
+}
+
+func (p *raftProxyControlServer) RemoveNetwork(ctx context.Context, r *RemoveNetworkRequest) (*RemoveNetworkResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.RemoveNetwork(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).RemoveNetwork(ctx, r)
+}
+
+func (p *raftProxyControlServer) GetCluster(ctx context.Context, r *GetClusterRequest) (*GetClusterResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.GetCluster(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).GetCluster(ctx, r)
+}
+
+func (p *raftProxyControlServer) ListClusters(ctx context.Context, r *ListClustersRequest) (*ListClustersResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.ListClusters(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).ListClusters(ctx, r)
+}
+
+func (p *raftProxyControlServer) UpdateCluster(ctx context.Context, r *UpdateClusterRequest) (*UpdateClusterResponse, error) {
+ if p.cluster.IsLeader() {
+ return p.local.UpdateCluster(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewControlClient(conn).UpdateCluster(ctx, r)
+}
+
+func (m *GetNodeRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.NodeID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
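+
+// The Size methods mirror MarshalTo byte for byte: a present length-delimited
+// field costs one tag byte, the varint length prefix (sovControl reports how
+// many bytes that varint needs), and l payload bytes, hence the recurring
+// 1 + l + sovControl(uint64(l)).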
+
+func (m *GetNodeResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Node != nil {
+ l = m.Node.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *ListNodesRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Filters != nil {
+ l = m.Filters.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *ListNodesRequest_Filters) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Names) > 0 {
+ for _, s := range m.Names {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.IDPrefixes) > 0 {
+ for _, s := range m.IDPrefixes {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Memberships) > 0 {
+ for _, e := range m.Memberships {
+ n += 1 + sovControl(uint64(e))
+ }
+ }
+ if len(m.Roles) > 0 {
+ for _, e := range m.Roles {
+ n += 1 + sovControl(uint64(e))
+ }
+ }
+ return n
+}
+
+func (m *ListNodesResponse) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Nodes) > 0 {
+ for _, e := range m.Nodes {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *UpdateNodeRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.NodeID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.NodeVersion != nil {
+ l = m.NodeVersion.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Spec != nil {
+ l = m.Spec.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *UpdateNodeResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Node != nil {
+ l = m.Node.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *RemoveNodeRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.NodeID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *RemoveNodeResponse) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *GetTaskRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.TaskID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *GetTaskResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Task != nil {
+ l = m.Task.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *RemoveTaskRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.TaskID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *RemoveTaskResponse) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *ListTasksRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Filters != nil {
+ l = m.Filters.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *ListTasksRequest_Filters) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Names) > 0 {
+ for _, s := range m.Names {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.IDPrefixes) > 0 {
+ for _, s := range m.IDPrefixes {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ if len(m.ServiceIDs) > 0 {
+ for _, s := range m.ServiceIDs {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.NodeIDs) > 0 {
+ for _, s := range m.NodeIDs {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.DesiredStates) > 0 {
+ for _, e := range m.DesiredStates {
+ n += 1 + sovControl(uint64(e))
+ }
+ }
+ return n
+}
+
+func (m *ListTasksResponse) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Tasks) > 0 {
+ for _, e := range m.Tasks {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CreateServiceRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Spec != nil {
+ l = m.Spec.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *CreateServiceResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Service != nil {
+ l = m.Service.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *GetServiceRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ServiceID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *GetServiceResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Service != nil {
+ l = m.Service.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *UpdateServiceRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ServiceID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.ServiceVersion != nil {
+ l = m.ServiceVersion.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Spec != nil {
+ l = m.Spec.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *UpdateServiceResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Service != nil {
+ l = m.Service.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *RemoveServiceRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ServiceID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *RemoveServiceResponse) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *ListServicesRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Filters != nil {
+ l = m.Filters.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *ListServicesRequest_Filters) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Names) > 0 {
+ for _, s := range m.Names {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.IDPrefixes) > 0 {
+ for _, s := range m.IDPrefixes {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *ListServicesResponse) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Services) > 0 {
+ for _, e := range m.Services {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CreateNetworkRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Spec != nil {
+ l = m.Spec.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *CreateNetworkResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Network != nil {
+ l = m.Network.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *GetNetworkRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ l = len(m.NetworkID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *GetNetworkResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Network != nil {
+ l = m.Network.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *RemoveNetworkRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ l = len(m.NetworkID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *RemoveNetworkResponse) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *ListNetworksRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Filters != nil {
+ l = m.Filters.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *ListNetworksRequest_Filters) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Names) > 0 {
+ for _, s := range m.Names {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.IDPrefixes) > 0 {
+ for _, s := range m.IDPrefixes {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *ListNetworksResponse) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Networks) > 0 {
+ for _, e := range m.Networks {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RemoveManagerResponse) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *GetClusterRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ClusterID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *GetClusterResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Cluster != nil {
+ l = m.Cluster.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *ListClustersRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Filters != nil {
+ l = m.Filters.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *ListClustersRequest_Filters) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Names) > 0 {
+ for _, s := range m.Names {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.IDPrefixes) > 0 {
+ for _, s := range m.IDPrefixes {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *ListClustersResponse) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Clusters) > 0 {
+ for _, e := range m.Clusters {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *UpdateClusterRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ClusterID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.ClusterVersion != nil {
+ l = m.ClusterVersion.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Spec != nil {
+ l = m.Spec.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
+func (m *UpdateClusterResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Cluster != nil {
+ l = m.Cluster.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ return n
+}
+
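+// Note (explanatory comment, not generator output): sovControl returns the
+// number of bytes needed to encode x as a base-128 varint, one byte per
+// started group of 7 bits: sovControl(0) == 1, sovControl(127) == 1,
+// sovControl(300) == 2. sozControl zigzag-encodes a signed value first so
+// that small negative numbers stay small: sozControl(^uint64(0)), the
+// encoding of int64(-1), is 1.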
+func sovControl(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozControl(x uint64) (n int) {
+ return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
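+// Note (explanatory comment, not generator output): the String methods below
+// assemble a debug representation by concatenating field dumps. The
+// strings.Replace(..., "Node", "Node", 1) style calls are no-ops, since old
+// and new are identical; the template presumably substitutes the element
+// type name on both sides and only rewrites anything when those names
+// differ.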
+func (this *GetNodeRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetNodeRequest{`,
+ `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetNodeResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetNodeResponse{`,
+ `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListNodesRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ListNodesRequest{`,
+ `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListNodesRequest_Filters", "ListNodesRequest_Filters", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListNodesRequest_Filters) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForLabels := make([]string, 0, len(this.Labels))
+	for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&ListNodesRequest_Filters{`,
+ `Names:` + fmt.Sprintf("%v", this.Names) + `,`,
+ `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `Memberships:` + fmt.Sprintf("%v", this.Memberships) + `,`,
+ `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListNodesResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ListNodesResponse{`,
+ `Nodes:` + strings.Replace(fmt.Sprintf("%v", this.Nodes), "Node", "Node", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UpdateNodeRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UpdateNodeRequest{`,
+ `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`,
+ `NodeVersion:` + strings.Replace(fmt.Sprintf("%v", this.NodeVersion), "Version", "Version", 1) + `,`,
+ `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "NodeSpec", "NodeSpec", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UpdateNodeResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UpdateNodeResponse{`,
+ `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RemoveNodeRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RemoveNodeRequest{`,
+ `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RemoveNodeResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RemoveNodeResponse{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetTaskRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetTaskRequest{`,
+ `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetTaskResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetTaskResponse{`,
+ `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RemoveTaskRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RemoveTaskRequest{`,
+ `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RemoveTaskResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RemoveTaskResponse{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListTasksRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ListTasksRequest{`,
+ `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListTasksRequest_Filters", "ListTasksRequest_Filters", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListTasksRequest_Filters) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForLabels := make([]string, 0, len(this.Labels))
+	for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&ListTasksRequest_Filters{`,
+ `Names:` + fmt.Sprintf("%v", this.Names) + `,`,
+ `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `ServiceIDs:` + fmt.Sprintf("%v", this.ServiceIDs) + `,`,
+ `NodeIDs:` + fmt.Sprintf("%v", this.NodeIDs) + `,`,
+ `DesiredStates:` + fmt.Sprintf("%v", this.DesiredStates) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListTasksResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ListTasksResponse{`,
+ `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CreateServiceRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CreateServiceRequest{`,
+ `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ServiceSpec", "ServiceSpec", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CreateServiceResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CreateServiceResponse{`,
+ `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetServiceRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetServiceRequest{`,
+ `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetServiceResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetServiceResponse{`,
+ `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UpdateServiceRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UpdateServiceRequest{`,
+ `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`,
+ `ServiceVersion:` + strings.Replace(fmt.Sprintf("%v", this.ServiceVersion), "Version", "Version", 1) + `,`,
+ `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ServiceSpec", "ServiceSpec", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UpdateServiceResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UpdateServiceResponse{`,
+ `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RemoveServiceRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RemoveServiceRequest{`,
+ `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RemoveServiceResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RemoveServiceResponse{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListServicesRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ListServicesRequest{`,
+ `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListServicesRequest_Filters", "ListServicesRequest_Filters", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListServicesRequest_Filters) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForLabels := make([]string, 0, len(this.Labels))
+	for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&ListServicesRequest_Filters{`,
+ `Names:` + fmt.Sprintf("%v", this.Names) + `,`,
+ `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListServicesResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ListServicesResponse{`,
+ `Services:` + strings.Replace(fmt.Sprintf("%v", this.Services), "Service", "Service", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CreateNetworkRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CreateNetworkRequest{`,
+ `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "NetworkSpec", "NetworkSpec", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CreateNetworkResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CreateNetworkResponse{`,
+ `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetNetworkRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetNetworkRequest{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetNetworkResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetNetworkResponse{`,
+ `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RemoveNetworkRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RemoveNetworkRequest{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RemoveNetworkResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RemoveNetworkResponse{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListNetworksRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ListNetworksRequest{`,
+ `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListNetworksRequest_Filters", "ListNetworksRequest_Filters", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListNetworksRequest_Filters) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForLabels := make([]string, 0, len(this.Labels))
+	for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&ListNetworksRequest_Filters{`,
+ `Names:` + fmt.Sprintf("%v", this.Names) + `,`,
+ `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListNetworksResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ListNetworksResponse{`,
+ `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "Network", "Network", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RemoveManagerResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RemoveManagerResponse{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetClusterRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetClusterRequest{`,
+ `ClusterID:` + fmt.Sprintf("%v", this.ClusterID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetClusterResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetClusterResponse{`,
+ `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListClustersRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ListClustersRequest{`,
+ `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListClustersRequest_Filters", "ListClustersRequest_Filters", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListClustersRequest_Filters) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForLabels := make([]string, 0, len(this.Labels))
+	for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&ListClustersRequest_Filters{`,
+ `Names:` + fmt.Sprintf("%v", this.Names) + `,`,
+ `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ListClustersResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ListClustersResponse{`,
+ `Clusters:` + strings.Replace(fmt.Sprintf("%v", this.Clusters), "Cluster", "Cluster", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UpdateClusterRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UpdateClusterRequest{`,
+ `ClusterID:` + fmt.Sprintf("%v", this.ClusterID) + `,`,
+ `ClusterVersion:` + strings.Replace(fmt.Sprintf("%v", this.ClusterVersion), "Version", "Version", 1) + `,`,
+ `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ClusterSpec", "ClusterSpec", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UpdateClusterResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UpdateClusterResponse{`,
+ `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringControl(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
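+// Note (explanatory comment, not generator output): each Unmarshal method
+// below is a hand-rolled wire-format decoder. The outer loop reads a varint
+// field key, then splits it into fieldNum = key >> 3 and wireType = key & 7.
+// For example, the single byte 0x0A decodes to fieldNum 1 with wireType 2
+// (length-delimited), the usual prefix for a string or embedded message.
+// Unknown fields fall through to skipControl, which keeps the decoder
+// forward compatible with newer revisions of these messages.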
+func (m *GetNodeRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetNodeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
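+// Note (explanatory comment, not generator output): string fields such as
+// NodeID above are decoded by reading a varint byte length, bounds-checking
+// iNdEx plus that length against the buffer, and slicing directly out of
+// data. The intStringLen < 0 check guards against a declared length large
+// enough to overflow int, which matters on 32-bit platforms.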
+func (m *GetNodeResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetNodeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Node == nil {
+ m.Node = &Node{}
+ }
+ if err := m.Node.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListNodesRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListNodesRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListNodesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Filters == nil {
+ m.Filters = &ListNodesRequest_Filters{}
+ }
+ if err := m.Filters.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListNodesRequest_Filters) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Filters: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Names = append(m.Names, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IDPrefixes = append(m.IDPrefixes, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Memberships", wireType)
+ }
+ var v NodeSpec_Membership
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (NodeSpec_Membership(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Memberships = append(m.Memberships, v)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var v NodeRole
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (NodeRole(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Roles = append(m.Roles, v)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
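+// Note (explanatory comment, not generator output): the Labels map above is
+// decoded as a nested entry message. An outer length (msglen) bounds the
+// entry, then the inner key tag (keykey) and value tag (valuekey) are read
+// as varints before their string payloads. The generated code reads those
+// inner tags without validating them, relying on the fixed entry layout of
+// key = field 1, value = field 2. Memberships and Roles accumulate one enum
+// varint per occurrence of their field tag.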
+func (m *ListNodesResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListNodesResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListNodesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Nodes = append(m.Nodes, &Node{})
+ if err := m.Nodes[len(m.Nodes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UpdateNodeRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateNodeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeVersion", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeVersion == nil {
+ m.NodeVersion = &Version{}
+ }
+ if err := m.NodeVersion.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Spec == nil {
+ m.Spec = &NodeSpec{}
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UpdateNodeResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateNodeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Node == nil {
+ m.Node = &Node{}
+ }
+ if err := m.Node.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RemoveNodeRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RemoveNodeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RemoveNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RemoveNodeResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RemoveNodeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RemoveNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
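+// Note (explanatory comment, not generator output): empty messages such as
+// RemoveNodeResponse still get a full decoder. The switch has only a default
+// arm, so any field present in the payload is skipped, letting a newer peer
+// add fields without breaking this side.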
+func (m *GetTaskRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetTaskRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TaskID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetTaskResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetTaskResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Task == nil {
+ m.Task = &Task{}
+ }
+ if err := m.Task.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RemoveTaskRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RemoveTaskRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RemoveTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TaskID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RemoveTaskResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RemoveTaskResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RemoveTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListTasksRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListTasksRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListTasksRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Filters == nil {
+ m.Filters = &ListTasksRequest_Filters{}
+ }
+ if err := m.Filters.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListTasksRequest_Filters) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Filters: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Names = append(m.Names, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IDPrefixes = append(m.IDPrefixes, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceIDs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceIDs = append(m.ServiceIDs, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeIDs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeIDs = append(m.NodeIDs, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DesiredStates", wireType)
+ }
+ var v TaskState
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (TaskState(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.DesiredStates = append(m.DesiredStates, v)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
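+// Note (explanatory comment, not generator output): DesiredStates above is
+// accepted only in unpacked form. Each occurrence of the field tag (wireType
+// 0) contributes a single TaskState varint, accumulated across continuation
+// bytes exactly like the other varints in this file.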
+func (m *ListTasksResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListTasksResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListTasksResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tasks = append(m.Tasks, &Task{})
+ if err := m.Tasks[len(m.Tasks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CreateServiceRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CreateServiceRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CreateServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Spec == nil {
+ m.Spec = &ServiceSpec{}
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CreateServiceResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CreateServiceResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CreateServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Service == nil {
+ m.Service = &Service{}
+ }
+ if err := m.Service.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetServiceRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetServiceRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType)
+ }
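+ // Strings share the length-delimited encoding used for messages; the raw
+ // bytes are copied directly into a Go string (no UTF-8 validation is
+ // performed here).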
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetServiceResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetServiceResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Service == nil {
+ m.Service = &Service{}
+ }
+ if err := m.Service.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UpdateServiceRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateServiceRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceVersion", wireType)
+ }
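+ // ServiceVersion carries the version the caller last observed; swarmkit
+ // uses it for optimistic concurrency when applying the update. It is
+ // decoded like any other embedded message.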
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ServiceVersion == nil {
+ m.ServiceVersion = &Version{}
+ }
+ if err := m.ServiceVersion.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Spec == nil {
+ m.Spec = &ServiceSpec{}
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UpdateServiceResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateServiceResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Service == nil {
+ m.Service = &Service{}
+ }
+ if err := m.Service.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RemoveServiceRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RemoveServiceRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RemoveServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RemoveServiceResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RemoveServiceResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RemoveServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
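+ // RemoveServiceResponse declares no fields, so every tag lands in the
+ // default branch and is skipped, which keeps old binaries tolerant of
+ // fields added later.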
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListServicesRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListServicesRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListServicesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Filters == nil {
+ m.Filters = &ListServicesRequest_Filters{}
+ }
+ if err := m.Filters.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListServicesRequest_Filters) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Filters: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType)
+ }
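+ // Repeated strings arrive as one length-delimited record per element, so
+ // each occurrence of field 1 appends exactly one entry to m.Names.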
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Names = append(m.Names, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IDPrefixes = append(m.IDPrefixes, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
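+ // Each map<string, string> entry is encoded as a nested message with the
+ // key in field 1 and the value in field 2; the generated code inlines
+ // that decoding. keykey and valuekey receive the two field tags (read to
+ // advance the index but not otherwise validated), and the two
+ // length-prefixed strings become a single Labels entry.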
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListServicesResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListServicesResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListServicesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
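+ // Repeated messages: append a fresh zero value, then unmarshal this
+ // record into the element just appended.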
+ m.Services = append(m.Services, &Service{})
+ if err := m.Services[len(m.Services)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CreateNetworkRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CreateNetworkRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CreateNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Spec == nil {
+ m.Spec = &NetworkSpec{}
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CreateNetworkResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CreateNetworkResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CreateNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Network == nil {
+ m.Network = &Network{}
+ }
+ if err := m.Network.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetNetworkRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetNetworkRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NetworkID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetNetworkResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetNetworkResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Network == nil {
+ m.Network = &Network{}
+ }
+ if err := m.Network.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RemoveNetworkRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RemoveNetworkRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RemoveNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NetworkID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RemoveNetworkResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RemoveNetworkResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RemoveNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListNetworksRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListNetworksRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListNetworksRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Filters == nil {
+ m.Filters = &ListNetworksRequest_Filters{}
+ }
+ if err := m.Filters.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListNetworksRequest_Filters) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Filters: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Names = append(m.Names, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IDPrefixes = append(m.IDPrefixes, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListNetworksResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListNetworksResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListNetworksResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Networks = append(m.Networks, &Network{})
+ if err := m.Networks[len(m.Networks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RemoveManagerResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RemoveManagerResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RemoveManagerResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetClusterRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetClusterRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetClusterRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetClusterResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetClusterResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetClusterResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Cluster == nil {
+ m.Cluster = &Cluster{}
+ }
+ if err := m.Cluster.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListClustersRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListClustersRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListClustersRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Filters == nil {
+ m.Filters = &ListClustersRequest_Filters{}
+ }
+ if err := m.Filters.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListClustersRequest_Filters) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Filters: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Names = append(m.Names, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IDPrefixes = append(m.IDPrefixes, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListClustersResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListClustersResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListClustersResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Clusters = append(m.Clusters, &Cluster{})
+ if err := m.Clusters[len(m.Clusters)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UpdateClusterRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateClusterRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateClusterRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterVersion", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClusterVersion == nil {
+ m.ClusterVersion = &Version{}
+ }
+ if err := m.ClusterVersion.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Spec == nil {
+ m.Spec = &ClusterSpec{}
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UpdateClusterResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateClusterResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateClusterResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Cluster == nil {
+ m.Cluster = &Cluster{}
+ }
+ if err := m.Cluster.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
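+// skipControl reports how many bytes the next field in data occupies, so the
+// Unmarshal methods above can step over fields they do not recognize. It
+// dispatches on the wire type: 0 varint, 1 fixed64, 2 length-delimited,
+// 3 and 4 (deprecated) group start/end, 5 fixed32. A sketch of how the
+// default branches above consume it (names mirror those branches):
+//
+//	skippy, err := skipControl(data[iNdEx:])
+//	if err != nil {
+//		return err
+//	}
+//	iNdEx += skippy // resume at the next field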
+func skipControl(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthControl
+ }
+ iNdEx += length
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ start := iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipControl(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
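+// Sentinel errors returned by the Unmarshal methods and skipControl above.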
+var (
+ ErrInvalidLengthControl = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowControl = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorControl = []byte{
+ // 1391 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0x4f, 0x6f, 0x1b, 0x45,
+ 0x14, 0xc7, 0x4e, 0x1b, 0xd7, 0xcf, 0x75, 0xd2, 0x4c, 0x1d, 0x11, 0xb9, 0x21, 0x45, 0x5b, 0x48,
+ 0x13, 0x29, 0x38, 0xe0, 0xa8, 0x22, 0x80, 0x04, 0x22, 0x09, 0x45, 0x16, 0x6d, 0xa8, 0x36, 0x14,
+ 0x71, 0x8b, 0x1c, 0x7b, 0x1a, 0x96, 0xd8, 0x5e, 0xb3, 0xbb, 0x49, 0x1b, 0x71, 0xe1, 0x63, 0xf0,
+ 0x19, 0x38, 0x70, 0xe7, 0xc4, 0x81, 0x4b, 0xc5, 0x89, 0x0b, 0x12, 0x48, 0x08, 0xb5, 0xfd, 0x00,
+ 0x88, 0x0f, 0xc0, 0x81, 0x99, 0x9d, 0x37, 0xbb, 0xeb, 0xf1, 0xec, 0xac, 0x1d, 0xa7, 0xa2, 0x07,
+ 0xab, 0xde, 0xd9, 0xdf, 0xfb, 0x37, 0xef, 0xf7, 0x5e, 0xde, 0x73, 0xa1, 0xdc, 0x72, 0x7b, 0x81,
+ 0xe7, 0x76, 0x6a, 0x7d, 0xcf, 0x0d, 0x5c, 0x42, 0xda, 0x6e, 0xeb, 0x88, 0x7a, 0x35, 0xff, 0x61,
+ 0xd3, 0xeb, 0x1e, 0x39, 0x41, 0xed, 0xe4, 0xad, 0x6a, 0xc9, 0xef, 0xd3, 0x96, 0x2f, 0x00, 0xd5,
+ 0xb2, 0x7b, 0xf0, 0x15, 0x6d, 0x05, 0xf2, 0xb1, 0x14, 0x9c, 0xf6, 0xa9, 0x7c, 0xa8, 0x1c, 0xba,
+ 0x87, 0x6e, 0xf8, 0x75, 0x9d, 0x7f, 0xc3, 0xd3, 0xab, 0xfd, 0xce, 0xf1, 0xa1, 0xd3, 0x5b, 0x17,
+ 0xff, 0x88, 0x43, 0xeb, 0x16, 0xcc, 0x7c, 0x4c, 0x83, 0x5d, 0xb7, 0x4d, 0x6d, 0xfa, 0xf5, 0x31,
+ 0xf5, 0x03, 0x72, 0x03, 0x0a, 0x3d, 0xf6, 0xb8, 0xef, 0xb4, 0x17, 0x72, 0xaf, 0xe6, 0x56, 0x8a,
+ 0x5b, 0xf0, 0xec, 0xaf, 0xeb, 0xd3, 0x1c, 0xd1, 0xd8, 0xb1, 0xa7, 0xf9, 0xab, 0x46, 0xdb, 0xfa,
+ 0x00, 0x66, 0x23, 0x31, 0xbf, 0xef, 0xf6, 0x7c, 0x4a, 0xd6, 0xe0, 0x02, 0x7f, 0x19, 0x0a, 0x95,
+ 0xea, 0x0b, 0xb5, 0xe1, 0x00, 0x6a, 0x21, 0x3e, 0x44, 0x59, 0x3f, 0x4e, 0xc1, 0x95, 0x3b, 0x8e,
+ 0x1f, 0xaa, 0xf0, 0xa5, 0xe9, 0xdb, 0x50, 0x78, 0xe0, 0x74, 0x02, 0xea, 0xf9, 0xa8, 0x65, 0x4d,
+ 0xa7, 0x45, 0x15, 0xab, 0xdd, 0x16, 0x32, 0xb6, 0x14, 0xae, 0xfe, 0x99, 0x87, 0x02, 0x1e, 0x92,
+ 0x0a, 0x5c, 0xec, 0x35, 0xbb, 0x94, 0x6b, 0x9c, 0x5a, 0x29, 0xda, 0xe2, 0x81, 0xac, 0x43, 0xc9,
+ 0x69, 0xef, 0xf7, 0x3d, 0xfa, 0xc0, 0x79, 0xc4, 0xde, 0xe5, 0xf9, 0xbb, 0xad, 0x19, 0x16, 0x28,
+ 0x34, 0x76, 0xee, 0xe1, 0xa9, 0x0d, 0x4e, 0x5b, 0x7e, 0x27, 0xf7, 0x60, 0xba, 0xd3, 0x3c, 0xa0,
+ 0x1d, 0x7f, 0x61, 0x8a, 0x61, 0x4b, 0xf5, 0xcd, 0x71, 0x3c, 0xab, 0xdd, 0x09, 0x45, 0x3f, 0x62,
+ 0x09, 0x3e, 0xb5, 0x51, 0x0f, 0x69, 0x40, 0xa9, 0x4b, 0xbb, 0x07, 0xec, 0xf5, 0x97, 0x4e, 0xdf,
+ 0x5f, 0xb8, 0xc0, 0xd4, 0xce, 0xd4, 0x6f, 0xa6, 0x5d, 0xdb, 0x1e, 0x4b, 0x7d, 0xed, 0x6e, 0x84,
+ 0xb7, 0x93, 0xb2, 0xa4, 0x0e, 0x17, 0x19, 0x73, 0x58, 0x1c, 0x17, 0x43, 0x25, 0x8b, 0xa9, 0x77,
+ 0xcf, 0x40, 0xb6, 0x80, 0x56, 0xdf, 0x81, 0x52, 0xc2, 0x2b, 0x72, 0x05, 0xa6, 0x8e, 0xe8, 0xa9,
+ 0xc8, 0xb8, 0xcd, 0xbf, 0xf2, 0x8b, 0x3b, 0x69, 0x76, 0x8e, 0x29, 0xbb, 0x1c, 0x7e, 0x26, 0x1e,
+ 0xde, 0xcd, 0x6f, 0xe6, 0xac, 0x6d, 0x98, 0x4b, 0x44, 0x8a, 0xe9, 0xaf, 0xb1, 0x7b, 0xe6, 0x07,
+ 0xe1, 0x3d, 0x9b, 0xf2, 0x2f, 0x60, 0xd6, 0xf7, 0x39, 0x98, 0xbb, 0xdf, 0x6f, 0x37, 0x03, 0x3a,
+ 0x2e, 0xf9, 0xc8, 0xfb, 0x70, 0x39, 0x04, 0x9d, 0xb0, 0xf8, 0x1d, 0xb7, 0x17, 0x3a, 0x58, 0xaa,
+ 0x5f, 0xd3, 0x59, 0xfc, 0x5c, 0x40, 0xec, 0x12, 0x17, 0xc0, 0x07, 0xf2, 0x26, 0x5c, 0xe0, 0x95,
+ 0xc4, 0x32, 0xc9, 0xe5, 0x16, 0x4d, 0x57, 0x6e, 0x87, 0x48, 0x6b, 0x0b, 0x48, 0xd2, 0xd7, 0x33,
+ 0x31, 0x7e, 0x13, 0xe6, 0x6c, 0xda, 0x75, 0x4f, 0xc6, 0x8e, 0xd7, 0xaa, 0x00, 0x49, 0x4a, 0x0a,
+ 0xeb, 0x58, 0xb9, 0x9f, 0x35, 0xfd, 0xa3, 0x84, 0xb2, 0x80, 0x3d, 0x2a, 0xca, 0x38, 0x82, 0x2b,
+ 0xe3, 0xaf, 0xa2, 0xca, 0x15, 0x62, 0x71, 0x1c, 0xfc, 0xa5, 0x29, 0x8e, 0x10, 0x1f, 0xa2, 0xe2,
+ 0x38, 0xc6, 0x36, 0x1d, 0xc5, 0x91, 0xb4, 0x6e, 0xfd, 0x81, 0x9d, 0x80, 0x1f, 0x9e, 0xa1, 0x13,
+ 0x24, 0xc5, 0x86, 0x3b, 0xc1, 0xbf, 0xff, 0x63, 0x27, 0xd0, 0x79, 0xa6, 0xed, 0x04, 0xcc, 0x05,
+ 0x9f, 0x7a, 0x27, 0x4e, 0x8b, 0xf3, 0x40, 0x74, 0x02, 0x74, 0x61, 0x4f, 0x1c, 0x37, 0x76, 0x98,
+ 0x0b, 0x08, 0x69, 0xb4, 0x7d, 0xb2, 0x0c, 0x97, 0x90, 0x35, 0xa2, 0xe4, 0x8b, 0x5b, 0x25, 0x86,
+ 0x2e, 0x08, 0xda, 0xb0, 0xe8, 0x05, 0x6f, 0x7c, 0xb2, 0x03, 0x33, 0xac, 0xd4, 0x1c, 0x8f, 0xb6,
+ 0xf7, 0xfd, 0x80, 0xb1, 0xd7, 0x5f, 0x98, 0x0e, 0x1b, 0xc4, 0x2b, 0x69, 0x29, 0xde, 0xe3, 0x28,
+ 0xbb, 0x8c, 0x42, 0xe1, 0xd3, 0x79, 0x74, 0x0a, 0xbc, 0x89, 0xb8, 0x53, 0x70, 0x42, 0x18, 0x3b,
+ 0x45, 0xc8, 0x10, 0x01, 0xb3, 0x3e, 0x81, 0xca, 0xb6, 0x47, 0x99, 0x2b, 0x78, 0x1b, 0x92, 0x23,
+ 0x1b, 0x58, 0xc6, 0x82, 0x20, 0xd7, 0x75, 0x6a, 0x50, 0x22, 0x51, 0xc9, 0xbb, 0x30, 0xaf, 0x28,
+ 0x43, 0xaf, 0x6e, 0x41, 0x01, 0x6f, 0x18, 0x15, 0x5e, 0x33, 0x28, 0xb4, 0x25, 0xd6, 0xfa, 0x10,
+ 0xe6, 0x58, 0x39, 0x29, 0x9e, 0xad, 0x01, 0xc4, 0x09, 0xc5, 0x82, 0x28, 0xb3, 0x0c, 0x15, 0xa3,
+ 0x7c, 0xda, 0xc5, 0x28, 0x9d, 0x2c, 0x3e, 0x92, 0x54, 0x31, 0x99, 0x3f, 0x3f, 0xe5, 0xa0, 0x22,
+ 0x5a, 0xd5, 0x24, 0x3e, 0x31, 0xe6, 0xcc, 0x4a, 0xf4, 0x18, 0x5d, 0x76, 0x06, 0x65, 0x64, 0xa3,
+ 0xdd, 0x18, 0x68, 0xb4, 0xa3, 0x67, 0x48, 0x09, 0x60, 0xb2, 0x1b, 0xd9, 0x81, 0x8a, 0xe8, 0x3a,
+ 0x13, 0x25, 0xe9, 0x65, 0x98, 0x57, 0xb4, 0x60, 0xfb, 0xfa, 0x39, 0x0f, 0x57, 0x39, 0xc7, 0xf1,
+ 0x3c, 0xea, 0x60, 0x0d, 0xb5, 0x83, 0xad, 0xa7, 0xf5, 0x09, 0x45, 0x72, 0xb8, 0x89, 0x3d, 0xc9,
+ 0x9d, 0x7b, 0x13, 0xdb, 0x53, 0x9a, 0xd8, 0x7b, 0x63, 0x3a, 0xa7, 0xeb, 0x63, 0x93, 0x34, 0x8a,
+ 0x4f, 0xa1, 0x32, 0x68, 0x0d, 0x73, 0xfe, 0x36, 0x5c, 0xc2, 0x1c, 0xc8, 0x76, 0x61, 0x4c, 0x7a,
+ 0x04, 0x8e, 0x9b, 0xc6, 0x2e, 0x0d, 0x1e, 0xba, 0xde, 0xd1, 0x18, 0x4d, 0x03, 0x25, 0x74, 0x4d,
+ 0x23, 0x52, 0x16, 0x53, 0xb2, 0x27, 0x8e, 0x4c, 0x94, 0x94, 0x52, 0x12, 0x6b, 0xdd, 0x0f, 0x9b,
+ 0x86, 0xe2, 0x19, 0x61, 0xd3, 0x04, 0x4b, 0x26, 0xde, 0x57, 0xf8, 0x9d, 0x73, 0x14, 0x65, 0x38,
+ 0x47, 0xf3, 0x31, 0x47, 0x51, 0x96, 0x73, 0x14, 0x01, 0x51, 0x23, 0x39, 0x27, 0x1f, 0xbf, 0x90,
+ 0x65, 0x73, 0xee, 0x6e, 0x46, 0xa5, 0xa4, 0x78, 0x1a, 0x95, 0x12, 0x9e, 0x9f, 0xa1, 0x94, 0x14,
+ 0xc9, 0x17, 0xab, 0x94, 0x52, 0x9c, 0x7b, 0x4e, 0xa5, 0x14, 0x5b, 0x8b, 0x4b, 0x09, 0x73, 0x60,
+ 0x2c, 0x25, 0x99, 0x94, 0x08, 0x1c, 0xe7, 0xeb, 0x6e, 0xb3, 0xd7, 0x3c, 0xa4, 0x5e, 0x94, 0x2f,
+ 0xf1, 0xb7, 0x6f, 0xbb, 0x73, 0xec, 0x07, 0xfc, 0x34, 0x6a, 0xab, 0x2d, 0x71, 0xa2, 0xb4, 0x55,
+ 0xc4, 0x71, 0x2e, 0x20, 0x20, 0xa2, 0x6c, 0xa4, 0x22, 0xa6, 0x2c, 0x42, 0x4c, 0x94, 0x95, 0x52,
+ 0x12, 0x1b, 0xf1, 0x07, 0x5f, 0x9c, 0x81, 0x3f, 0x8a, 0xe4, 0x8b, 0xc5, 0x9f, 0x14, 0xe7, 0x9e,
+ 0x13, 0x7f, 0x62, 0x6b, 0x31, 0x7f, 0xf0, 0xa2, 0x8d, 0xfc, 0x91, 0x59, 0x89, 0xc0, 0x89, 0x91,
+ 0x64, 0x12, 0xaa, 0xf0, 0x91, 0x44, 0xa2, 0xc7, 0x19, 0x49, 0x50, 0x66, 0x8c, 0x91, 0x04, 0xad,
+ 0xeb, 0x46, 0x92, 0xf3, 0x21, 0x6a, 0xfd, 0xb7, 0x39, 0x28, 0x6c, 0x8b, 0x9f, 0x7b, 0x88, 0x03,
+ 0x05, 0xfc, 0x25, 0x85, 0x58, 0x3a, 0xe1, 0xc1, 0x5f, 0x67, 0xaa, 0x37, 0x8c, 0x18, 0x2c, 0xcc,
+ 0xf9, 0x5f, 0x7e, 0xf8, 0xfb, 0xbb, 0xfc, 0x2c, 0x94, 0x43, 0xd0, 0x1b, 0x5d, 0x51, 0xb7, 0xc4,
+ 0x85, 0x62, 0xb4, 0xb7, 0x93, 0xd7, 0x46, 0xf9, 0x01, 0xa3, 0xfa, 0x7a, 0x06, 0xca, 0x6c, 0xd0,
+ 0x03, 0x88, 0xd7, 0x66, 0xa2, 0xd5, 0x35, 0xf4, 0x13, 0x40, 0x75, 0x39, 0x0b, 0x96, 0x69, 0x33,
+ 0x5e, 0x96, 0xf5, 0x36, 0x87, 0xd6, 0x70, 0xbd, 0x4d, 0xcd, 0xce, 0x9d, 0x62, 0x53, 0xe4, 0x90,
+ 0xef, 0x2c, 0xa9, 0x39, 0x4c, 0x2c, 0xcb, 0xa9, 0x39, 0x1c, 0x58, 0x8b, 0xcd, 0x39, 0x0c, 0x37,
+ 0xaa, 0xf4, 0x1c, 0x26, 0x57, 0xcf, 0xf4, 0x1c, 0x0e, 0xac, 0x65, 0x99, 0xf7, 0x19, 0x86, 0x67,
+ 0xb8, 0xcf, 0x64, 0x84, 0xcb, 0x59, 0xb0, 0x4c, 0x9b, 0xf1, 0x46, 0xa4, 0xb7, 0x39, 0xb4, 0x74,
+ 0xe9, 0x6d, 0x0e, 0x2f, 0x56, 0x69, 0x36, 0x1f, 0xc1, 0xe5, 0xe4, 0x04, 0x4a, 0x6e, 0x8e, 0x38,
+ 0x11, 0x57, 0x57, 0xb2, 0x81, 0x66, 0xcb, 0xdf, 0x40, 0x79, 0x60, 0x25, 0x25, 0x5a, 0x8d, 0xba,
+ 0x15, 0xb8, 0xba, 0x3a, 0x02, 0x32, 0xd3, 0xf8, 0xc0, 0xb6, 0xa5, 0x37, 0xae, 0xdb, 0x28, 0xf5,
+ 0xc6, 0xb5, 0xab, 0x9b, 0xc1, 0xf8, 0xc0, 0x52, 0xa5, 0x37, 0xae, 0xdb, 0xde, 0xf4, 0xc6, 0xf5,
+ 0x1b, 0x9a, 0x91, 0x64, 0x38, 0xee, 0xa4, 0x92, 0x6c, 0x70, 0xfa, 0x4d, 0x25, 0x99, 0x3a, 0xca,
+ 0x9a, 0x49, 0x26, 0x67, 0xb3, 0x74, 0x92, 0x29, 0xb3, 0x62, 0x3a, 0xc9, 0xd4, 0x31, 0x2f, 0x93,
+ 0x64, 0x32, 0x60, 0x03, 0xc9, 0x94, 0x98, 0x57, 0x47, 0x40, 0x8e, 0x98, 0x67, 0xa3, 0x71, 0xdd,
+ 0xba, 0x61, 0xca, 0xf3, 0x88, 0xc6, 0x45, 0x9e, 0xf1, 0x6f, 0x70, 0x6a, 0x9e, 0x07, 0x47, 0x93,
+ 0xd4, 0x3c, 0x2b, 0x03, 0x40, 0x46, 0x9e, 0xe5, 0x0c, 0x95, 0x9e, 0x67, 0x65, 0xa6, 0x4b, 0xcf,
+ 0xb3, 0x3a, 0x8e, 0x65, 0xd6, 0xb3, 0x0c, 0xd8, 0x50, 0xcf, 0x4a, 0xcc, 0xab, 0x23, 0x20, 0x8d,
+ 0xc6, 0xb7, 0x16, 0x1f, 0x3f, 0x5d, 0x7a, 0xe9, 0x77, 0xf6, 0xf9, 0xe7, 0xe9, 0x52, 0xee, 0xdb,
+ 0x67, 0x4b, 0xb9, 0xc7, 0xec, 0xf3, 0x2b, 0xfb, 0x3c, 0x61, 0x9f, 0x83, 0xe9, 0xf0, 0x7f, 0x9c,
+ 0x36, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x43, 0x84, 0x5f, 0x00, 0xea, 0x1a, 0x00, 0x00,
+}
diff --git a/vendor/src/github.com/docker/swarmkit/api/control.proto b/vendor/src/github.com/docker/swarmkit/api/control.proto
new file mode 100644
index 0000000000..6150306c1d
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/control.proto
@@ -0,0 +1,275 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "specs.proto";
+import "objects.proto";
+import "types.proto";
+import "gogoproto/gogo.proto";
+import "plugin/plugin.proto";
+
+// Control defines the RPC methods for controlling a cluster.
+service Control {
+ rpc GetNode(GetNodeRequest) returns (GetNodeResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc ListNodes(ListNodesRequest) returns (ListNodesResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc UpdateNode(UpdateNodeRequest) returns (UpdateNodeResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc RemoveNode(RemoveNodeRequest) returns (RemoveNodeResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+
+ rpc GetTask(GetTaskRequest) returns (GetTaskResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc ListTasks(ListTasksRequest) returns (ListTasksResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc RemoveTask(RemoveTaskRequest) returns (RemoveTaskResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+
+ rpc GetService(GetServiceRequest) returns (GetServiceResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc ListServices(ListServicesRequest) returns (ListServicesResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc CreateService(CreateServiceRequest) returns (CreateServiceResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc UpdateService(UpdateServiceRequest) returns (UpdateServiceResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc RemoveService(RemoveServiceRequest) returns (RemoveServiceResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+
+ rpc GetNetwork(GetNetworkRequest) returns (GetNetworkResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc ListNetworks(ListNetworksRequest) returns (ListNetworksResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc CreateNetwork(CreateNetworkRequest) returns (CreateNetworkResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc RemoveNetwork(RemoveNetworkRequest) returns (RemoveNetworkResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+
+ rpc GetCluster(GetClusterRequest) returns (GetClusterResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+ rpc UpdateCluster(UpdateClusterRequest) returns (UpdateClusterResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+}
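+
+// Note that, per the tls_authorization options above, every Control method
+// requires the "swarm-manager" TLS role, so worker certificates cannot call
+// the cluster control API.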
+
+message GetNodeRequest {
+ string node_id = 1 [(gogoproto.customname) = "NodeID"];
+}
+
+message GetNodeResponse {
+ Node node = 1;
+}
+
+message ListNodesRequest {
+ message Filters {
+ repeated string names = 1;
+ repeated string id_prefixes = 2 [(gogoproto.customname) = "IDPrefixes"];
+ map<string, string> labels = 3;
+ repeated NodeSpec.Membership memberships = 4;
+ repeated NodeRole roles = 5;
+ }
+
+ Filters filters = 1;
+}
+
+message ListNodesResponse {
+ repeated Node nodes = 1;
+}
+
+// UpdateNodeRequest requests an update to the specified node. This may be used
+// to request a new availability for a node, such as PAUSE. Invalid updates
+// will be denied and cause an error.
+message UpdateNodeRequest {
+ string node_id = 1 [(gogoproto.customname) = "NodeID"];
+ Version node_version = 2;
+ NodeSpec spec = 3;
+}
+
+message UpdateNodeResponse {
+ Node node = 1;
+}
+
+// RemoveNodeRequest requests deletion of the specified node from the store.
+message RemoveNodeRequest {
+ string node_id = 1 [(gogoproto.customname) = "NodeID"];
+}
+
+message RemoveNodeResponse {
+}
+
+message GetTaskRequest {
+ string task_id = 1 [(gogoproto.customname) = "TaskID"];
+}
+
+message GetTaskResponse {
+ Task task = 1;
+}
+
+message RemoveTaskRequest {
+ string task_id = 1 [(gogoproto.customname) = "TaskID"];
+}
+
+message RemoveTaskResponse {
+}
+
+message ListTasksRequest {
+ message Filters {
+ repeated string names = 1;
+ repeated string id_prefixes = 2 [(gogoproto.customname) = "IDPrefixes"];
+ map<string, string> labels = 3;
+ repeated string service_ids = 4 [(gogoproto.customname) = "ServiceIDs"];
+ repeated string node_ids = 5 [(gogoproto.customname) = "NodeIDs"];
+ repeated docker.swarmkit.v1.TaskState desired_states = 6;
+ }
+
+ Filters filters = 1;
+}
+
+message ListTasksResponse {
+ repeated Task tasks = 1;
+}
+
+message CreateServiceRequest {
+ ServiceSpec spec = 1;
+}
+
+message CreateServiceResponse {
+ Service service = 1;
+}
+
+message GetServiceRequest {
+ string service_id = 1 [(gogoproto.customname) = "ServiceID"];
+}
+
+message GetServiceResponse {
+ Service service = 1;
+}
+
+message UpdateServiceRequest {
+ string service_id = 1 [(gogoproto.customname) = "ServiceID"];
+ Version service_version = 2;
+ ServiceSpec spec = 3;
+}
+
+message UpdateServiceResponse {
+ Service service = 1;
+}
+
+message RemoveServiceRequest {
+ string service_id = 1 [(gogoproto.customname) = "ServiceID"];
+}
+
+message RemoveServiceResponse {
+}
+
+message ListServicesRequest {
+ message Filters {
+ repeated string names = 1;
+ repeated string id_prefixes = 2 [(gogoproto.customname) = "IDPrefixes"];
+ map<string, string> labels = 3;
+ }
+
+ Filters filters = 1;
+}
+
+message ListServicesResponse {
+ repeated Service services = 1;
+}
+
+message CreateNetworkRequest {
+ NetworkSpec spec = 1;
+}
+
+message CreateNetworkResponse {
+ Network network = 1;
+}
+
+message GetNetworkRequest {
+ string name = 1;
+ string network_id = 2 [(gogoproto.customname) = "NetworkID"];
+}
+
+message GetNetworkResponse {
+ Network network = 1;
+}
+
+message RemoveNetworkRequest {
+ string name = 1;
+ string network_id = 2 [(gogoproto.customname) = "NetworkID"];
+}
+
+message RemoveNetworkResponse {}
+
+message ListNetworksRequest {
+ message Filters {
+ repeated string names = 1;
+ repeated string id_prefixes = 2 [(gogoproto.customname) = "IDPrefixes"];
+ map<string, string> labels = 3;
+ }
+
+ Filters filters = 1;
+}
+
+message ListNetworksResponse {
+ repeated Network networks = 1;
+}
+
+message RemoveManagerResponse {}
+
+message GetClusterRequest {
+ string cluster_id = 1 [(gogoproto.customname) = "ClusterID"];
+}
+
+message GetClusterResponse {
+ Cluster cluster = 1;
+}
+
+message ListClustersRequest {
+ message Filters {
+ repeated string names = 1;
+ repeated string id_prefixes = 2 [(gogoproto.customname) = "IDPrefixes"];
+ map<string, string> labels = 3;
+ }
+
+ Filters filters = 1;
+}
+
+message ListClustersResponse {
+ repeated Cluster clusters = 1;
+}
+
+message UpdateClusterRequest {
+ // ClusterID is the cluster ID to update.
+ string cluster_id = 1 [(gogoproto.customname) = "ClusterID"];
+
+ // ClusterVersion is the version of the cluster being updated.
+ Version cluster_version = 2;
+
+ // Spec is the new spec to apply to the cluster.
+ ClusterSpec spec = 3;
+}
+
+message UpdateClusterResponse {
+ Cluster cluster = 1;
+}
diff --git a/vendor/src/github.com/docker/swarmkit/api/dispatcher.pb.go b/vendor/src/github.com/docker/swarmkit/api/dispatcher.pb.go
new file mode 100644
index 0000000000..ada37e89b7
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/dispatcher.pb.go
@@ -0,0 +1,2440 @@
+// Code generated by protoc-gen-gogo.
+// source: dispatcher.proto
+// DO NOT EDIT!
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/docker/swarmkit/protobuf/plugin"
+import docker_swarmkit_v11 "github.com/docker/swarmkit/api/duration"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
+import codes "google.golang.org/grpc/codes"
+import metadata "google.golang.org/grpc/metadata"
+import transport "google.golang.org/grpc/transport"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// SessionRequest starts a session.
+type SessionRequest struct {
+ Description *NodeDescription `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
+}
+
+func (m *SessionRequest) Reset() { *m = SessionRequest{} }
+func (*SessionRequest) ProtoMessage() {}
+func (*SessionRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{0} }
+
+// SessionMessage instructs an agent on various actions as part of the current
+// session. An agent should act immediately on the contents.
+type SessionMessage struct {
+ // SessionID is allocated after a successful registration. It should be
+ // used on all RPC calls after registration. A dispatcher may choose to
+ // change the SessionID, at which time an agent must re-register and obtain
+ // a new one.
+ //
+ // All Dispatcher calls after register should include the SessionID. If the
+ // Dispatcher so chooses, it may reject the call with an InvalidArgument
+ // error code, at which time the agent should call Register to start a new
+ // session.
+ //
+ // As a rule, once an agent has a SessionID, it should never save it to
+ // disk or otherwise try to reuse it. If the agent loses its SessionID, it
+ // must start a new session through a call to Register. A Dispatcher may
+ // choose to reuse the SessionID, if it sees fit, but it is not advised.
+ //
+ // The actual implementation of the SessionID is Dispatcher specific and
+ // should be treated as opaque by agents.
+ //
+ // From a Dispatcher perspective, there are many ways to use the SessionID
+ // to ensure uniqueness of a set of client RPC calls. One method is to keep
+ // the SessionID unique to every call to Register in a single Dispatcher
+ // instance. This ensures that the SessionID represents the unique
+ // session from a single Agent to a Manager. If the Agent restarts, we
+ // allocate a new session, since the restarted Agent is not aware of the
+ // previous SessionID.
+ //
+ // The most compelling use case is to support duplicate node detection. If
+ // one clones a virtual machine, including certificate material, two nodes
+ // may end up with the same identity. This can also happen if two identical
+ // agent processes are running on the same node. If the SessionID is
+ // replicated through the cluster, we can immediately detect the condition
+ // and address it.
+ //
+ // Extending from the case above, we can actually detect a compromised
+ // identity. Coupled with provisions to rebuild node identity, we can ban
+ // the compromised node identity and have the nodes re-authenticate and
+ // build a new identity. An administrator can then re-authorize the
+ // compromised nodes if it was a mistake, or ensure that a misbehaved
+ // node can no longer connect to the cluster.
+ //
+ // We considered placing this field in a gRPC header. Because this is a
+ // critical feature of the protocol, we thought it should be represented
+ // directly in the RPC message set.
+ SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
+ // Node identifies the registering node.
+ Node *Node `protobuf:"bytes,2,opt,name=node" json:"node,omitempty"`
+ // Managers provides a weighted list of alternative dispatchers.
+ Managers []*WeightedPeer `protobuf:"bytes,3,rep,name=managers" json:"managers,omitempty"`
+ // Symmetric encryption key distributed by the lead manager. Used by agents
+ // for securing network bootstrapping and communication.
+ NetworkBootstrapKeys []*EncryptionKey `protobuf:"bytes,4,rep,name=network_bootstrap_keys,json=networkBootstrapKeys" json:"network_bootstrap_keys,omitempty"`
+}
+
+func (m *SessionMessage) Reset() { *m = SessionMessage{} }
+func (*SessionMessage) ProtoMessage() {}
+func (*SessionMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{1} }
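+
+// Illustrative sketch, not generated output: a minimal agent-side loop under
+// the session contract documented on SessionID above. exampleSessionLoop is a
+// hypothetical helper, assuming an established DispatcherClient; when the
+// stream fails, the caller opens a brand-new Session rather than reusing the
+// old SessionID.
+func exampleSessionLoop(ctx context.Context, c DispatcherClient, desc *NodeDescription) error {
+ stream, err := c.Session(ctx, &SessionRequest{Description: desc})
+ if err != nil {
+ return err
+ }
+ for {
+ msg, err := stream.Recv()
+ if err != nil {
+ return err // the caller should re-register with a fresh Session call
+ }
+ _ = msg.SessionID // include on all subsequent RPC calls for this session
+ }
+}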
+
+// HeartbeatRequest provides identifying properties for a single heartbeat.
+type HeartbeatRequest struct {
+ SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
+}
+
+func (m *HeartbeatRequest) Reset() { *m = HeartbeatRequest{} }
+func (*HeartbeatRequest) ProtoMessage() {}
+func (*HeartbeatRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{2} }
+
+type HeartbeatResponse struct {
+ // Period is the duration to wait before sending the next heartbeat.
+ // Well-behaved agents should update this on every heartbeat round trip.
+ Period docker_swarmkit_v11.Duration `protobuf:"bytes,1,opt,name=period" json:"period"`
+}
+
+func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} }
+func (*HeartbeatResponse) ProtoMessage() {}
+func (*HeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{3} }
+
+type UpdateTaskStatusRequest struct {
+ // Tasks should contain all statuses for running tasks. Only the status
+ // field needs to be set; the spec is not required.
+ SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
+ Updates []*UpdateTaskStatusRequest_TaskStatusUpdate `protobuf:"bytes,3,rep,name=updates" json:"updates,omitempty"`
+}
+
+func (m *UpdateTaskStatusRequest) Reset() { *m = UpdateTaskStatusRequest{} }
+func (*UpdateTaskStatusRequest) ProtoMessage() {}
+func (*UpdateTaskStatusRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptorDispatcher, []int{4}
+}
+
+type UpdateTaskStatusRequest_TaskStatusUpdate struct {
+ TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
+ Status *TaskStatus `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"`
+}
+
+func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Reset() {
+ *m = UpdateTaskStatusRequest_TaskStatusUpdate{}
+}
+func (*UpdateTaskStatusRequest_TaskStatusUpdate) ProtoMessage() {}
+func (*UpdateTaskStatusRequest_TaskStatusUpdate) Descriptor() ([]byte, []int) {
+ return fileDescriptorDispatcher, []int{4, 0}
+}
+
+type UpdateTaskStatusResponse struct {
+}
+
+func (m *UpdateTaskStatusResponse) Reset() { *m = UpdateTaskStatusResponse{} }
+func (*UpdateTaskStatusResponse) ProtoMessage() {}
+func (*UpdateTaskStatusResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptorDispatcher, []int{5}
+}
+
+type TasksRequest struct {
+ SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
+}
+
+func (m *TasksRequest) Reset() { *m = TasksRequest{} }
+func (*TasksRequest) ProtoMessage() {}
+func (*TasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{6} }
+
+type TasksMessage struct {
+ // Tasks is the set of tasks that should be running on the node.
+ // Tasks outside of this set running on the node should be terminated.
+ Tasks []*Task `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"`
+}
+
+func (m *TasksMessage) Reset() { *m = TasksMessage{} }
+func (*TasksMessage) ProtoMessage() {}
+func (*TasksMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{7} }
+
+func init() {
+ proto.RegisterType((*SessionRequest)(nil), "docker.swarmkit.v1.SessionRequest")
+ proto.RegisterType((*SessionMessage)(nil), "docker.swarmkit.v1.SessionMessage")
+ proto.RegisterType((*HeartbeatRequest)(nil), "docker.swarmkit.v1.HeartbeatRequest")
+ proto.RegisterType((*HeartbeatResponse)(nil), "docker.swarmkit.v1.HeartbeatResponse")
+ proto.RegisterType((*UpdateTaskStatusRequest)(nil), "docker.swarmkit.v1.UpdateTaskStatusRequest")
+ proto.RegisterType((*UpdateTaskStatusRequest_TaskStatusUpdate)(nil), "docker.swarmkit.v1.UpdateTaskStatusRequest.TaskStatusUpdate")
+ proto.RegisterType((*UpdateTaskStatusResponse)(nil), "docker.swarmkit.v1.UpdateTaskStatusResponse")
+ proto.RegisterType((*TasksRequest)(nil), "docker.swarmkit.v1.TasksRequest")
+ proto.RegisterType((*TasksMessage)(nil), "docker.swarmkit.v1.TasksMessage")
+}
+
+type authenticatedWrapperDispatcherServer struct {
+ local DispatcherServer
+ authorize func(context.Context, []string) error
+}
+
+func NewAuthenticatedWrapperDispatcherServer(local DispatcherServer, authorize func(context.Context, []string) error) DispatcherServer {
+ return &authenticatedWrapperDispatcherServer{
+ local: local,
+ authorize: authorize,
+ }
+}
+
+func (p *authenticatedWrapperDispatcherServer) Session(r *SessionRequest, stream Dispatcher_SessionServer) error {
+
+ if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil {
+ return err
+ }
+ return p.local.Session(r, stream)
+}
+
+func (p *authenticatedWrapperDispatcherServer) Heartbeat(ctx context.Context, r *HeartbeatRequest) (*HeartbeatResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.Heartbeat(ctx, r)
+}
+
+func (p *authenticatedWrapperDispatcherServer) UpdateTaskStatus(ctx context.Context, r *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.UpdateTaskStatus(ctx, r)
+}
+
+func (p *authenticatedWrapperDispatcherServer) Tasks(r *TasksRequest, stream Dispatcher_TasksServer) error {
+
+ if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil {
+ return err
+ }
+ return p.local.Tasks(r, stream)
+}
+
+func (m *SessionRequest) Copy() *SessionRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &SessionRequest{
+ Description: m.Description.Copy(),
+ }
+
+ return o
+}
+
+func (m *SessionMessage) Copy() *SessionMessage {
+ if m == nil {
+ return nil
+ }
+
+ o := &SessionMessage{
+ SessionID: m.SessionID,
+ Node: m.Node.Copy(),
+ }
+
+ if m.Managers != nil {
+ o.Managers = make([]*WeightedPeer, 0, len(m.Managers))
+ for _, v := range m.Managers {
+ o.Managers = append(o.Managers, v.Copy())
+ }
+ }
+
+ if m.NetworkBootstrapKeys != nil {
+ o.NetworkBootstrapKeys = make([]*EncryptionKey, 0, len(m.NetworkBootstrapKeys))
+ for _, v := range m.NetworkBootstrapKeys {
+ o.NetworkBootstrapKeys = append(o.NetworkBootstrapKeys, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *HeartbeatRequest) Copy() *HeartbeatRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &HeartbeatRequest{
+ SessionID: m.SessionID,
+ }
+
+ return o
+}
+
+func (m *HeartbeatResponse) Copy() *HeartbeatResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &HeartbeatResponse{
+ Period: *m.Period.Copy(),
+ }
+
+ return o
+}
+
+func (m *UpdateTaskStatusRequest) Copy() *UpdateTaskStatusRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &UpdateTaskStatusRequest{
+ SessionID: m.SessionID,
+ }
+
+ if m.Updates != nil {
+ o.Updates = make([]*UpdateTaskStatusRequest_TaskStatusUpdate, 0, len(m.Updates))
+ for _, v := range m.Updates {
+ o.Updates = append(o.Updates, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Copy() *UpdateTaskStatusRequest_TaskStatusUpdate {
+ if m == nil {
+ return nil
+ }
+
+ o := &UpdateTaskStatusRequest_TaskStatusUpdate{
+ TaskID: m.TaskID,
+ Status: m.Status.Copy(),
+ }
+
+ return o
+}
+
+func (m *UpdateTaskStatusResponse) Copy() *UpdateTaskStatusResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &UpdateTaskStatusResponse{}
+
+ return o
+}
+
+func (m *TasksRequest) Copy() *TasksRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &TasksRequest{
+ SessionID: m.SessionID,
+ }
+
+ return o
+}
+
+func (m *TasksMessage) Copy() *TasksMessage {
+ if m == nil {
+ return nil
+ }
+
+ o := &TasksMessage{}
+
+ if m.Tasks != nil {
+ o.Tasks = make([]*Task, 0, len(m.Tasks))
+ for _, v := range m.Tasks {
+ o.Tasks = append(o.Tasks, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (this *SessionRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.SessionRequest{")
+ if this.Description != nil {
+ s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SessionMessage) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&api.SessionMessage{")
+ s = append(s, "SessionID: "+fmt.Sprintf("%#v", this.SessionID)+",\n")
+ if this.Node != nil {
+ s = append(s, "Node: "+fmt.Sprintf("%#v", this.Node)+",\n")
+ }
+ if this.Managers != nil {
+ s = append(s, "Managers: "+fmt.Sprintf("%#v", this.Managers)+",\n")
+ }
+ if this.NetworkBootstrapKeys != nil {
+ s = append(s, "NetworkBootstrapKeys: "+fmt.Sprintf("%#v", this.NetworkBootstrapKeys)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *HeartbeatRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.HeartbeatRequest{")
+ s = append(s, "SessionID: "+fmt.Sprintf("%#v", this.SessionID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *HeartbeatResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.HeartbeatResponse{")
+ s = append(s, "Period: "+strings.Replace(this.Period.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UpdateTaskStatusRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.UpdateTaskStatusRequest{")
+ s = append(s, "SessionID: "+fmt.Sprintf("%#v", this.SessionID)+",\n")
+ if this.Updates != nil {
+ s = append(s, "Updates: "+fmt.Sprintf("%#v", this.Updates)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UpdateTaskStatusRequest_TaskStatusUpdate) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.UpdateTaskStatusRequest_TaskStatusUpdate{")
+ s = append(s, "TaskID: "+fmt.Sprintf("%#v", this.TaskID)+",\n")
+ if this.Status != nil {
+ s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UpdateTaskStatusResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&api.UpdateTaskStatusResponse{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TasksRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.TasksRequest{")
+ s = append(s, "SessionID: "+fmt.Sprintf("%#v", this.SessionID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TasksMessage) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.TasksMessage{")
+ if this.Tasks != nil {
+ s = append(s, "Tasks: "+fmt.Sprintf("%#v", this.Tasks)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringDispatcher(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringDispatcher(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion2
+
+// Client API for Dispatcher service
+
+type DispatcherClient interface {
+ // Session starts an agent session with the dispatcher. The session is
+ // started after the first SessionMessage is received.
+ //
+ // Once started, the agent is controlled with a stream of SessionMessage.
+ // Agents should listen on the stream at all times for instructions.
+ Session(ctx context.Context, in *SessionRequest, opts ...grpc.CallOption) (Dispatcher_SessionClient, error)
+ // Heartbeat is the heartbeat method for nodes. It returns a new TTL in the
+ // response. A node should send its next heartbeat earlier than now + TTL,
+ // otherwise it will be deregistered from the dispatcher and its status will
+ // be updated to NodeStatus_DOWN.
+ Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error)
+ // UpdateTaskStatus updates the status of a task. A node should send such updates
+ // on every status change of its tasks.
+ //
+ // Whether receiving batch updates or single status updates, this method
+ // should accept them. Errors should only be returned if the entire update
+ // should be retried, due to data loss or other problems.
+ //
+ // If a task is unknown to the dispatcher, the status update should be
+ // accepted regardless.
+ UpdateTaskStatus(ctx context.Context, in *UpdateTaskStatusRequest, opts ...grpc.CallOption) (*UpdateTaskStatusResponse, error)
+ // Tasks is a stream of task states for a node. Each message contains the full
+ // list of tasks that should be running on the node; if a task is not present
+ // in that list, it should be terminated.
+ Tasks(ctx context.Context, in *TasksRequest, opts ...grpc.CallOption) (Dispatcher_TasksClient, error)
+}
+
+type dispatcherClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewDispatcherClient(cc *grpc.ClientConn) DispatcherClient {
+ return &dispatcherClient{cc}
+}
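+
+// Illustrative only: a minimal client call sequence, assuming conn is an
+// established *grpc.ClientConn and id is a SessionID obtained from a
+// SessionMessage:
+//
+// c := NewDispatcherClient(conn)
+// resp, err := c.Heartbeat(ctx, &HeartbeatRequest{SessionID: id})
+// // resp.Period is the TTL; heartbeat again before it elapses.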
+
+func (c *dispatcherClient) Session(ctx context.Context, in *SessionRequest, opts ...grpc.CallOption) (Dispatcher_SessionClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Dispatcher/Session", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &dispatcherSessionClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Dispatcher_SessionClient interface {
+ Recv() (*SessionMessage, error)
+ grpc.ClientStream
+}
+
+type dispatcherSessionClient struct {
+ grpc.ClientStream
+}
+
+func (x *dispatcherSessionClient) Recv() (*SessionMessage, error) {
+ m := new(SessionMessage)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *dispatcherClient) Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error) {
+ out := new(HeartbeatResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Dispatcher/Heartbeat", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *dispatcherClient) UpdateTaskStatus(ctx context.Context, in *UpdateTaskStatusRequest, opts ...grpc.CallOption) (*UpdateTaskStatusResponse, error) {
+ out := new(UpdateTaskStatusResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Dispatcher/UpdateTaskStatus", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *dispatcherClient) Tasks(ctx context.Context, in *TasksRequest, opts ...grpc.CallOption) (Dispatcher_TasksClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[1], c.cc, "/docker.swarmkit.v1.Dispatcher/Tasks", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &dispatcherTasksClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Dispatcher_TasksClient interface {
+ Recv() (*TasksMessage, error)
+ grpc.ClientStream
+}
+
+type dispatcherTasksClient struct {
+ grpc.ClientStream
+}
+
+func (x *dispatcherTasksClient) Recv() (*TasksMessage, error) {
+ m := new(TasksMessage)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// Server API for Dispatcher service
+
+type DispatcherServer interface {
+ // Session starts an agent session with the dispatcher. The session is
+ // started after the first SessionMessage is received.
+ //
+ // Once started, the agent is controlled with a stream of SessionMessage.
+ // Agents should listen on the stream at all times for instructions.
+ Session(*SessionRequest, Dispatcher_SessionServer) error
+ // Heartbeat is the heartbeat method for nodes. It returns a new TTL in the
+ // response. A node should send its next heartbeat earlier than now + TTL,
+ // otherwise it will be deregistered from the dispatcher and its status will
+ // be updated to NodeStatus_DOWN.
+ Heartbeat(context.Context, *HeartbeatRequest) (*HeartbeatResponse, error)
+ // UpdateTaskStatus updates the status of a task. A node should send such updates
+ // on every status change of its tasks.
+ //
+ // Whether receiving batch updates or single status updates, this method
+ // should accept them. Errors should only be returned if the entire update
+ // should be retried, due to data loss or other problems.
+ //
+ // If a task is unknown to the dispatcher, the status update should be
+ // accepted regardless.
+ UpdateTaskStatus(context.Context, *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error)
+ // Tasks is a stream of task states for a node. Each message contains the full
+ // list of tasks that should be running on the node; if a task is not present
+ // in that list, it should be terminated.
+ Tasks(*TasksRequest, Dispatcher_TasksServer) error
+}
+
+func RegisterDispatcherServer(s *grpc.Server, srv DispatcherServer) {
+ s.RegisterService(&_Dispatcher_serviceDesc, srv)
+}
+
+func _Dispatcher_Session_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SessionRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(DispatcherServer).Session(m, &dispatcherSessionServer{stream})
+}
+
+type Dispatcher_SessionServer interface {
+ Send(*SessionMessage) error
+ grpc.ServerStream
+}
+
+type dispatcherSessionServer struct {
+ grpc.ServerStream
+}
+
+func (x *dispatcherSessionServer) Send(m *SessionMessage) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Dispatcher_Heartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HeartbeatRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DispatcherServer).Heartbeat(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Dispatcher/Heartbeat",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DispatcherServer).Heartbeat(ctx, req.(*HeartbeatRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Dispatcher_UpdateTaskStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateTaskStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DispatcherServer).UpdateTaskStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Dispatcher/UpdateTaskStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DispatcherServer).UpdateTaskStatus(ctx, req.(*UpdateTaskStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Dispatcher_Tasks_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(TasksRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(DispatcherServer).Tasks(m, &dispatcherTasksServer{stream})
+}
+
+type Dispatcher_TasksServer interface {
+ Send(*TasksMessage) error
+ grpc.ServerStream
+}
+
+type dispatcherTasksServer struct {
+ grpc.ServerStream
+}
+
+func (x *dispatcherTasksServer) Send(m *TasksMessage) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+var _Dispatcher_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "docker.swarmkit.v1.Dispatcher",
+ HandlerType: (*DispatcherServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Heartbeat",
+ Handler: _Dispatcher_Heartbeat_Handler,
+ },
+ {
+ MethodName: "UpdateTaskStatus",
+ Handler: _Dispatcher_UpdateTaskStatus_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Session",
+ Handler: _Dispatcher_Session_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "Tasks",
+ Handler: _Dispatcher_Tasks_Handler,
+ ServerStreams: true,
+ },
+ },
+}
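+
+// Illustrative only: a minimal sketch of wiring the service descriptor into a
+// gRPC server, assuming impl is a concrete DispatcherServer and lis is a
+// net.Listener:
+//
+// s := grpc.NewServer()
+// RegisterDispatcherServer(s, impl)
+// _ = s.Serve(lis)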
+
+func (m *SessionRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *SessionRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Description != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(m.Description.Size()))
+ n1, err := m.Description.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ return i, nil
+}
+
+func (m *SessionMessage) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *SessionMessage) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.SessionID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(len(m.SessionID)))
+ i += copy(data[i:], m.SessionID)
+ }
+ if m.Node != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(m.Node.Size()))
+ n2, err := m.Node.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ if len(m.Managers) > 0 {
+ for _, msg := range m.Managers {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.NetworkBootstrapKeys) > 0 {
+ for _, msg := range m.NetworkBootstrapKeys {
+ data[i] = 0x22
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *HeartbeatRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HeartbeatRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.SessionID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(len(m.SessionID)))
+ i += copy(data[i:], m.SessionID)
+ }
+ return i, nil
+}
+
+func (m *HeartbeatResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HeartbeatResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(m.Period.Size()))
+ n3, err := m.Period.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ return i, nil
+}
+
+func (m *UpdateTaskStatusRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *UpdateTaskStatusRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.SessionID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(len(m.SessionID)))
+ i += copy(data[i:], m.SessionID)
+ }
+ if len(m.Updates) > 0 {
+ for _, msg := range m.Updates {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *UpdateTaskStatusRequest_TaskStatusUpdate) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.TaskID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(len(m.TaskID)))
+ i += copy(data[i:], m.TaskID)
+ }
+ if m.Status != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(m.Status.Size()))
+ n4, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
+ return i, nil
+}
+
+func (m *UpdateTaskStatusResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *UpdateTaskStatusResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *TasksRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *TasksRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.SessionID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(len(m.SessionID)))
+ i += copy(data[i:], m.SessionID)
+ }
+ return i, nil
+}
+
+func (m *TasksMessage) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *TasksMessage) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Tasks) > 0 {
+ for _, msg := range m.Tasks {
+ data[i] = 0xa
+ i++
+ i = encodeVarintDispatcher(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func encodeFixed64Dispatcher(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Dispatcher(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintDispatcher(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
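+
+// Illustrative only: encodeVarintDispatcher emits base-128 varints, 7 bits
+// per byte, least-significant group first, with the high bit as a
+// continuation flag; the tag bytes written by the MarshalTo methods above
+// (0xa, 0x12, 0x1a, 0x22) are (field_number<<3)|wire_type keys. For example:
+//
+// buf := make([]byte, 2)
+// n := encodeVarintDispatcher(buf, 0, 300) // buf == []byte{0xAC, 0x02}, n == 2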
+
+type raftProxyDispatcherServer struct {
+ local DispatcherServer
+ connSelector *raftpicker.ConnSelector
+ cluster raftpicker.RaftCluster
+ ctxMods []func(context.Context) (context.Context, error)
+}
+
+func NewRaftProxyDispatcherServer(local DispatcherServer, connSelector *raftpicker.ConnSelector, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) DispatcherServer {
+ redirectChecker := func(ctx context.Context) (context.Context, error) {
+ s, ok := transport.StreamFromContext(ctx)
+ if !ok {
+ return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ }
+ addr := s.ServerTransport().RemoteAddr().String()
+ md, ok := metadata.FromContext(ctx)
+ if ok && len(md["redirect"]) != 0 {
+ return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ }
+ if !ok {
+ md = metadata.New(map[string]string{})
+ }
+ md["redirect"] = append(md["redirect"], addr)
+ return metadata.NewContext(ctx, md), nil
+ }
+ mods := []func(context.Context) (context.Context, error){redirectChecker}
+ mods = append(mods, ctxMod)
+
+ return &raftProxyDispatcherServer{
+ local: local,
+ cluster: cluster,
+ connSelector: connSelector,
+ ctxMods: mods,
+ }
+}
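+
+// The redirectChecker above allows at most one proxy hop: a non-leader stamps
+// its own address under the "redirect" metadata key before forwarding to the
+// leader, and a request that already carries that key fails with
+// codes.ResourceExhausted instead of being forwarded again, which prevents
+// redirect loops during leader changes.
+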
+func (p *raftProxyDispatcherServer) runCtxMods(ctx context.Context) (context.Context, error) {
+ var err error
+ for _, mod := range p.ctxMods {
+ ctx, err = mod(ctx)
+ if err != nil {
+ return ctx, err
+ }
+ }
+ return ctx, nil
+}
+
+func (p *raftProxyDispatcherServer) Session(r *SessionRequest, stream Dispatcher_SessionServer) error {
+
+ if p.cluster.IsLeader() {
+ return p.local.Session(r, stream)
+ }
+ ctx, err := p.runCtxMods(stream.Context())
+ if err != nil {
+ return err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return err
+ }
+ clientStream, err := NewDispatcherClient(conn).Session(ctx, r)
+
+ if err != nil {
+ return err
+ }
+
+ for {
+ msg, err := clientStream.Recv()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ if err := stream.Send(msg); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *raftProxyDispatcherServer) Heartbeat(ctx context.Context, r *HeartbeatRequest) (*HeartbeatResponse, error) {
+
+ if p.cluster.IsLeader() {
+ return p.local.Heartbeat(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewDispatcherClient(conn).Heartbeat(ctx, r)
+}
+
+func (p *raftProxyDispatcherServer) UpdateTaskStatus(ctx context.Context, r *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) {
+
+ if p.cluster.IsLeader() {
+ return p.local.UpdateTaskStatus(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewDispatcherClient(conn).UpdateTaskStatus(ctx, r)
+}
+
+func (p *raftProxyDispatcherServer) Tasks(r *TasksRequest, stream Dispatcher_TasksServer) error {
+
+ if p.cluster.IsLeader() {
+ return p.local.Tasks(r, stream)
+ }
+ ctx, err := p.runCtxMods(stream.Context())
+ if err != nil {
+ return err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return err
+ }
+ clientStream, err := NewDispatcherClient(conn).Tasks(ctx, r)
+
+ if err != nil {
+ return err
+ }
+
+ for {
+ msg, err := clientStream.Recv()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ if err := stream.Send(msg); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (m *SessionRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Description != nil {
+ l = m.Description.Size()
+ n += 1 + l + sovDispatcher(uint64(l))
+ }
+ return n
+}
+
+func (m *SessionMessage) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.SessionID)
+ if l > 0 {
+ n += 1 + l + sovDispatcher(uint64(l))
+ }
+ if m.Node != nil {
+ l = m.Node.Size()
+ n += 1 + l + sovDispatcher(uint64(l))
+ }
+ if len(m.Managers) > 0 {
+ for _, e := range m.Managers {
+ l = e.Size()
+ n += 1 + l + sovDispatcher(uint64(l))
+ }
+ }
+ if len(m.NetworkBootstrapKeys) > 0 {
+ for _, e := range m.NetworkBootstrapKeys {
+ l = e.Size()
+ n += 1 + l + sovDispatcher(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *HeartbeatRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.SessionID)
+ if l > 0 {
+ n += 1 + l + sovDispatcher(uint64(l))
+ }
+ return n
+}
+
+func (m *HeartbeatResponse) Size() (n int) {
+ var l int
+ _ = l
+ l = m.Period.Size()
+ n += 1 + l + sovDispatcher(uint64(l))
+ return n
+}
+
+func (m *UpdateTaskStatusRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.SessionID)
+ if l > 0 {
+ n += 1 + l + sovDispatcher(uint64(l))
+ }
+ if len(m.Updates) > 0 {
+ for _, e := range m.Updates {
+ l = e.Size()
+ n += 1 + l + sovDispatcher(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.TaskID)
+ if l > 0 {
+ n += 1 + l + sovDispatcher(uint64(l))
+ }
+ if m.Status != nil {
+ l = m.Status.Size()
+ n += 1 + l + sovDispatcher(uint64(l))
+ }
+ return n
+}
+
+func (m *UpdateTaskStatusResponse) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *TasksRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.SessionID)
+ if l > 0 {
+ n += 1 + l + sovDispatcher(uint64(l))
+ }
+ return n
+}
+
+func (m *TasksMessage) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Tasks) > 0 {
+ for _, e := range m.Tasks {
+ l = e.Size()
+ n += 1 + l + sovDispatcher(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovDispatcher(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozDispatcher(x uint64) (n int) {
+ return sovDispatcher(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
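+
+// Illustrative only: sovDispatcher returns the byte length of a varint, e.g.
+// sovDispatcher(127) == 1 and sovDispatcher(300) == 2; sozDispatcher measures
+// the same after the zig-zag mapping used for signed fields, so small
+// negative values also encode compactly.
+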
+func (this *SessionRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SessionRequest{`,
+ `Description:` + strings.Replace(fmt.Sprintf("%v", this.Description), "NodeDescription", "NodeDescription", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SessionMessage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SessionMessage{`,
+ `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`,
+ `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`,
+ `Managers:` + strings.Replace(fmt.Sprintf("%v", this.Managers), "WeightedPeer", "WeightedPeer", 1) + `,`,
+ `NetworkBootstrapKeys:` + strings.Replace(fmt.Sprintf("%v", this.NetworkBootstrapKeys), "EncryptionKey", "EncryptionKey", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *HeartbeatRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&HeartbeatRequest{`,
+ `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *HeartbeatResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&HeartbeatResponse{`,
+ `Period:` + strings.Replace(strings.Replace(this.Period.String(), "Duration", "docker_swarmkit_v11.Duration", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UpdateTaskStatusRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UpdateTaskStatusRequest{`,
+ `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`,
+ `Updates:` + strings.Replace(fmt.Sprintf("%v", this.Updates), "UpdateTaskStatusRequest_TaskStatusUpdate", "UpdateTaskStatusRequest_TaskStatusUpdate", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UpdateTaskStatusRequest_TaskStatusUpdate) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UpdateTaskStatusRequest_TaskStatusUpdate{`,
+ `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`,
+ `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "TaskStatus", "TaskStatus", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UpdateTaskStatusResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UpdateTaskStatusResponse{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TasksRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TasksRequest{`,
+ `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TasksMessage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TasksMessage{`,
+ `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringDispatcher(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *SessionRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SessionRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SessionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Description == nil {
+ m.Description = &NodeDescription{}
+ }
+ if err := m.Description.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDispatcher(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SessionMessage) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SessionMessage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SessionMessage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SessionID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Node == nil {
+ m.Node = &Node{}
+ }
+ if err := m.Node.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Managers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Managers = append(m.Managers, &WeightedPeer{})
+ if err := m.Managers[len(m.Managers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetworkBootstrapKeys", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NetworkBootstrapKeys = append(m.NetworkBootstrapKeys, &EncryptionKey{})
+ if err := m.NetworkBootstrapKeys[len(m.NetworkBootstrapKeys)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDispatcher(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HeartbeatRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HeartbeatRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HeartbeatRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SessionID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDispatcher(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HeartbeatResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HeartbeatResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HeartbeatResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Period.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDispatcher(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UpdateTaskStatusRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateTaskStatusRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateTaskStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SessionID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Updates", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Updates = append(m.Updates, &UpdateTaskStatusRequest_TaskStatusUpdate{})
+ if err := m.Updates[len(m.Updates)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDispatcher(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TaskStatusUpdate: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TaskStatusUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TaskID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Status == nil {
+ m.Status = &TaskStatus{}
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDispatcher(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UpdateTaskStatusResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateTaskStatusResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateTaskStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDispatcher(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TasksRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TasksRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TasksRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SessionID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDispatcher(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TasksMessage) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TasksMessage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TasksMessage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tasks = append(m.Tasks, &Task{})
+ if err := m.Tasks[len(m.Tasks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDispatcher(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDispatcher
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipDispatcher(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthDispatcher
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDispatcher
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipDispatcher(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
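+
+// skipDispatcher advances past one unknown field so that decoding stays
+// forward compatible with tags this version does not know: wire type 0
+// skips a varint, 1 skips eight fixed bytes, 2 skips a length-prefixed
+// payload, 5 skips four fixed bytes, and 3/4 bracket deprecated groups.
+// For example (illustrative only), skipDispatcher([]byte{0x09, 1, 2, 3, 4,
+// 5, 6, 7, 8}) returns 9: tag 0x09 is field 1 with wire type 1, so the
+// eight bytes that follow are consumed as a single fixed64 value.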
+
+var (
+ ErrInvalidLengthDispatcher = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowDispatcher = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorDispatcher = []byte{
+ // 626 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x54, 0xdf, 0x6e, 0x12, 0x4f,
+ 0x14, 0xee, 0x02, 0x85, 0x1f, 0x87, 0xf2, 0x0b, 0x8e, 0x8d, 0xdd, 0x6c, 0x2a, 0xc5, 0x45, 0x13,
+ 0x13, 0xeb, 0xa2, 0x35, 0xf1, 0xc2, 0x10, 0x63, 0x08, 0x4d, 0x6c, 0x8c, 0x7f, 0xb2, 0x55, 0xb9,
+ 0x24, 0x0b, 0x3b, 0xa1, 0x2b, 0x76, 0x67, 0x9d, 0x19, 0xac, 0x5c, 0x98, 0x98, 0x78, 0x6f, 0x8c,
+ 0x57, 0x3e, 0x85, 0xcf, 0x41, 0xbc, 0xf2, 0xd2, 0xab, 0xc6, 0xf6, 0x01, 0x8c, 0x8f, 0xe0, 0xec,
+ 0xec, 0x2c, 0x20, 0x5d, 0xb4, 0xf4, 0x62, 0xc2, 0xcc, 0x99, 0xef, 0xfb, 0xce, 0xc7, 0x39, 0x67,
+ 0x16, 0x4a, 0xae, 0xc7, 0x02, 0x87, 0x77, 0xf7, 0x30, 0xb5, 0x02, 0x4a, 0x38, 0x41, 0xc8, 0x25,
+ 0xdd, 0xbe, 0x38, 0xb1, 0x03, 0x87, 0xee, 0xf7, 0x3d, 0x6e, 0xbd, 0xbe, 0x69, 0x14, 0xf8, 0x30,
+ 0xc0, 0x2c, 0x02, 0x18, 0x45, 0xd2, 0x79, 0x81, 0xbb, 0x3c, 0x3e, 0xae, 0xf6, 0x48, 0x8f, 0xc8,
+ 0x6d, 0x2d, 0xdc, 0xa9, 0xe8, 0xf9, 0xe0, 0xe5, 0xa0, 0xe7, 0xf9, 0xb5, 0xe8, 0x47, 0x05, 0xd7,
+ 0xdc, 0x01, 0x75, 0xb8, 0x47, 0xfc, 0x5a, 0xbc, 0x89, 0x2e, 0xcc, 0x16, 0xfc, 0xbf, 0x8b, 0x19,
+ 0x13, 0x01, 0x1b, 0xbf, 0x1a, 0x60, 0xc6, 0xd1, 0x36, 0x14, 0x5c, 0xcc, 0xba, 0xd4, 0x0b, 0x42,
+ 0x98, 0xae, 0x55, 0xb4, 0xab, 0x85, 0xad, 0xaa, 0x75, 0xd2, 0x9b, 0xf5, 0x88, 0xb8, 0xb8, 0x39,
+ 0x81, 0xda, 0xd3, 0x3c, 0xf3, 0x7d, 0x6a, 0xac, 0xfc, 0x50, 0xfc, 0x38, 0x3d, 0x8c, 0x36, 0x01,
+ 0x58, 0x14, 0x69, 0x7b, 0xae, 0x14, 0xce, 0x37, 0x8a, 0xc7, 0x87, 0x1b, 0x79, 0x85, 0xdb, 0x69,
+ 0xda, 0x79, 0x05, 0xd8, 0x71, 0x05, 0x3a, 0xe3, 0x8b, 0x04, 0x7a, 0x4a, 0x1a, 0xd0, 0xe7, 0x19,
+ 0xb0, 0x25, 0x0a, 0xd5, 0xe1, 0xbf, 0x7d, 0xc7, 0x17, 0x59, 0x28, 0xd3, 0xd3, 0x95, 0xb4, 0x60,
+ 0x54, 0x92, 0x18, 0x2d, 0xec, 0xf5, 0xf6, 0x38, 0x76, 0x9f, 0x60, 0x4c, 0xed, 0x31, 0x03, 0xb5,
+ 0xe0, 0x82, 0x8f, 0xf9, 0x01, 0xa1, 0xfd, 0x76, 0x87, 0x10, 0xce, 0x38, 0x75, 0x82, 0x76, 0x1f,
+ 0x0f, 0x99, 0x9e, 0x91, 0x5a, 0x97, 0x92, 0xb4, 0xb6, 0xfd, 0x2e, 0x1d, 0xca, 0x3f, 0xfb, 0x00,
+ 0x0f, 0xed, 0x55, 0x25, 0xd0, 0x88, 0xf9, 0x22, 0xc8, 0xcc, 0x7b, 0x50, 0xba, 0x8f, 0x1d, 0xca,
+ 0x3b, 0xd8, 0xe1, 0x71, 0x81, 0x17, 0x2a, 0x83, 0xf9, 0x18, 0xce, 0x4d, 0x29, 0xb0, 0x80, 0xf8,
+ 0x0c, 0xa3, 0x3b, 0x90, 0x0d, 0x30, 0xf5, 0x88, 0xab, 0xda, 0xb3, 0x9e, 0xe4, 0xaf, 0xa9, 0x3a,
+ 0xdd, 0xc8, 0x8c, 0x0e, 0x37, 0x96, 0x6c, 0xc5, 0x30, 0x3f, 0xa6, 0x60, 0xed, 0x59, 0xe0, 0x3a,
+ 0x1c, 0x3f, 0x75, 0x58, 0x7f, 0x97, 0x3b, 0x7c, 0xc0, 0xce, 0x64, 0x0d, 0x3d, 0x87, 0xdc, 0x40,
+ 0x0a, 0xc5, 0x25, 0xaf, 0x27, 0xd9, 0x98, 0x93, 0xcb, 0x9a, 0x44, 0x22, 0x84, 0x1d, 0x8b, 0x19,
+ 0x04, 0x4a, 0xb3, 0x97, 0xa8, 0x0a, 0x39, 0x2e, 0x62, 0x13, 0x5b, 0x20, 0x6c, 0x65, 0x43, 0x98,
+ 0xf0, 0x94, 0x0d, 0xaf, 0x84, 0xa1, 0xdb, 0x90, 0x65, 0x92, 0xa4, 0x86, 0xa6, 0x9c, 0xe4, 0x67,
+ 0xca, 0x89, 0x42, 0x9b, 0x06, 0xe8, 0x27, 0x5d, 0x46, 0xa5, 0x36, 0xeb, 0xb0, 0x12, 0x46, 0xcf,
+ 0x56, 0x22, 0xf3, 0xae, 0x62, 0xc7, 0x4f, 0xc0, 0x82, 0xe5, 0xd0, 0x2b, 0x13, 0xc4, 0xf4, 0xbc,
+ 0xa9, 0x0e, 0x09, 0x76, 0x04, 0xdb, 0xfa, 0x90, 0x01, 0x68, 0x8e, 0xbf, 0x13, 0xe8, 0x0d, 0xe4,
+ 0x54, 0x1a, 0x64, 0x26, 0x51, 0xff, 0x7c, 0xca, 0xc6, 0xdf, 0x30, 0xca, 0x91, 0x59, 0xfd, 0xfa,
+ 0xe5, 0xe7, 0xe7, 0xd4, 0x45, 0x58, 0x91, 0x98, 0xeb, 0xe1, 0x08, 0x63, 0x0a, 0xc5, 0xe8, 0xa4,
+ 0x1e, 0xc8, 0x0d, 0x0d, 0xbd, 0x85, 0xfc, 0x78, 0x0c, 0xd1, 0xe5, 0x24, 0xdd, 0xd9, 0x39, 0x37,
+ 0xae, 0xfc, 0x03, 0xa5, 0x0a, 0x7c, 0x1a, 0x03, 0xe8, 0x93, 0x06, 0xa5, 0xd9, 0x16, 0xa1, 0x6b,
+ 0x0b, 0x8c, 0x9b, 0xb1, 0x79, 0x3a, 0xf0, 0x22, 0xa6, 0x28, 0x2c, 0xcb, 0xe6, 0xa2, 0xca, 0xbc,
+ 0x36, 0x8e, 0xb3, 0xcf, 0x47, 0x2c, 0xd6, 0x87, 0xc6, 0xfa, 0xe8, 0xa8, 0xbc, 0xf4, 0x5d, 0xac,
+ 0x5f, 0x47, 0x65, 0xed, 0xdd, 0x71, 0x59, 0x1b, 0x89, 0xf5, 0x4d, 0xac, 0x1f, 0x62, 0x75, 0xb2,
+ 0xf2, 0xa3, 0x7e, 0xeb, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x41, 0x8f, 0x02, 0x5c, 0x06,
+ 0x00, 0x00,
+}
diff --git a/vendor/src/github.com/docker/swarmkit/api/dispatcher.proto b/vendor/src/github.com/docker/swarmkit/api/dispatcher.proto
new file mode 100644
index 0000000000..98e79e9ac9
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/dispatcher.proto
@@ -0,0 +1,154 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "types.proto";
+import "objects.proto";
+import "gogoproto/gogo.proto";
+import "plugin/plugin.proto";
+import "duration/duration.proto"; // TODO(stevvooe): use our own until we fix gogoproto/deepcopy
+
+// Dispatcher is the API provided by a manager group for agents to connect to. Agents
+// connect to this service to receive task assignments and report status.
+//
+// API methods on this service are used only by agent nodes.
+service Dispatcher {
+ // Session starts an agent session with the dispatcher. The session is
+ // started after the first SessionMessage is received.
+ //
+ // Once started, the agent is controlled with a stream of SessionMessage.
+ // Agents should listen on the stream at all times for instructions.
+ rpc Session(SessionRequest) returns (stream SessionMessage) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+ };
+
+ // Heartbeat is the heartbeat method for nodes. It returns a new TTL in the
+ // response. A node should send its next heartbeat before now + TTL elapses;
+ // otherwise it will be deregistered from the dispatcher and its status will
+ // be updated to NodeStatus_DOWN.
+ rpc Heartbeat(HeartbeatRequest) returns (HeartbeatResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+ };
+
+ // UpdateTaskStatus updates the status of a task. A node should send such an
+ // update on every status change of its tasks.
+ //
+ // Whether it receives batch updates or single status updates, this method
+ // should accept them. Errors should only be returned if the entire update
+ // needs to be retried, due to data loss or other problems.
+ //
+ // If a task is unknown to the dispatcher, the status update should still be
+ // accepted.
+ rpc UpdateTaskStatus(UpdateTaskStatusRequest) returns (UpdateTaskStatusResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+ };
+
+ // Tasks is a stream of task state for a node. Each message contains the
+ // full list of tasks that should be run on the node; if a task is not
+ // present in that list, it should be terminated.
+ rpc Tasks(TasksRequest) returns (stream TasksMessage) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+ };
+}
+
+// SessionRequest starts a session.
+message SessionRequest {
+ NodeDescription description = 1;
+}
+
+// SessionMessage instructs an agent on various actions as part of the current
+// session. An agent should act immediately on the contents.
+message SessionMessage {
+ // SessionID is allocated after a successful registration. It should be
+ // used on all RPC calls after registration. A dispatcher may choose to
+ // change the SessionID, at which time an agent must re-register and obtain
+ // a new one.
+ //
+ // All Dispatcher calls after register should include the SessionID. If the
+ // Dispatcher so chooses, it may reject the call with an InvalidArgument
+ // error code, at which time the agent should call Register to start a new
+ // session.
+ //
+ // As a rule, once an agent has a SessionID, it should never save it to
+ // disk or otherwise try to reuse it. If the agent loses its SessionID, it
+ // must start a new session through a call to Register. A Dispatcher may
+ // choose to reuse the SessionID if it sees fit, but this is not advised.
+ //
+ // The actual implementation of the SessionID is Dispatcher specific and
+ // should be treated as opaque by agents.
+ //
+ // From a Dispatcher perspective, there are many ways to use the SessionID
+ // to ensure uniqueness of a set of client RPC calls. One method is to keep
+ // the SessionID unique to every call to Register in a single Dispatcher
+ // instance. This ensures that the SessionID represents the unique
+ // session from a single Agent to Manager. If the Agent restarts, we
+ // allocate a new session, since the restarted Agent is not aware of the
+ // new SessionID.
+ //
+ // The most compelling use case is to support duplicate node detection. If
+ // one clones a virtual machine, including certificate material, two nodes
+ // may end up with the same identity. This can also happen if two identical
+ // agent processes are started on the same node. If the SessionID is
+ // replicated through the cluster, we can immediately detect the condition
+ // and address it.
+ //
+ // Extending from the case above, we can actually detect a compromised
+ // identity. Coupled with provisions to rebuild node identity, we can ban
+ // the compromised node identity and have the nodes re-authenticate and
+ // build a new identity. At this time, an administrator can then
+ // re-authorize the compromised nodes, if it was a mistake or ensure that a
+ // misbehaved node can no longer connect to the cluster.
+ //
+ // We considered placing this field in a GRPC header. Because this is a
+ // critical feature of the protocol, we thought it should be represented
+ // directly in the RPC message set.
+ string session_id = 1 [(gogoproto.customname) = "SessionID"];
+
+ // Node identifies the registering node.
+ Node node = 2;
+
+ // Managers provides a weighted list of alternative dispatchers.
+ repeated WeightedPeer managers = 3;
+
+ // Symmetric encryption key distributed by the lead manager. Used by agents
+ // for securing network bootstrapping and communication.
+ repeated EncryptionKey network_bootstrap_keys = 4;
+}
+
+// HeartbeatRequest provides identifying properties for a single heartbeat.
+message HeartbeatRequest {
+ string session_id = 1 [(gogoproto.customname) = "SessionID"];
+}
+
+message HeartbeatResponse {
+ // Period is the duration to wait before sending the next heartbeat.
+ // Well-behaved agents should update this on every heartbeat round trip.
+ Duration period = 1 [(gogoproto.nullable) = false];
+}
+
+message UpdateTaskStatusRequest {
+ // Tasks should contain all statuses for running tasks. Only the status
+ // field must be set. The spec is not required.
+ string session_id = 1 [(gogoproto.customname) = "SessionID"];
+
+ message TaskStatusUpdate {
+ string task_id = 1 [(gogoproto.customname) = "TaskID"];
+ TaskStatus status = 2;
+ }
+
+ repeated TaskStatusUpdate updates = 3;
+}
+
+message UpdateTaskStatusResponse {
+ // void
+}
+
+message TasksRequest {
+ string session_id = 1 [(gogoproto.customname) = "SessionID"];
+}
+
+message TasksMessage {
+ // Tasks is the set of tasks that should be running on the node.
+ // Tasks outside of this set running on the node should be terminated.
+ repeated Task tasks = 1;
+}
+
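The service definition above implies a simple agent loop: open a Session, heartbeat before the dispatcher-assigned TTL expires, and push status changes through UpdateTaskStatus. Below is a minimal sketch of that loop in Go, assuming the DispatcherClient interface generated in dispatcher.pb.go; the helper names, batching, and error handling are illustrative only, not swarmkit's actual agent implementation.

package agent

import (
	"context"
	"time"

	"github.com/docker/swarmkit/api"
)

// heartbeatLoop keeps a session alive, firing each heartbeat a little
// before the dispatcher-assigned TTL elapses, as the comments above
// require. Recovery from an expired session (a new Session call) is left
// to the caller.
func heartbeatLoop(ctx context.Context, client api.DispatcherClient, sessionID string) error {
	for {
		resp, err := client.Heartbeat(ctx, &api.HeartbeatRequest{SessionID: sessionID})
		if err != nil {
			return err
		}
		// Period is the vendored Duration message, not a time.Duration.
		ttl := time.Duration(resp.Period.Seconds)*time.Second +
			time.Duration(resp.Period.Nanos)*time.Nanosecond
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(ttl * 9 / 10): // send before now + TTL
		}
	}
}

// reportStatus sends a single task's status change; a real agent may batch
// several entries into Updates, which the RPC comments above encourage.
func reportStatus(ctx context.Context, client api.DispatcherClient, sessionID, taskID string, status *api.TaskStatus) error {
	_, err := client.UpdateTaskStatus(ctx, &api.UpdateTaskStatusRequest{
		SessionID: sessionID,
		Updates: []*api.UpdateTaskStatusRequest_TaskStatusUpdate{
			{TaskID: taskID, Status: status},
		},
	})
	return err
}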
diff --git a/vendor/src/github.com/docker/swarmkit/api/duration/duration.pb.go b/vendor/src/github.com/docker/swarmkit/api/duration/duration.pb.go
new file mode 100644
index 0000000000..a186e349c9
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/duration/duration.pb.go
@@ -0,0 +1,456 @@
+// Code generated by protoc-gen-gogo.
+// source: duration.proto
+// DO NOT EDIT!
+
+/*
+ Package duration is a generated protocol buffer package.
+
+ It is generated from these files:
+ duration.proto
+
+ It has these top-level messages:
+ Duration
+*/
+package duration
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+const _ = proto.GoGoProtoPackageIsVersion1
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+type Duration struct {
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *Duration) Reset() { *m = Duration{} }
+func (*Duration) ProtoMessage() {}
+func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorDuration, []int{0} }
+
+func init() {
+ proto.RegisterType((*Duration)(nil), "docker.swarmkit.v1.Duration")
+}
+
+func (m *Duration) Copy() *Duration {
+ if m == nil {
+ return nil
+ }
+
+ o := &Duration{
+ Seconds: m.Seconds,
+ Nanos: m.Nanos,
+ }
+
+ return o
+}
+
+func (this *Duration) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&duration.Duration{")
+ s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n")
+ s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringDuration(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringDuration(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+func (m *Duration) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Duration) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Seconds != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintDuration(data, i, uint64(m.Seconds))
+ }
+ if m.Nanos != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintDuration(data, i, uint64(m.Nanos))
+ }
+ return i, nil
+}
+
+func encodeFixed64Duration(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Duration(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintDuration(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
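+
+// encodeVarintDuration writes v least-significant group first, setting the
+// high bit on every byte except the last, and returns the next free offset.
+// An illustrative trace (not part of the generated code) for the classic
+// example value 300 (binary 1_0010_1100):
+//
+//	buf := make([]byte, 2)
+//	n := encodeVarintDuration(buf, 0, 300)
+//	// n == 2, buf == []byte{0xac, 0x02}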
+
+func (m *Duration) Size() (n int) {
+ var l int
+ _ = l
+ if m.Seconds != 0 {
+ n += 1 + sovDuration(uint64(m.Seconds))
+ }
+ if m.Nanos != 0 {
+ n += 1 + sovDuration(uint64(m.Nanos))
+ }
+ return n
+}
+
+func sovDuration(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozDuration(x uint64) (n int) {
+ return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Duration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Duration{`,
+ `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`,
+ `Nanos:` + fmt.Sprintf("%v", this.Nanos) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringDuration(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *Duration) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDuration
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Duration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
+ }
+ m.Seconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDuration
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Seconds |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
+ }
+ m.Nanos = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDuration
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Nanos |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDuration(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDuration
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipDuration(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDuration
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDuration
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDuration
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthDuration
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDuration
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipDuration(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorDuration = []byte{
+ // 193 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0x29, 0x2d, 0x4a,
+ 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xc9, 0x4f, 0xce,
+ 0x4e, 0x2d, 0xd2, 0x2b, 0x2e, 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0x54, 0xb2,
+ 0xe2, 0xe2, 0x70, 0x81, 0xaa, 0x12, 0x92, 0xe0, 0x62, 0x2f, 0x4e, 0x4d, 0xce, 0xcf, 0x4b, 0x29,
+ 0x96, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0e, 0x82, 0x71, 0x85, 0x44, 0xb8, 0x58, 0xf3, 0x12, 0xf3,
+ 0xf2, 0x8b, 0x25, 0x98, 0x80, 0xe2, 0xac, 0x41, 0x10, 0x8e, 0x53, 0xce, 0x89, 0x87, 0x72, 0x0c,
+ 0x37, 0x80, 0xf8, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x80, 0xf8, 0x02, 0x10,
+ 0x3f, 0x00, 0x62, 0x2e, 0xe1, 0xe4, 0xfc, 0x5c, 0xbd, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0x88,
+ 0xfd, 0x49, 0xa5, 0x69, 0x4e, 0xbc, 0x30, 0xcb, 0x02, 0x40, 0x22, 0x01, 0x8c, 0x0b, 0x18, 0x19,
+ 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0xa8, 0x0d, 0x80, 0xaa, 0xd5,
+ 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0xa9, 0x2c, 0x48, 0x2d, 0x4e, 0x62,
+ 0x03, 0x1b, 0x62, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x66, 0x02, 0x1a, 0xa4, 0xd6, 0x00, 0x00,
+ 0x00,
+}
diff --git a/vendor/src/github.com/docker/swarmkit/api/duration/duration.proto b/vendor/src/github.com/docker/swarmkit/api/duration/duration.proto
new file mode 100644
index 0000000000..aa55556e03
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/duration/duration.proto
@@ -0,0 +1,100 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+// TODO(stevvooe): Commented out because of the maddening behavior of
+// Google's Go protobuf implementation.
+//option go_package = "github.com/golang/protobuf/ptypes/duration";
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+message Duration {
+
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive.
+ int64 seconds = 1;
+
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ int32 nanos = 2;
+}
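Because this vendored Duration deliberately mirrors the well-known protobuf type rather than Go's time.Duration (see the TODO on the dispatcher import), callers convert at the boundary. A small illustrative sketch of both directions, assuming the generated Duration struct above; the package and helper names are hypothetical.

package durationutil

import (
	"time"

	"github.com/docker/swarmkit/api/duration"
)

// toStdlib converts the proto message to Go's time.Duration. Spans outside
// time.Duration's roughly +/-292-year range would overflow; that check is
// elided here.
func toStdlib(d *duration.Duration) time.Duration {
	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
}

// fromStdlib splits a time.Duration into whole seconds plus leftover nanos.
// Go's truncating division and remainder keep both fields the same sign,
// matching the normalization rule documented on the nanos field.
func fromStdlib(d time.Duration) *duration.Duration {
	return &duration.Duration{
		Seconds: int64(d / time.Second),
		Nanos:   int32(d % time.Second),
	}
}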
diff --git a/vendor/src/github.com/docker/swarmkit/api/duration/gen.go b/vendor/src/github.com/docker/swarmkit/api/duration/gen.go
new file mode 100644
index 0000000000..cdb1435308
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/duration/gen.go
@@ -0,0 +1,3 @@
+//go:generate protoc -I.:../../vendor:../../vendor/github.com/gogo/protobuf --gogoswarm_out=plugins=grpc+deepcopy+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api/duration,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. duration.proto
+
+package duration
diff --git a/vendor/src/github.com/docker/swarmkit/api/gen.go b/vendor/src/github.com/docker/swarmkit/api/gen.go
new file mode 100644
index 0000000000..46eaf9e4f6
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/gen.go
@@ -0,0 +1,3 @@
+package api
+
+//go:generate protoc -I.:../protobuf:../vendor:../vendor/github.com/gogo/protobuf --gogoswarm_out=plugins=grpc+deepcopy+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto,Mtimestamp/timestamp.proto=github.com/docker/swarmkit/api/timestamp,Mduration/duration.proto=github.com/docker/swarmkit/api/duration,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mplugin/plugin.proto=github.com/docker/swarmkit/protobuf/plugin:. types.proto specs.proto objects.proto control.proto dispatcher.proto ca.proto snapshot.proto raft.proto
diff --git a/vendor/src/github.com/docker/swarmkit/api/objects.pb.go b/vendor/src/github.com/docker/swarmkit/api/objects.pb.go
new file mode 100644
index 0000000000..82dc51e34a
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/objects.pb.go
@@ -0,0 +1,3469 @@
+// Code generated by protoc-gen-gogo.
+// source: objects.proto
+// DO NOT EDIT!
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import docker_swarmkit_v1 "github.com/docker/swarmkit/api/timestamp"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Meta contains metadata about objects. Every object contains a meta field.
+type Meta struct {
+ // Version tracks the current version of the object.
+ Version Version `protobuf:"bytes,1,opt,name=version" json:"version"`
+ // Object timestamps.
+ CreatedAt *docker_swarmkit_v1.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt" json:"created_at,omitempty"`
+ UpdatedAt *docker_swarmkit_v1.Timestamp `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt" json:"updated_at,omitempty"`
+}
+
+func (m *Meta) Reset() { *m = Meta{} }
+func (*Meta) ProtoMessage() {}
+func (*Meta) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{0} }
+
+// Node provides the internal node state as seen by the cluster.
+type Node struct {
+ // ID specifies the identity of the node.
+ ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"`
+ // Spec defines the desired state of the node as specified by the user.
+ // The system will honor this and will *never* modify it.
+ Spec NodeSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"`
+ // Description encapsulates the properties of the Node as reported by the
+ // agent.
+ Description *NodeDescription `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"`
+ // Status provides the current status of the node, as seen by the manager.
+ Status NodeStatus `protobuf:"bytes,5,opt,name=status" json:"status"`
+ // Status of the manager. If the node is not a manager, this field will not
+ // be set.
+ ManagerStatus *ManagerStatus `protobuf:"bytes,6,opt,name=manager_status,json=managerStatus" json:"manager_status,omitempty"`
+ // The node attachment to the ingress network.
+ Attachment *NetworkAttachment `protobuf:"bytes,7,opt,name=attachment" json:"attachment,omitempty"`
+ // Certificate is the TLS certificate issued for the node, if any.
+ Certificate Certificate `protobuf:"bytes,8,opt,name=certificate" json:"certificate"`
+}
+
+func (m *Node) Reset() { *m = Node{} }
+func (*Node) ProtoMessage() {}
+func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{1} }
+
+type Service struct {
+ ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"`
+ Spec ServiceSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"`
+ // Runtime state of service endpoint. This may be different
+ // from the spec version because the user may not have entered
+ // the optional fields like node_port or virtual_ip, and they could
+ // be auto-allocated by the system.
+ Endpoint *Endpoint `protobuf:"bytes,4,opt,name=endpoint" json:"endpoint,omitempty"`
+}
+
+func (m *Service) Reset() { *m = Service{} }
+func (*Service) ProtoMessage() {}
+func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{2} }
+
+// Endpoint specifies all the network parameters required to
+// correctly discover and load balance a service.
+type Endpoint struct {
+ Spec *EndpointSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"`
+ // Runtime state of the exposed ports which may carry
+ // auto-allocated swarm ports in addition to the user
+ // configured information.
+ Ports []*PortConfig `protobuf:"bytes,2,rep,name=ports" json:"ports,omitempty"`
+ // VirtualIPs specifies the IP addresses under which this endpoint will be
+ // made available.
+ VirtualIPs []*Endpoint_VirtualIP `protobuf:"bytes,3,rep,name=virtual_ips,json=virtualIps" json:"virtual_ips,omitempty"`
+}
+
+func (m *Endpoint) Reset() { *m = Endpoint{} }
+func (*Endpoint) ProtoMessage() {}
+func (*Endpoint) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{3} }
+
+// VirtualIP specifies a set of networks this endpoint will be attached to
+// and the IP addresses the target service will be made available under.
+type Endpoint_VirtualIP struct {
+ // NetworkID for which this endpoint attachment was created.
+ NetworkID string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"`
+ // A virtual IP is used to address this service at the IP
+ // layer; a client can use it to send requests to this
+ // service. A DNS A/AAAA query on the service name might
+ // return this IP to the client. This is strictly a logical
+ // IP, and there may not be any interfaces assigned this IP
+ // address or any route created for this address. More than
+ // one address may be assigned to accommodate both IPv4 and IPv6.
+ Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
+}
+
+func (m *Endpoint_VirtualIP) Reset() { *m = Endpoint_VirtualIP{} }
+func (*Endpoint_VirtualIP) ProtoMessage() {}
+func (*Endpoint_VirtualIP) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{3, 0} }
+
+// Task specifies the parameters for implementing a Spec. A task is effectively
+// immutable and idempotent. Once it is dispatched to a node, it will not be
+// dispatched to another node.
+type Task struct {
+ ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"`
+ // Spec defines the desired state of the task as specified by the user.
+ // The system will honor this and will *never* modify it.
+ Spec TaskSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"`
+ // ServiceID indicates the service under which this task is orchestrated. This
+ // should almost always be set.
+ ServiceID string `protobuf:"bytes,4,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ // Slot is the service slot number for a task.
+ // For example, if a replicated service has replicas = 2, there will be a
+ // task with slot = 1, and another with slot = 2.
+ Slot uint64 `protobuf:"varint,5,opt,name=slot,proto3" json:"slot,omitempty"`
+ // NodeID indicates the node to which the task is assigned. If this field
+ // is empty or not set, the task is unassigned.
+ NodeID string `protobuf:"bytes,6,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ // Annotations defines the names and labels for the runtime, as set by
+ // the cluster manager.
+ //
+ // As backup, if this field has an empty name, the runtime will
+ // allocate a unique name for the actual container.
+ //
+ // NOTE(stevvooe): This preserves the ability for us to make naming
+ // decisions for tasks in the orchestrator, albeit this is left empty for now.
+ Annotations Annotations `protobuf:"bytes,7,opt,name=annotations" json:"annotations"`
+ // ServiceAnnotations is a direct copy of the service name and labels when
+ // this task is created.
+ //
+ // Labels set here will *not* be propagated to the runtime target, such as a
+ // container. Use labels on the runtime target for that purpose.
+ ServiceAnnotations Annotations `protobuf:"bytes,8,opt,name=service_annotations,json=serviceAnnotations" json:"service_annotations"`
+ Status TaskStatus `protobuf:"bytes,9,opt,name=status" json:"status"`
+ // DesiredState is the target state for the task. It is set to
+ // TaskStateRunning when a task is first created, and changed to
+ // TaskStateShutdown if the manager wants to terminate the task. This field
+ // is only written by the manager.
+ DesiredState TaskState `protobuf:"varint,10,opt,name=desired_state,json=desiredState,proto3,enum=docker.swarmkit.v1.TaskState" json:"desired_state,omitempty"`
+	// List of the task's network attachments.
+ Networks []*NetworkAttachment `protobuf:"bytes,11,rep,name=networks" json:"networks,omitempty"`
+	// A copy of the runtime state of the service endpoint from the
+	// Service object, distributed to agents as part of the task.
+ Endpoint *Endpoint `protobuf:"bytes,12,opt,name=endpoint" json:"endpoint,omitempty"`
+}
+
+func (m *Task) Reset() { *m = Task{} }
+func (*Task) ProtoMessage() {}
+func (*Task) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{4} }
+
+// NetworkAttachment specifies the network parameters of attachment to
+// a single network by an object such as task or node.
+type NetworkAttachment struct {
+	// The network state as a whole becomes part of the object so that
+	// it is always available to agents, leaving them with no other
+	// dependency during execution.
+ Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"`
+ // List of IPv4/IPv6 addresses that are assigned to the object
+ // as part of getting attached to this network.
+ Addresses []string `protobuf:"bytes,2,rep,name=addresses" json:"addresses,omitempty"`
+}
+
+func (m *NetworkAttachment) Reset() { *m = NetworkAttachment{} }
+func (*NetworkAttachment) ProtoMessage() {}
+func (*NetworkAttachment) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{5} }
+
+type Network struct {
+ ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"`
+ Spec NetworkSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"`
+	// Driver-specific operational state provided by the network driver.
+ DriverState *Driver `protobuf:"bytes,4,opt,name=driver_state,json=driverState" json:"driver_state,omitempty"`
+	// Runtime state of the IPAM options. This may not reflect the
+	// IPAM options from NetworkSpec.
+ IPAM *IPAMOptions `protobuf:"bytes,5,opt,name=ipam" json:"ipam,omitempty"`
+}
+
+func (m *Network) Reset() { *m = Network{} }
+func (*Network) ProtoMessage() {}
+func (*Network) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{6} }
+
+// Cluster provides global cluster settings.
+type Cluster struct {
+ ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"`
+ Spec ClusterSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"`
+ // RootCA contains key material for the root CA.
+ RootCA RootCA `protobuf:"bytes,4,opt,name=root_ca,json=rootCa" json:"root_ca"`
+	// Symmetric encryption keys distributed by the lead manager, used by
+	// agents to secure network bootstrapping and communication.
+ NetworkBootstrapKeys []*EncryptionKey `protobuf:"bytes,5,rep,name=network_bootstrap_keys,json=networkBootstrapKeys" json:"network_bootstrap_keys,omitempty"`
+ // Logical clock used to timestamp every key. It allows other managers
+ // and agents to unambiguously identify the older key to be deleted when
+ // a new key is allocated on key rotation.
+ EncryptionKeyLamportClock uint64 `protobuf:"varint,6,opt,name=encryption_key_lamport_clock,json=encryptionKeyLamportClock,proto3" json:"encryption_key_lamport_clock,omitempty"`
+}
+
+func (m *Cluster) Reset() { *m = Cluster{} }
+func (*Cluster) ProtoMessage() {}
+func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{7} }
+
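+// init registers every message type under its fully-qualified proto name
+// (for example "docker.swarmkit.v1.Task") so it can be resolved by name at
+// runtime through the proto package's type registry.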
+func init() {
+ proto.RegisterType((*Meta)(nil), "docker.swarmkit.v1.Meta")
+ proto.RegisterType((*Node)(nil), "docker.swarmkit.v1.Node")
+ proto.RegisterType((*Service)(nil), "docker.swarmkit.v1.Service")
+ proto.RegisterType((*Endpoint)(nil), "docker.swarmkit.v1.Endpoint")
+ proto.RegisterType((*Endpoint_VirtualIP)(nil), "docker.swarmkit.v1.Endpoint.VirtualIP")
+ proto.RegisterType((*Task)(nil), "docker.swarmkit.v1.Task")
+ proto.RegisterType((*NetworkAttachment)(nil), "docker.swarmkit.v1.NetworkAttachment")
+ proto.RegisterType((*Network)(nil), "docker.swarmkit.v1.Network")
+ proto.RegisterType((*Cluster)(nil), "docker.swarmkit.v1.Cluster")
+}
+
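+// The Copy methods below perform deep copies: nested messages are copied
+// recursively, slices are reallocated element by element, and a nil receiver
+// yields nil, so the result shares no mutable state with the original.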
+func (m *Meta) Copy() *Meta {
+ if m == nil {
+ return nil
+ }
+
+ o := &Meta{
+ Version: *m.Version.Copy(),
+ CreatedAt: m.CreatedAt.Copy(),
+ UpdatedAt: m.UpdatedAt.Copy(),
+ }
+
+ return o
+}
+
+func (m *Node) Copy() *Node {
+ if m == nil {
+ return nil
+ }
+
+ o := &Node{
+ ID: m.ID,
+ Meta: *m.Meta.Copy(),
+ Spec: *m.Spec.Copy(),
+ Description: m.Description.Copy(),
+ Status: *m.Status.Copy(),
+ ManagerStatus: m.ManagerStatus.Copy(),
+ Attachment: m.Attachment.Copy(),
+ Certificate: *m.Certificate.Copy(),
+ }
+
+ return o
+}
+
+func (m *Service) Copy() *Service {
+ if m == nil {
+ return nil
+ }
+
+ o := &Service{
+ ID: m.ID,
+ Meta: *m.Meta.Copy(),
+ Spec: *m.Spec.Copy(),
+ Endpoint: m.Endpoint.Copy(),
+ }
+
+ return o
+}
+
+func (m *Endpoint) Copy() *Endpoint {
+ if m == nil {
+ return nil
+ }
+
+ o := &Endpoint{
+ Spec: m.Spec.Copy(),
+ }
+
+ if m.Ports != nil {
+ o.Ports = make([]*PortConfig, 0, len(m.Ports))
+ for _, v := range m.Ports {
+ o.Ports = append(o.Ports, v.Copy())
+ }
+ }
+
+ if m.VirtualIPs != nil {
+ o.VirtualIPs = make([]*Endpoint_VirtualIP, 0, len(m.VirtualIPs))
+ for _, v := range m.VirtualIPs {
+ o.VirtualIPs = append(o.VirtualIPs, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *Endpoint_VirtualIP) Copy() *Endpoint_VirtualIP {
+ if m == nil {
+ return nil
+ }
+
+ o := &Endpoint_VirtualIP{
+ NetworkID: m.NetworkID,
+ Addr: m.Addr,
+ }
+
+ return o
+}
+
+func (m *Task) Copy() *Task {
+ if m == nil {
+ return nil
+ }
+
+ o := &Task{
+ ID: m.ID,
+ Meta: *m.Meta.Copy(),
+ Spec: *m.Spec.Copy(),
+ ServiceID: m.ServiceID,
+ Slot: m.Slot,
+ NodeID: m.NodeID,
+ Annotations: *m.Annotations.Copy(),
+ ServiceAnnotations: *m.ServiceAnnotations.Copy(),
+ Status: *m.Status.Copy(),
+ DesiredState: m.DesiredState,
+ Endpoint: m.Endpoint.Copy(),
+ }
+
+ if m.Networks != nil {
+ o.Networks = make([]*NetworkAttachment, 0, len(m.Networks))
+ for _, v := range m.Networks {
+ o.Networks = append(o.Networks, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *NetworkAttachment) Copy() *NetworkAttachment {
+ if m == nil {
+ return nil
+ }
+
+ o := &NetworkAttachment{
+ Network: m.Network.Copy(),
+ }
+
+ if m.Addresses != nil {
+ o.Addresses = make([]string, 0, len(m.Addresses))
+ for _, v := range m.Addresses {
+ o.Addresses = append(o.Addresses, v)
+ }
+ }
+
+ return o
+}
+
+func (m *Network) Copy() *Network {
+ if m == nil {
+ return nil
+ }
+
+ o := &Network{
+ ID: m.ID,
+ Meta: *m.Meta.Copy(),
+ Spec: *m.Spec.Copy(),
+ DriverState: m.DriverState.Copy(),
+ IPAM: m.IPAM.Copy(),
+ }
+
+ return o
+}
+
+func (m *Cluster) Copy() *Cluster {
+ if m == nil {
+ return nil
+ }
+
+ o := &Cluster{
+ ID: m.ID,
+ Meta: *m.Meta.Copy(),
+ Spec: *m.Spec.Copy(),
+ RootCA: *m.RootCA.Copy(),
+ EncryptionKeyLamportClock: m.EncryptionKeyLamportClock,
+ }
+
+ if m.NetworkBootstrapKeys != nil {
+ o.NetworkBootstrapKeys = make([]*EncryptionKey, 0, len(m.NetworkBootstrapKeys))
+ for _, v := range m.NetworkBootstrapKeys {
+ o.NetworkBootstrapKeys = append(o.NetworkBootstrapKeys, v.Copy())
+ }
+ }
+
+ return o
+}
+
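+// The GoString methods render each message as Go source for debugging
+// (gogo/protobuf's gostring support); the String methods further below
+// produce a more compact textual form.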
+func (this *Meta) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.Meta{")
+ s = append(s, "Version: "+strings.Replace(this.Version.GoString(), `&`, ``, 1)+",\n")
+ if this.CreatedAt != nil {
+ s = append(s, "CreatedAt: "+fmt.Sprintf("%#v", this.CreatedAt)+",\n")
+ }
+ if this.UpdatedAt != nil {
+ s = append(s, "UpdatedAt: "+fmt.Sprintf("%#v", this.UpdatedAt)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Node) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 12)
+ s = append(s, "&api.Node{")
+ s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n")
+ s = append(s, "Meta: "+strings.Replace(this.Meta.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Spec: "+strings.Replace(this.Spec.GoString(), `&`, ``, 1)+",\n")
+ if this.Description != nil {
+ s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n")
+ }
+ s = append(s, "Status: "+strings.Replace(this.Status.GoString(), `&`, ``, 1)+",\n")
+ if this.ManagerStatus != nil {
+ s = append(s, "ManagerStatus: "+fmt.Sprintf("%#v", this.ManagerStatus)+",\n")
+ }
+ if this.Attachment != nil {
+ s = append(s, "Attachment: "+fmt.Sprintf("%#v", this.Attachment)+",\n")
+ }
+ s = append(s, "Certificate: "+strings.Replace(this.Certificate.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Service) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&api.Service{")
+ s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n")
+ s = append(s, "Meta: "+strings.Replace(this.Meta.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Spec: "+strings.Replace(this.Spec.GoString(), `&`, ``, 1)+",\n")
+ if this.Endpoint != nil {
+ s = append(s, "Endpoint: "+fmt.Sprintf("%#v", this.Endpoint)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Endpoint) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.Endpoint{")
+ if this.Spec != nil {
+ s = append(s, "Spec: "+fmt.Sprintf("%#v", this.Spec)+",\n")
+ }
+ if this.Ports != nil {
+ s = append(s, "Ports: "+fmt.Sprintf("%#v", this.Ports)+",\n")
+ }
+ if this.VirtualIPs != nil {
+ s = append(s, "VirtualIPs: "+fmt.Sprintf("%#v", this.VirtualIPs)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Endpoint_VirtualIP) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.Endpoint_VirtualIP{")
+ s = append(s, "NetworkID: "+fmt.Sprintf("%#v", this.NetworkID)+",\n")
+ s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Task) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 16)
+ s = append(s, "&api.Task{")
+ s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n")
+ s = append(s, "Meta: "+strings.Replace(this.Meta.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Spec: "+strings.Replace(this.Spec.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "ServiceID: "+fmt.Sprintf("%#v", this.ServiceID)+",\n")
+ s = append(s, "Slot: "+fmt.Sprintf("%#v", this.Slot)+",\n")
+ s = append(s, "NodeID: "+fmt.Sprintf("%#v", this.NodeID)+",\n")
+ s = append(s, "Annotations: "+strings.Replace(this.Annotations.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "ServiceAnnotations: "+strings.Replace(this.ServiceAnnotations.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Status: "+strings.Replace(this.Status.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "DesiredState: "+fmt.Sprintf("%#v", this.DesiredState)+",\n")
+ if this.Networks != nil {
+ s = append(s, "Networks: "+fmt.Sprintf("%#v", this.Networks)+",\n")
+ }
+ if this.Endpoint != nil {
+ s = append(s, "Endpoint: "+fmt.Sprintf("%#v", this.Endpoint)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *NetworkAttachment) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.NetworkAttachment{")
+ if this.Network != nil {
+ s = append(s, "Network: "+fmt.Sprintf("%#v", this.Network)+",\n")
+ }
+ s = append(s, "Addresses: "+fmt.Sprintf("%#v", this.Addresses)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Network) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&api.Network{")
+ s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n")
+ s = append(s, "Meta: "+strings.Replace(this.Meta.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Spec: "+strings.Replace(this.Spec.GoString(), `&`, ``, 1)+",\n")
+ if this.DriverState != nil {
+ s = append(s, "DriverState: "+fmt.Sprintf("%#v", this.DriverState)+",\n")
+ }
+ if this.IPAM != nil {
+ s = append(s, "IPAM: "+fmt.Sprintf("%#v", this.IPAM)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Cluster) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 10)
+ s = append(s, "&api.Cluster{")
+ s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n")
+ s = append(s, "Meta: "+strings.Replace(this.Meta.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Spec: "+strings.Replace(this.Spec.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "RootCA: "+strings.Replace(this.RootCA.GoString(), `&`, ``, 1)+",\n")
+ if this.NetworkBootstrapKeys != nil {
+ s = append(s, "NetworkBootstrapKeys: "+fmt.Sprintf("%#v", this.NetworkBootstrapKeys)+",\n")
+ }
+ s = append(s, "EncryptionKeyLamportClock: "+fmt.Sprintf("%#v", this.EncryptionKeyLamportClock)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringObjects(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringObjects(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
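+// A minimal round-trip sketch of the marshaling API below; the task variable
+// and its field values are illustrative only:
+//
+//	task := &Task{ID: "abc", DesiredState: TaskStateRunning}
+//	data, err := task.Marshal() // Size, then MarshalTo into a fresh buffer
+//	if err != nil {
+//		return err
+//	}
+//	decoded := &Task{}
+//	if err := decoded.Unmarshal(data); err != nil {
+//		return err
+//	}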
+func (m *Meta) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
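+// The byte constants written below (0xa, 0x12, 0x1a, ...) are protobuf field
+// keys, computed as (field_number << 3) | wire_type: 0xa is field 1 with wire
+// type 2 (length-delimited), 0x12 is field 2, 0x1a is field 3, and so on.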
+func (m *Meta) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Version.Size()))
+ n1, err := m.Version.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ if m.CreatedAt != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.CreatedAt.Size()))
+ n2, err := m.CreatedAt.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ if m.UpdatedAt != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.UpdatedAt.Size()))
+ n3, err := m.UpdatedAt.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
+ return i, nil
+}
+
+func (m *Node) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Node) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintObjects(data, i, uint64(len(m.ID)))
+ i += copy(data[i:], m.ID)
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
+ n4, err := m.Meta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ data[i] = 0x1a
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
+ n5, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ if m.Description != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Description.Size()))
+ n6, err := m.Description.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ data[i] = 0x2a
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Status.Size()))
+ n7, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ if m.ManagerStatus != nil {
+ data[i] = 0x32
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.ManagerStatus.Size()))
+ n8, err := m.ManagerStatus.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ }
+ if m.Attachment != nil {
+ data[i] = 0x3a
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Attachment.Size()))
+ n9, err := m.Attachment.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ }
+ data[i] = 0x42
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Certificate.Size()))
+ n10, err := m.Certificate.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ return i, nil
+}
+
+func (m *Service) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Service) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintObjects(data, i, uint64(len(m.ID)))
+ i += copy(data[i:], m.ID)
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
+ n11, err := m.Meta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ data[i] = 0x1a
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
+ n12, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ if m.Endpoint != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Endpoint.Size()))
+ n13, err := m.Endpoint.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ }
+ return i, nil
+}
+
+func (m *Endpoint) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
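+// Repeated message fields such as Ports and VirtualIPs are marshaled as one
+// key/length/body record per element, with every element repeating the same
+// field key (0x12 for Ports, 0x1a for VirtualIPs).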
+func (m *Endpoint) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Spec != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
+ n14, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ }
+ if len(m.Ports) > 0 {
+ for _, msg := range m.Ports {
+ data[i] = 0x12
+ i++
+ i = encodeVarintObjects(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.VirtualIPs) > 0 {
+ for _, msg := range m.VirtualIPs {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintObjects(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *Endpoint_VirtualIP) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Endpoint_VirtualIP) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.NetworkID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintObjects(data, i, uint64(len(m.NetworkID)))
+ i += copy(data[i:], m.NetworkID)
+ }
+ if len(m.Addr) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintObjects(data, i, uint64(len(m.Addr)))
+ i += copy(data[i:], m.Addr)
+ }
+ return i, nil
+}
+
+func (m *Task) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Task) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintObjects(data, i, uint64(len(m.ID)))
+ i += copy(data[i:], m.ID)
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
+ n15, err := m.Meta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n15
+ data[i] = 0x1a
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
+ n16, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ if len(m.ServiceID) > 0 {
+ data[i] = 0x22
+ i++
+ i = encodeVarintObjects(data, i, uint64(len(m.ServiceID)))
+ i += copy(data[i:], m.ServiceID)
+ }
+ if m.Slot != 0 {
+ data[i] = 0x28
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Slot))
+ }
+ if len(m.NodeID) > 0 {
+ data[i] = 0x32
+ i++
+ i = encodeVarintObjects(data, i, uint64(len(m.NodeID)))
+ i += copy(data[i:], m.NodeID)
+ }
+ data[i] = 0x3a
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Annotations.Size()))
+ n17, err := m.Annotations.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n17
+ data[i] = 0x42
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.ServiceAnnotations.Size()))
+ n18, err := m.ServiceAnnotations.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n18
+ data[i] = 0x4a
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Status.Size()))
+ n19, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n19
+ if m.DesiredState != 0 {
+ data[i] = 0x50
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.DesiredState))
+ }
+ if len(m.Networks) > 0 {
+ for _, msg := range m.Networks {
+ data[i] = 0x5a
+ i++
+ i = encodeVarintObjects(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.Endpoint != nil {
+ data[i] = 0x62
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Endpoint.Size()))
+ n20, err := m.Endpoint.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n20
+ }
+ return i, nil
+}
+
+func (m *NetworkAttachment) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
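+// For the repeated string field Addresses, the length varint is written
+// inline below rather than through encodeVarintObjects; the byte layout is
+// identical.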
+func (m *NetworkAttachment) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Network != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Network.Size()))
+ n21, err := m.Network.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n21
+ }
+ if len(m.Addresses) > 0 {
+ for _, s := range m.Addresses {
+ data[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *Network) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Network) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintObjects(data, i, uint64(len(m.ID)))
+ i += copy(data[i:], m.ID)
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
+ n22, err := m.Meta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n22
+ data[i] = 0x1a
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
+ n23, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n23
+ if m.DriverState != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.DriverState.Size()))
+ n24, err := m.DriverState.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n24
+ }
+ if m.IPAM != nil {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.IPAM.Size()))
+ n25, err := m.IPAM.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n25
+ }
+ return i, nil
+}
+
+func (m *Cluster) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Cluster) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintObjects(data, i, uint64(len(m.ID)))
+ i += copy(data[i:], m.ID)
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
+ n26, err := m.Meta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n26
+ data[i] = 0x1a
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
+ n27, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n27
+ data[i] = 0x22
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.RootCA.Size()))
+ n28, err := m.RootCA.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n28
+ if len(m.NetworkBootstrapKeys) > 0 {
+ for _, msg := range m.NetworkBootstrapKeys {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintObjects(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.EncryptionKeyLamportClock != 0 {
+ data[i] = 0x30
+ i++
+ i = encodeVarintObjects(data, i, uint64(m.EncryptionKeyLamportClock))
+ }
+ return i, nil
+}
+
+func encodeFixed64Objects(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Objects(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
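+// encodeVarintObjects writes v as a protobuf varint: seven bits per byte,
+// least-significant group first, with the continuation bit (0x80) set on
+// every byte except the last. For example, 300 encodes as 0xac 0x02.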
+func encodeVarintObjects(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+
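+// The Size methods compute the exact number of bytes MarshalTo will write:
+// one key byte per present field (every field number here fits in a single
+// byte), plus the varint value for numeric fields, or the varint length and
+// the body for length-delimited fields.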
+func (m *Meta) Size() (n int) {
+ var l int
+ _ = l
+ l = m.Version.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ if m.CreatedAt != nil {
+ l = m.CreatedAt.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ if m.UpdatedAt != nil {
+ l = m.UpdatedAt.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ return n
+}
+
+func (m *Node) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ID)
+ if l > 0 {
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ l = m.Meta.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ if m.Description != nil {
+ l = m.Description.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ l = m.Status.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ if m.ManagerStatus != nil {
+ l = m.ManagerStatus.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ if m.Attachment != nil {
+ l = m.Attachment.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ l = m.Certificate.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ return n
+}
+
+func (m *Service) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ID)
+ if l > 0 {
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ l = m.Meta.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ if m.Endpoint != nil {
+ l = m.Endpoint.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ return n
+}
+
+func (m *Endpoint) Size() (n int) {
+ var l int
+ _ = l
+ if m.Spec != nil {
+ l = m.Spec.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ if len(m.Ports) > 0 {
+ for _, e := range m.Ports {
+ l = e.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ }
+ if len(m.VirtualIPs) > 0 {
+ for _, e := range m.VirtualIPs {
+ l = e.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Endpoint_VirtualIP) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.NetworkID)
+ if l > 0 {
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ l = len(m.Addr)
+ if l > 0 {
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ return n
+}
+
+func (m *Task) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ID)
+ if l > 0 {
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ l = m.Meta.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ l = len(m.ServiceID)
+ if l > 0 {
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ if m.Slot != 0 {
+ n += 1 + sovObjects(uint64(m.Slot))
+ }
+ l = len(m.NodeID)
+ if l > 0 {
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ l = m.Annotations.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ l = m.ServiceAnnotations.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ if m.DesiredState != 0 {
+ n += 1 + sovObjects(uint64(m.DesiredState))
+ }
+ if len(m.Networks) > 0 {
+ for _, e := range m.Networks {
+ l = e.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ }
+ if m.Endpoint != nil {
+ l = m.Endpoint.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ return n
+}
+
+func (m *NetworkAttachment) Size() (n int) {
+ var l int
+ _ = l
+ if m.Network != nil {
+ l = m.Network.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ if len(m.Addresses) > 0 {
+ for _, s := range m.Addresses {
+ l = len(s)
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Network) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ID)
+ if l > 0 {
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ l = m.Meta.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ if m.DriverState != nil {
+ l = m.DriverState.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ if m.IPAM != nil {
+ l = m.IPAM.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ return n
+}
+
+func (m *Cluster) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ID)
+ if l > 0 {
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ l = m.Meta.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ l = m.RootCA.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ if len(m.NetworkBootstrapKeys) > 0 {
+ for _, e := range m.NetworkBootstrapKeys {
+ l = e.Size()
+ n += 1 + l + sovObjects(uint64(l))
+ }
+ }
+ if m.EncryptionKeyLamportClock != 0 {
+ n += 1 + sovObjects(uint64(m.EncryptionKeyLamportClock))
+ }
+ return n
+}
+
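+// sovObjects returns the byte length of the varint encoding of x (one byte
+// per seven bits, minimum one). sozObjects does the same after zigzag
+// encoding, which maps signed values so that small magnitudes stay small:
+// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, and so on.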
+func sovObjects(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozObjects(x uint64) (n int) {
+ return sovObjects(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Meta) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Meta{`,
+ `Version:` + strings.Replace(strings.Replace(this.Version.String(), "Version", "Version", 1), `&`, ``, 1) + `,`,
+ `CreatedAt:` + strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "docker_swarmkit_v1.Timestamp", 1) + `,`,
+ `UpdatedAt:` + strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "docker_swarmkit_v1.Timestamp", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Node) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Node{`,
+ `ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+ `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`,
+ `Description:` + strings.Replace(fmt.Sprintf("%v", this.Description), "NodeDescription", "NodeDescription", 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`,
+ `ManagerStatus:` + strings.Replace(fmt.Sprintf("%v", this.ManagerStatus), "ManagerStatus", "ManagerStatus", 1) + `,`,
+ `Attachment:` + strings.Replace(fmt.Sprintf("%v", this.Attachment), "NetworkAttachment", "NetworkAttachment", 1) + `,`,
+ `Certificate:` + strings.Replace(strings.Replace(this.Certificate.String(), "Certificate", "Certificate", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Service) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Service{`,
+ `ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+ `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`,
+ `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Endpoint) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Endpoint{`,
+ `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "EndpointSpec", "EndpointSpec", 1) + `,`,
+ `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`,
+ `VirtualIPs:` + strings.Replace(fmt.Sprintf("%v", this.VirtualIPs), "Endpoint_VirtualIP", "Endpoint_VirtualIP", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Endpoint_VirtualIP) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Endpoint_VirtualIP{`,
+ `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`,
+ `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Task) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Task{`,
+ `ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+ `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TaskSpec", "TaskSpec", 1), `&`, ``, 1) + `,`,
+ `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`,
+ `Slot:` + fmt.Sprintf("%v", this.Slot) + `,`,
+ `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`,
+ `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`,
+ `ServiceAnnotations:` + strings.Replace(strings.Replace(this.ServiceAnnotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TaskStatus", "TaskStatus", 1), `&`, ``, 1) + `,`,
+ `DesiredState:` + fmt.Sprintf("%v", this.DesiredState) + `,`,
+ `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkAttachment", "NetworkAttachment", 1) + `,`,
+ `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkAttachment) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NetworkAttachment{`,
+ `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`,
+ `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Network) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Network{`,
+ `ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+ `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkSpec", "NetworkSpec", 1), `&`, ``, 1) + `,`,
+ `DriverState:` + strings.Replace(fmt.Sprintf("%v", this.DriverState), "Driver", "Driver", 1) + `,`,
+ `IPAM:` + strings.Replace(fmt.Sprintf("%v", this.IPAM), "IPAMOptions", "IPAMOptions", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Cluster) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Cluster{`,
+ `ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+ `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterSpec", "ClusterSpec", 1), `&`, ``, 1) + `,`,
+ `RootCA:` + strings.Replace(strings.Replace(this.RootCA.String(), "RootCA", "RootCA", 1), `&`, ``, 1) + `,`,
+ `NetworkBootstrapKeys:` + strings.Replace(fmt.Sprintf("%v", this.NetworkBootstrapKeys), "EncryptionKey", "EncryptionKey", 1) + `,`,
+ `EncryptionKeyLamportClock:` + fmt.Sprintf("%v", this.EncryptionKeyLamportClock) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringObjects(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
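+// The Unmarshal methods mirror MarshalTo: each loop iteration reads a varint
+// key, takes the wire type from its low three bits and the field number from
+// wire >> 3, then decodes the field body or skips unknown fields via
+// skipObjects.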
+func (m *Meta) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Meta: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Version.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CreatedAt == nil {
+ m.CreatedAt = &docker_swarmkit_v1.Timestamp{}
+ }
+ if err := m.CreatedAt.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.UpdatedAt == nil {
+ m.UpdatedAt = &docker_swarmkit_v1.Timestamp{}
+ }
+ if err := m.UpdatedAt.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipObjects(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthObjects
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Node) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Node: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Meta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Description == nil {
+ m.Description = &NodeDescription{}
+ }
+ if err := m.Description.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ManagerStatus", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ManagerStatus == nil {
+ m.ManagerStatus = &ManagerStatus{}
+ }
+ if err := m.ManagerStatus.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attachment", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Attachment == nil {
+ m.Attachment = &NetworkAttachment{}
+ }
+ if err := m.Attachment.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Certificate.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipObjects(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthObjects
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Service) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Service: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Meta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Endpoint == nil {
+ m.Endpoint = &Endpoint{}
+ }
+ if err := m.Endpoint.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipObjects(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthObjects
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Endpoint) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Endpoint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Spec == nil {
+ m.Spec = &EndpointSpec{}
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ports = append(m.Ports, &PortConfig{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VirtualIPs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VirtualIPs = append(m.VirtualIPs, &Endpoint_VirtualIP{})
+ if err := m.VirtualIPs[len(m.VirtualIPs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipObjects(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthObjects
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Endpoint_VirtualIP) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: VirtualIP: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: VirtualIP: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NetworkID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Addr = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipObjects(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthObjects
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Task) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Task: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Meta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType)
+ }
+ m.Slot = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Slot |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Annotations.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAnnotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ServiceAnnotations.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DesiredState", wireType)
+ }
+ m.DesiredState = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.DesiredState |= (TaskState(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Networks = append(m.Networks, &NetworkAttachment{})
+ if err := m.Networks[len(m.Networks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Endpoint == nil {
+ m.Endpoint = &Endpoint{}
+ }
+ if err := m.Endpoint.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipObjects(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthObjects
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
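+
+// Each decode loop above first reads a key varint and splits it into a
+// field number and a wire type. As an illustrative note (mirroring the
+// generated logic, with an example value chosen only for illustration):
+// for a key byte of 0x12, fieldNum = 0x12 >> 3 = 2 and
+// wireType = 0x12 & 0x7 = 2 (length-delimited), which routes decoding
+// into the corresponding case of the switch.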
+func (m *NetworkAttachment) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkAttachment: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkAttachment: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Network == nil {
+ m.Network = &Network{}
+ }
+ if err := m.Network.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Addresses = append(m.Addresses, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipObjects(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthObjects
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Network) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Network: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Network: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Meta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DriverState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DriverState == nil {
+ m.DriverState = &Driver{}
+ }
+ if err := m.DriverState.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IPAM", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.IPAM == nil {
+ m.IPAM = &IPAMOptions{}
+ }
+ if err := m.IPAM.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipObjects(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthObjects
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Cluster) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Cluster: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Meta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RootCA", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.RootCA.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetworkBootstrapKeys", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthObjects
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NetworkBootstrapKeys = append(m.NetworkBootstrapKeys, &EncryptionKey{})
+ if err := m.NetworkBootstrapKeys[len(m.NetworkBootstrapKeys)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EncryptionKeyLamportClock", wireType)
+ }
+ m.EncryptionKeyLamportClock = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.EncryptionKeyLamportClock |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipObjects(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthObjects
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipObjects(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthObjects
+ }
+ iNdEx += length
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowObjects
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipObjects(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
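+
+// The Unmarshal and skip functions above all decode protobuf varints with
+// the same inlined loop. As a minimal sketch of that pattern (a
+// hypothetical helper shown for illustration, not part of the generated
+// API):
+//
+//	func decodeVarint(data []byte) (v uint64, n int, err error) {
+//		for shift := uint(0); ; shift += 7 {
+//			if shift >= 64 {
+//				return 0, 0, ErrIntOverflowObjects
+//			}
+//			if n >= len(data) {
+//				return 0, 0, io.ErrUnexpectedEOF
+//			}
+//			b := data[n]
+//			n++
+//			v |= (uint64(b) & 0x7F) << shift
+//			if b < 0x80 {
+//				return v, n, nil
+//			}
+//		}
+//	}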
+
+var (
+ ErrInvalidLengthObjects = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowObjects = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorObjects = []byte{
+ // 949 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x56, 0xcf, 0x6e, 0x1b, 0x45,
+ 0x18, 0xaf, 0x9d, 0x8d, 0xed, 0xfd, 0x9c, 0x44, 0x62, 0xa8, 0x2a, 0x37, 0x84, 0xa4, 0xb8, 0x02,
+ 0x71, 0x40, 0xae, 0x28, 0x05, 0x81, 0xa0, 0x42, 0xb6, 0x13, 0x81, 0x05, 0x81, 0x68, 0x5a, 0x85,
+ 0xa3, 0x35, 0xd9, 0x9d, 0xa6, 0x8b, 0xed, 0xdd, 0xd5, 0xcc, 0xc4, 0x55, 0x6e, 0x3c, 0x01, 0x12,
+ 0x2f, 0xc0, 0xab, 0x70, 0x8d, 0x38, 0x71, 0xe4, 0x54, 0xd1, 0xde, 0x38, 0xc1, 0x23, 0xf0, 0xcd,
+ 0xec, 0xb7, 0xeb, 0xad, 0xbc, 0x8e, 0x1a, 0x09, 0xe5, 0xb0, 0xf2, 0xce, 0xec, 0xef, 0xf7, 0x9b,
+ 0xef, 0xff, 0x18, 0x36, 0x93, 0x93, 0x1f, 0x65, 0x60, 0x74, 0x2f, 0x55, 0x89, 0x49, 0x18, 0x0b,
+ 0x93, 0x60, 0x22, 0x55, 0x4f, 0x3f, 0x13, 0x6a, 0x36, 0x89, 0x4c, 0x6f, 0xfe, 0xe1, 0x76, 0xdb,
+ 0x9c, 0xa7, 0x92, 0x00, 0xdb, 0x6d, 0x9d, 0xca, 0x20, 0x5f, 0xdc, 0x36, 0xd1, 0x4c, 0x6a, 0x23,
+ 0x66, 0xe9, 0xbd, 0xe2, 0x8d, 0x3e, 0xdd, 0x3c, 0x4d, 0x4e, 0x13, 0xf7, 0x7a, 0xcf, 0xbe, 0x65,
+ 0xbb, 0xdd, 0xdf, 0x6a, 0xe0, 0x1d, 0x4a, 0x23, 0xd8, 0xe7, 0xd0, 0x9c, 0x4b, 0xa5, 0xa3, 0x24,
+ 0xee, 0xd4, 0xee, 0xd4, 0xde, 0x6f, 0xdf, 0x7f, 0xab, 0xb7, 0x7c, 0x72, 0xef, 0x38, 0x83, 0x0c,
+ 0xbc, 0x8b, 0xe7, 0x7b, 0x37, 0x78, 0xce, 0x60, 0x5f, 0x00, 0x04, 0x4a, 0x0a, 0x23, 0xc3, 0xb1,
+ 0x30, 0x9d, 0xba, 0xe3, 0xbf, 0x5d, 0xc5, 0x7f, 0x9c, 0x1b, 0xc5, 0x7d, 0x22, 0xf4, 0x8d, 0x65,
+ 0x9f, 0xa5, 0x61, 0xce, 0x5e, 0x7b, 0x2d, 0x36, 0x11, 0xfa, 0xa6, 0xfb, 0xf7, 0x1a, 0x78, 0xdf,
+ 0x25, 0xa1, 0x64, 0xb7, 0xa0, 0x1e, 0x85, 0xce, 0x78, 0x7f, 0xd0, 0x78, 0xf9, 0x7c, 0xaf, 0x3e,
+ 0xda, 0xe7, 0xb8, 0xc3, 0xee, 0x83, 0x37, 0x43, 0x0f, 0xc9, 0xac, 0x4e, 0x95, 0xb0, 0x8d, 0x00,
+ 0xf9, 0xe4, 0xb0, 0xec, 0x13, 0xf0, 0x6c, 0x58, 0xc9, 0x98, 0x9d, 0x2a, 0x8e, 0x3d, 0xf3, 0x11,
+ 0x62, 0x72, 0x9e, 0xc5, 0xb3, 0x03, 0x68, 0x87, 0x52, 0x07, 0x2a, 0x4a, 0x8d, 0x8d, 0xa4, 0xe7,
+ 0xe8, 0x77, 0x57, 0xd1, 0xf7, 0x17, 0x50, 0x5e, 0xe6, 0x61, 0x44, 0x1a, 0xe8, 0xa7, 0x39, 0xd3,
+ 0x9d, 0x75, 0xa7, 0xb0, 0xbb, 0xd2, 0x00, 0x87, 0x22, 0x13, 0x88, 0xc3, 0xbe, 0x86, 0xad, 0x99,
+ 0x88, 0xc5, 0xa9, 0x54, 0x63, 0x52, 0x69, 0x38, 0x95, 0x77, 0x2a, 0x5d, 0xcf, 0x90, 0x99, 0x10,
+ 0xdf, 0x9c, 0x95, 0x97, 0xe8, 0x0e, 0x08, 0x63, 0x44, 0xf0, 0x74, 0x26, 0x63, 0xd3, 0x69, 0x3a,
+ 0x95, 0x77, 0x2b, 0x6d, 0x91, 0xe6, 0x59, 0xa2, 0x26, 0xfd, 0x02, 0xcc, 0x4b, 0x44, 0xf6, 0x15,
+ 0xb4, 0x03, 0xa9, 0x4c, 0xf4, 0x24, 0x0a, 0x30, 0x69, 0x9d, 0x96, 0xd3, 0xd9, 0xab, 0xd2, 0x19,
+ 0x2e, 0x60, 0xe4, 0x54, 0x99, 0xd9, 0xfd, 0xbd, 0x06, 0xcd, 0x47, 0x52, 0xcd, 0xa3, 0xe0, 0xff,
+ 0x4d, 0xf7, 0x67, 0xaf, 0xa4, 0xbb, 0xd2, 0x32, 0x3a, 0x76, 0x29, 0xe3, 0x9f, 0x42, 0x4b, 0xc6,
+ 0x61, 0x9a, 0x44, 0x18, 0x20, 0x6f, 0x75, 0xb5, 0x1c, 0x10, 0x86, 0x17, 0xe8, 0xee, 0xaf, 0x75,
+ 0x68, 0xe5, 0xdb, 0xec, 0x01, 0x59, 0x90, 0xf5, 0xde, 0x9d, 0xcb, 0x24, 0xac, 0x09, 0x74, 0xf8,
+ 0x03, 0x58, 0x4f, 0x13, 0x65, 0x34, 0x3a, 0xbb, 0xb6, 0xaa, 0x4c, 0x8e, 0x10, 0x30, 0x4c, 0xe2,
+ 0x27, 0xd1, 0x29, 0xcf, 0xc0, 0xec, 0x07, 0x68, 0xcf, 0x23, 0x65, 0xce, 0xc4, 0x74, 0x1c, 0xa5,
+ 0x1a, 0x9d, 0xb6, 0xdc, 0xf7, 0x2e, 0x3b, 0xb2, 0x77, 0x9c, 0xe1, 0x47, 0x47, 0x83, 0x2d, 0x0c,
+ 0x35, 0x14, 0x4b, 0xcd, 0x81, 0xa4, 0x46, 0xa9, 0xde, 0x3e, 0x04, 0xbf, 0xf8, 0xc2, 0x3e, 0x00,
+ 0x88, 0xb3, 0xaa, 0x18, 0x17, 0x79, 0xda, 0x44, 0xb2, 0x4f, 0xb5, 0x82, 0xe9, 0xf2, 0x09, 0x30,
+ 0x0a, 0x19, 0x03, 0x4f, 0x84, 0xa1, 0x72, 0x59, 0xf3, 0xb9, 0x7b, 0xef, 0xfe, 0xb2, 0x0e, 0xde,
+ 0x63, 0xa1, 0x27, 0xd7, 0xdd, 0xd9, 0xf6, 0xcc, 0xa5, 0x3c, 0xa3, 0x3b, 0x3a, 0x2b, 0x01, 0xeb,
+ 0x8e, 0xb7, 0x70, 0x87, 0x0a, 0xc3, 0xba, 0x43, 0x80, 0xcc, 0x1d, 0x3d, 0x4d, 0x8c, 0x6b, 0x5f,
+ 0x8f, 0xbb, 0x77, 0x76, 0x17, 0x9a, 0x31, 0xb6, 0xac, 0xa5, 0x37, 0x1c, 0x1d, 0x90, 0xde, 0xb0,
+ 0x5d, 0x8c, 0xdc, 0x86, 0xfd, 0x84, 0x44, 0x6c, 0x15, 0x11, 0xc7, 0x09, 0xb6, 0x1f, 0xce, 0x01,
+ 0x4d, 0x2d, 0x57, 0x59, 0x90, 0xfd, 0x05, 0x2c, 0x6f, 0x95, 0x12, 0x93, 0x1d, 0xc3, 0x9b, 0xb9,
+ 0xbd, 0x65, 0xc1, 0xd6, 0x55, 0x04, 0x19, 0x29, 0x94, 0xbe, 0x94, 0x46, 0x93, 0xbf, 0x7a, 0x34,
+ 0xb9, 0x08, 0x56, 0x8d, 0xa6, 0x01, 0x6c, 0xe2, 0x9c, 0x8b, 0x14, 0x8e, 0x7a, 0xbb, 0x23, 0x3b,
+ 0x80, 0x22, 0x5b, 0x2b, 0xa6, 0x3d, 0x89, 0x48, 0xbe, 0x41, 0x1c, 0xb7, 0x62, 0x7d, 0x68, 0x51,
+ 0xdd, 0xe8, 0x4e, 0xdb, 0xd5, 0xee, 0x6b, 0x8e, 0xa4, 0x82, 0xf6, 0x4a, 0xd3, 0x6e, 0x5c, 0xa9,
+ 0x69, 0x9f, 0xc2, 0x1b, 0x4b, 0xc2, 0xec, 0x63, 0xcc, 0x6c, 0xb6, 0x79, 0xd9, 0xdd, 0x49, 0x3c,
+ 0x9e, 0x63, 0xd9, 0x0e, 0xf8, 0xb6, 0xce, 0xa5, 0xd6, 0x32, 0xeb, 0x60, 0x9f, 0x2f, 0x36, 0xba,
+ 0x3f, 0xd7, 0xa1, 0x49, 0x94, 0xeb, 0x9e, 0x75, 0x74, 0xec, 0x52, 0x0f, 0x3c, 0x84, 0x8d, 0x50,
+ 0x45, 0x73, 0xba, 0x57, 0x24, 0xcd, 0xbb, 0xed, 0x2a, 0x89, 0x7d, 0x87, 0xc3, 0x5b, 0xcd, 0xfd,
+ 0x66, 0x89, 0x7b, 0x08, 0x5e, 0x94, 0x8a, 0x19, 0xdd, 0x69, 0x95, 0x27, 0x8f, 0x8e, 0xfa, 0x87,
+ 0xdf, 0xa7, 0x59, 0x0d, 0xb6, 0xd0, 0x51, 0xcf, 0x6e, 0x70, 0x47, 0xeb, 0xfe, 0x83, 0x01, 0x19,
+ 0x4e, 0xcf, 0xb4, 0x91, 0xea, 0xba, 0x03, 0x42, 0xc7, 0x2e, 0x05, 0x64, 0x08, 0x4d, 0x95, 0x24,
+ 0x66, 0x1c, 0x88, 0xcb, 0x62, 0xc1, 0x11, 0x32, 0xec, 0x0f, 0xb6, 0x2c, 0xd1, 0xb6, 0x7c, 0xb6,
+ 0xe6, 0x0d, 0x4b, 0x1d, 0x0a, 0x1c, 0xc7, 0xb7, 0xf2, 0x41, 0x79, 0x82, 0x3b, 0xda, 0x28, 0x91,
+ 0x8e, 0x27, 0xf2, 0xdc, 0x5e, 0xfe, 0x6b, 0xab, 0xae, 0xed, 0x83, 0x38, 0x50, 0xe7, 0x2e, 0x50,
+ 0xdf, 0xc8, 0x73, 0x7e, 0x93, 0x04, 0x06, 0x39, 0x1f, 0x37, 0x35, 0xfb, 0x12, 0x76, 0x64, 0x01,
+ 0xb3, 0x8a, 0xe3, 0x29, 0xfe, 0x77, 0xc2, 0x2b, 0x60, 0x1c, 0x4c, 0x51, 0xd1, 0x4d, 0x21, 0x8f,
+ 0xdf, 0x96, 0x65, 0xa9, 0x6f, 0x33, 0xc4, 0xd0, 0x02, 0x06, 0x3b, 0x17, 0x2f, 0x76, 0x6f, 0xfc,
+ 0x89, 0xcf, 0xbf, 0x2f, 0x76, 0x6b, 0x3f, 0xbd, 0xdc, 0xad, 0x5d, 0xe0, 0xf3, 0x07, 0x3e, 0x7f,
+ 0xe1, 0x73, 0xd2, 0x70, 0xff, 0x20, 0x3f, 0xfa, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x76, 0xa2, 0xea,
+ 0x9b, 0xb1, 0x0a, 0x00, 0x00,
+}
diff --git a/vendor/src/github.com/docker/swarmkit/api/objects.proto b/vendor/src/github.com/docker/swarmkit/api/objects.proto
new file mode 100644
index 0000000000..37bd6bd6a1
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/objects.proto
@@ -0,0 +1,207 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "types.proto";
+import "specs.proto";
+import "timestamp/timestamp.proto"; // TODO(stevvooe): use our own until we fix gogoproto/deepcopy
+import "gogoproto/gogo.proto";
+
+// This file contains definitions for all first-class objects in the cluster
+// API. Such types typically, though not always, have a corresponding
+// specification named XXXSpec.
+
+// Meta contains metadata about objects. Every object contains a meta field.
+message Meta {
+ // Version tracks the current version of the object.
+ Version version = 1 [(gogoproto.nullable) = false];
+
+ // Object timestamps.
+ Timestamp created_at = 2;
+ Timestamp updated_at = 3;
+}
+
+// Node provides the internal node state as seen by the cluster.
+message Node {
+ // ID specifies the identity of the node.
+ string id = 1 [(gogoproto.customname) = "ID"];
+
+ Meta meta = 2 [(gogoproto.nullable) = false];
+
+ // Spec defines the desired state of the node as specified by the user.
+ // The system will honor this and will *never* modify it.
+ NodeSpec spec = 3 [(gogoproto.nullable) = false];
+
+ // Description encapsulates the properties of the Node as reported by the
+ // agent.
+ NodeDescription description = 4;
+
+ // Status provides the current status of the node, as seen by the manager.
+ NodeStatus status = 5 [(gogoproto.nullable) = false];
+
+ // Status of the manager. If the node is not a manager, this field will not
+ // be set.
+ ManagerStatus manager_status = 6;
+
+ // The node attachment to the ingress network.
+ NetworkAttachment attachment = 7;
+
+ // Certificate is the TLS certificate issued for the node, if any.
+ Certificate certificate = 8 [(gogoproto.nullable) = false];
+}
+
+message Service {
+ string id = 1 [(gogoproto.customname) = "ID"];
+
+ Meta meta = 2 [(gogoproto.nullable) = false];
+
+ ServiceSpec spec = 3 [(gogoproto.nullable) = false];
+
+ // Runtime state of the service endpoint. This may differ from the
+ // spec because the user may not have entered optional fields like
+ // node_port or virtual_ip, which the system may then auto-allocate.
+ Endpoint endpoint = 4;
+}
+
+// Endpoint specifies all the network parameters required to
+// correctly discover and load-balance a service.
+message Endpoint {
+ EndpointSpec spec = 1;
+
+ // Runtime state of the exposed ports which may carry
+ // auto-allocated swarm ports in addition to the user
+ // configured information.
+ repeated PortConfig ports = 2;
+
+ // An endpoint attachment specifies the data that the process
+ // of attaching an endpoint to a network creates.
+
+ // VirtualIP specifies a set of networks this endpoint will be attached to
+ // and the IP addresses the target service will be made available under.
+ message VirtualIP {
+ // NetworkID for which this endpoint attachment was created.
+ string network_id = 1 [(gogoproto.customname) = "NetworkID"];
+
+ // A virtual IP is the IP-layer address a client can use to
+ // send requests to this service. A DNS A/AAAA query on the
+ // service name might return this IP to the client. This is
+ // strictly a logical IP; there may be no interface assigned
+ // this IP address and no route created for it. More than one
+ // entry may be present to accommodate both IPv4 and IPv6.
+ string addr = 2;
+ }
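+
+ // For example, on a dual-stack network a service could carry two
+ // VirtualIP entries with the same network_id, one with
+ // addr = "10.0.0.2/24" and another with addr = "fd00::2/64"
+ // (illustrative values only).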
+
+ // VirtualIPs specifies the IP addresses under which this endpoint will be
+ // made available.
+ repeated VirtualIP virtual_ips = 3 [(gogoproto.customname) = "VirtualIPs"];
+}
+
+// Task specifies the parameters for implementing a Spec. A task is effectively
+// immutable and idempotent. Once it is dispatched to a node, it will not be
+// dispatched to another node.
+message Task {
+ string id = 1 [(gogoproto.customname) = "ID"];
+
+ Meta meta = 2 [(gogoproto.nullable) = false];
+
+ // Spec defines the desired state of the task as specified by the user.
+ // The system will honor this and will *never* modify it.
+ TaskSpec spec = 3 [(gogoproto.nullable) = false];
+
+ // ServiceID indicates the service under which this task is orchestrated. This
+ // should almost always be set.
+ string service_id = 4 [(gogoproto.customname) = "ServiceID"];
+
+ // Slot is the service slot number for a task.
+ // For example, if a replicated service has replicas = 2, there will be a
+ // task with slot = 1, and another with slot = 2.
+ uint64 slot = 5;
+
+ // NodeID indicates the node to which the task is assigned. If this field
+ // is empty or not set, the task is unassigned.
+ string node_id = 6 [(gogoproto.customname) = "NodeID"];
+
+ // Annotations defines the names and labels for the runtime, as set by
+ // the cluster manager.
+ //
+ // As a fallback, if this field has an empty name, the runtime will
+ // allocate a unique name for the actual container.
+ //
+ // NOTE(stevvooe): This preserves the ability for us to make naming
+ // decisions for tasks in the orchestrator, although it is left empty for now.
+ Annotations annotations = 7 [(gogoproto.nullable) = false];
+
+ // ServiceAnnotations is a direct copy of the service name and labels when
+ // this task is created.
+ //
+ // Labels set here will *not* be propagated to the runtime target, such as a
+ // container. Use labels on the runtime target for that purpose.
+ Annotations service_annotations = 8 [(gogoproto.nullable) = false];
+
+ TaskStatus status = 9 [(gogoproto.nullable) = false];
+
+ // DesiredState is the target state for the task. It is set to
+ // TaskStateRunning when a task is first created, and changed to
+ // TaskStateShutdown if the manager wants to terminate the task. This field
+ // is only written by the manager.
+ TaskState desired_state = 10;
+
+ // List of network attachments by the task.
+ repeated NetworkAttachment networks = 11;
+
+ // A copy of the runtime state of the service endpoint from the
+ // Service object, distributed to agents as part of the task.
+ Endpoint endpoint = 12;
+}
+
+// NetworkAttachment specifies the network parameters of attachment to
+// a single network by an object such as task or node.
+message NetworkAttachment {
+ // Network state as a whole becomes part of the object so that it
+ // is always available to agents, leaving them without any other
+ // dependency during execution.
+ Network network = 1;
+
+ // List of IPv4/IPv6 addresses that are assigned to the object
+ // as part of getting attached to this network.
+ repeated string addresses = 2;
+}
+
+message Network {
+ string id = 1 [(gogoproto.customname) = "ID"];
+
+ Meta meta = 2 [(gogoproto.nullable) = false];
+
+ NetworkSpec spec = 3 [(gogoproto.nullable) = false];
+
+ // Driver specific operational state provided by the network driver.
+ Driver driver_state = 4;
+
+ // Runtime state of IPAM options. This may not reflect the
+ // IPAM options from NetworkSpec.
+ IPAMOptions ipam = 5 [(gogoproto.customname) = "IPAM"];
+}
+
+// Cluster provides global cluster settings.
+message Cluster {
+ string id = 1 [(gogoproto.customname) = "ID"];
+
+ Meta meta = 2 [(gogoproto.nullable) = false];
+
+ ClusterSpec spec = 3 [(gogoproto.nullable) = false];
+
+ // RootCA contains key material for the root CA.
+ RootCA root_ca = 4 [(gogoproto.nullable)=false, (gogoproto.customname) = "RootCA"];
+
+ // Symmetric encryption key distributed by the lead manager. Used by agents
+ // for securing network bootstrapping and communication.
+ repeated EncryptionKey network_bootstrap_keys = 5;
+
+ // Logical clock used to timestamp every key. It allows other managers
+ // and agents to unambiguously identify the older key to be deleted when
+ // a new key is allocated on key rotation.
+ uint64 encryption_key_lamport_clock = 6;
+}
diff --git a/vendor/src/github.com/docker/swarmkit/api/raft.pb.go b/vendor/src/github.com/docker/swarmkit/api/raft.pb.go
new file mode 100644
index 0000000000..f3a819dc78
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/raft.pb.go
@@ -0,0 +1,2764 @@
+// Code generated by protoc-gen-gogo.
+// source: raft.proto
+// DO NOT EDIT!
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import raftpb "github.com/coreos/etcd/raft/raftpb"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
+// skipping weak import docker_protobuf_plugin "github.com/docker/swarmkit/protobuf/plugin"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
+import codes "google.golang.org/grpc/codes"
+import metadata "google.golang.org/grpc/metadata"
+import transport "google.golang.org/grpc/transport"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// StoreActionKind defines the operation to take on the store for the target of
+// a storage action.
+type StoreActionKind int32
+
+const (
+ StoreActionKindUnknown StoreActionKind = 0
+ StoreActionKindCreate StoreActionKind = 1
+ StoreActionKindUpdate StoreActionKind = 2
+ StoreActionKindRemove StoreActionKind = 3
+)
+
+var StoreActionKind_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "STORE_ACTION_CREATE",
+ 2: "STORE_ACTION_UPDATE",
+ 3: "STORE_ACTION_REMOVE",
+}
+var StoreActionKind_value = map[string]int32{
+ "UNKNOWN": 0,
+ "STORE_ACTION_CREATE": 1,
+ "STORE_ACTION_UPDATE": 2,
+ "STORE_ACTION_REMOVE": 3,
+}
+
+func (x StoreActionKind) String() string {
+ return proto.EnumName(StoreActionKind_name, int32(x))
+}
+func (StoreActionKind) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} }
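+
+// As a small illustration of the mapping above (hypothetical caller code):
+// StoreActionKindCreate.String() returns "STORE_ACTION_CREATE", because
+// String delegates to proto.EnumName with StoreActionKind_name.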
+
+type JoinRequest struct {
+ // Addr specifies the address of the member
+ Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"`
+}
+
+func (m *JoinRequest) Reset() { *m = JoinRequest{} }
+func (*JoinRequest) ProtoMessage() {}
+func (*JoinRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} }
+
+type JoinResponse struct {
+ Members []*RaftMember `protobuf:"bytes,1,rep,name=members" json:"members,omitempty"`
+}
+
+func (m *JoinResponse) Reset() { *m = JoinResponse{} }
+func (*JoinResponse) ProtoMessage() {}
+func (*JoinResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} }
+
+type LeaveRequest struct {
+ Node *RaftMember `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"`
+}
+
+func (m *LeaveRequest) Reset() { *m = LeaveRequest{} }
+func (*LeaveRequest) ProtoMessage() {}
+func (*LeaveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} }
+
+type LeaveResponse struct {
+}
+
+func (m *LeaveResponse) Reset() { *m = LeaveResponse{} }
+func (*LeaveResponse) ProtoMessage() {}
+func (*LeaveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} }
+
+type ProcessRaftMessageRequest struct {
+ Message *raftpb.Message `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"`
+}
+
+func (m *ProcessRaftMessageRequest) Reset() { *m = ProcessRaftMessageRequest{} }
+func (*ProcessRaftMessageRequest) ProtoMessage() {}
+func (*ProcessRaftMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} }
+
+type ProcessRaftMessageResponse struct {
+}
+
+func (m *ProcessRaftMessageResponse) Reset() { *m = ProcessRaftMessageResponse{} }
+func (*ProcessRaftMessageResponse) ProtoMessage() {}
+func (*ProcessRaftMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} }
+
+type ResolveAddressRequest struct {
+ // raft_id is the ID to resolve to an address.
+ RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"`
+}
+
+func (m *ResolveAddressRequest) Reset() { *m = ResolveAddressRequest{} }
+func (*ResolveAddressRequest) ProtoMessage() {}
+func (*ResolveAddressRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} }
+
+type ResolveAddressResponse struct {
+ // Addr specifies the address of the member
+ Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"`
+}
+
+func (m *ResolveAddressResponse) Reset() { *m = ResolveAddressResponse{} }
+func (*ResolveAddressResponse) ProtoMessage() {}
+func (*ResolveAddressResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} }
+
+// InternalRaftRequest contains one of many protobuf-encoded objects to
+// replicate over the raft backend, with a request ID to track when the
+// action is effectively applied.
+type InternalRaftRequest struct {
+ ID uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Action []*StoreAction `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
+}
+
+func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} }
+func (*InternalRaftRequest) ProtoMessage() {}
+func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{8} }
+
+// StoreAction defines a target and an operation to apply on the storage system.
+type StoreAction struct {
+ Action StoreActionKind `protobuf:"varint,1,opt,name=action,proto3,enum=docker.swarmkit.v1.StoreActionKind" json:"action,omitempty"`
+ // Types that are valid to be assigned to Target:
+ // *StoreAction_Node
+ // *StoreAction_Service
+ // *StoreAction_Task
+ // *StoreAction_Network
+ // *StoreAction_Cluster
+ Target isStoreAction_Target `protobuf_oneof:"target"`
+}
+
+func (m *StoreAction) Reset() { *m = StoreAction{} }
+func (*StoreAction) ProtoMessage() {}
+func (*StoreAction) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{9} }
+
+type isStoreAction_Target interface {
+ isStoreAction_Target()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type StoreAction_Node struct {
+ Node *Node `protobuf:"bytes,2,opt,name=node,oneof"`
+}
+type StoreAction_Service struct {
+ Service *Service `protobuf:"bytes,3,opt,name=service,oneof"`
+}
+type StoreAction_Task struct {
+ Task *Task `protobuf:"bytes,4,opt,name=task,oneof"`
+}
+type StoreAction_Network struct {
+ Network *Network `protobuf:"bytes,5,opt,name=network,oneof"`
+}
+type StoreAction_Cluster struct {
+ Cluster *Cluster `protobuf:"bytes,6,opt,name=cluster,oneof"`
+}
+
+func (*StoreAction_Node) isStoreAction_Target() {}
+func (*StoreAction_Service) isStoreAction_Target() {}
+func (*StoreAction_Task) isStoreAction_Target() {}
+func (*StoreAction_Network) isStoreAction_Target() {}
+func (*StoreAction_Cluster) isStoreAction_Target() {}
+
+func (m *StoreAction) GetTarget() isStoreAction_Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *StoreAction) GetNode() *Node {
+ if x, ok := m.GetTarget().(*StoreAction_Node); ok {
+ return x.Node
+ }
+ return nil
+}
+
+func (m *StoreAction) GetService() *Service {
+ if x, ok := m.GetTarget().(*StoreAction_Service); ok {
+ return x.Service
+ }
+ return nil
+}
+
+func (m *StoreAction) GetTask() *Task {
+ if x, ok := m.GetTarget().(*StoreAction_Task); ok {
+ return x.Task
+ }
+ return nil
+}
+
+func (m *StoreAction) GetNetwork() *Network {
+ if x, ok := m.GetTarget().(*StoreAction_Network); ok {
+ return x.Network
+ }
+ return nil
+}
+
+func (m *StoreAction) GetCluster() *Cluster {
+ if x, ok := m.GetTarget().(*StoreAction_Cluster); ok {
+ return x.Cluster
+ }
+ return nil
+}
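+
+// A StoreAction carries exactly one target object. As a minimal usage
+// sketch (hypothetical caller code, not part of the generated API), the
+// target is typically recovered either through the Get* accessors above
+// or with a type switch:
+//
+//	switch t := action.Target.(type) {
+//	case *StoreAction_Node:
+//		fmt.Println("node:", t.Node.ID)
+//	case *StoreAction_Task:
+//		fmt.Println("task:", t.Task.ID)
+//	}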
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*StoreAction) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _StoreAction_OneofMarshaler, _StoreAction_OneofUnmarshaler, _StoreAction_OneofSizer, []interface{}{
+ (*StoreAction_Node)(nil),
+ (*StoreAction_Service)(nil),
+ (*StoreAction_Task)(nil),
+ (*StoreAction_Network)(nil),
+ (*StoreAction_Cluster)(nil),
+ }
+}
+
+func _StoreAction_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*StoreAction)
+ // target
+ switch x := m.Target.(type) {
+ case *StoreAction_Node:
+ _ = b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Node); err != nil {
+ return err
+ }
+ case *StoreAction_Service:
+ _ = b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Service); err != nil {
+ return err
+ }
+ case *StoreAction_Task:
+ _ = b.EncodeVarint(4<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Task); err != nil {
+ return err
+ }
+ case *StoreAction_Network:
+ _ = b.EncodeVarint(5<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Network); err != nil {
+ return err
+ }
+ case *StoreAction_Cluster:
+ _ = b.EncodeVarint(6<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Cluster); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("StoreAction.Target has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _StoreAction_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*StoreAction)
+ switch tag {
+ case 2: // target.node
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Node)
+ err := b.DecodeMessage(msg)
+ m.Target = &StoreAction_Node{msg}
+ return true, err
+ case 3: // target.service
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Service)
+ err := b.DecodeMessage(msg)
+ m.Target = &StoreAction_Service{msg}
+ return true, err
+ case 4: // target.task
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Task)
+ err := b.DecodeMessage(msg)
+ m.Target = &StoreAction_Task{msg}
+ return true, err
+ case 5: // target.network
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Network)
+ err := b.DecodeMessage(msg)
+ m.Target = &StoreAction_Network{msg}
+ return true, err
+ case 6: // target.cluster
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Cluster)
+ err := b.DecodeMessage(msg)
+ m.Target = &StoreAction_Cluster{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _StoreAction_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*StoreAction)
+ // target
+ switch x := m.Target.(type) {
+ case *StoreAction_Node:
+ s := proto.Size(x.Node)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *StoreAction_Service:
+ s := proto.Size(x.Service)
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *StoreAction_Task:
+ s := proto.Size(x.Task)
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *StoreAction_Network:
+ s := proto.Size(x.Network)
+ n += proto.SizeVarint(5<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *StoreAction_Cluster:
+ s := proto.Size(x.Cluster)
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+func init() {
+ proto.RegisterType((*JoinRequest)(nil), "docker.swarmkit.v1.JoinRequest")
+ proto.RegisterType((*JoinResponse)(nil), "docker.swarmkit.v1.JoinResponse")
+ proto.RegisterType((*LeaveRequest)(nil), "docker.swarmkit.v1.LeaveRequest")
+ proto.RegisterType((*LeaveResponse)(nil), "docker.swarmkit.v1.LeaveResponse")
+ proto.RegisterType((*ProcessRaftMessageRequest)(nil), "docker.swarmkit.v1.ProcessRaftMessageRequest")
+ proto.RegisterType((*ProcessRaftMessageResponse)(nil), "docker.swarmkit.v1.ProcessRaftMessageResponse")
+ proto.RegisterType((*ResolveAddressRequest)(nil), "docker.swarmkit.v1.ResolveAddressRequest")
+ proto.RegisterType((*ResolveAddressResponse)(nil), "docker.swarmkit.v1.ResolveAddressResponse")
+ proto.RegisterType((*InternalRaftRequest)(nil), "docker.swarmkit.v1.InternalRaftRequest")
+ proto.RegisterType((*StoreAction)(nil), "docker.swarmkit.v1.StoreAction")
+ proto.RegisterEnum("docker.swarmkit.v1.StoreActionKind", StoreActionKind_name, StoreActionKind_value)
+}
+
+type authenticatedWrapperRaftServer struct {
+ local RaftServer
+ authorize func(context.Context, []string) error
+}
+
+func NewAuthenticatedWrapperRaftServer(local RaftServer, authorize func(context.Context, []string) error) RaftServer {
+ return &authenticatedWrapperRaftServer{
+ local: local,
+ authorize: authorize,
+ }
+}
+
+func (p *authenticatedWrapperRaftServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.Join(ctx, r)
+}
+
+func (p *authenticatedWrapperRaftServer) Leave(ctx context.Context, r *LeaveRequest) (*LeaveResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.Leave(ctx, r)
+}
+
+func (p *authenticatedWrapperRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.ProcessRaftMessage(ctx, r)
+}
+
+func (p *authenticatedWrapperRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) {
+
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
+ return nil, err
+ }
+ return p.local.ResolveAddress(ctx, r)
+}
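+
+// As a wiring sketch (hypothetical setup code; `srv` and `grpcServer` are
+// assumed to exist), every RPC on the wrapped server first runs the
+// supplied authorize callback before delegating to the local server:
+//
+//	wrapped := NewAuthenticatedWrapperRaftServer(srv,
+//		func(ctx context.Context, roles []string) error {
+//			// verify that the caller's identity carries one of roles
+//			return nil
+//		})
+//	RegisterRaftServer(grpcServer, wrapped)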
+
+func (m *JoinRequest) Copy() *JoinRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &JoinRequest{
+ Addr: m.Addr,
+ }
+
+ return o
+}
+
+func (m *JoinResponse) Copy() *JoinResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &JoinResponse{}
+
+ if m.Members != nil {
+ o.Members = make([]*RaftMember, 0, len(m.Members))
+ for _, v := range m.Members {
+ o.Members = append(o.Members, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *LeaveRequest) Copy() *LeaveRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &LeaveRequest{
+ Node: m.Node.Copy(),
+ }
+
+ return o
+}
+
+func (m *LeaveResponse) Copy() *LeaveResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &LeaveResponse{}
+
+ return o
+}
+
+func (m *ProcessRaftMessageResponse) Copy() *ProcessRaftMessageResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &ProcessRaftMessageResponse{}
+
+ return o
+}
+
+func (m *ResolveAddressRequest) Copy() *ResolveAddressRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &ResolveAddressRequest{
+ RaftID: m.RaftID,
+ }
+
+ return o
+}
+
+func (m *ResolveAddressResponse) Copy() *ResolveAddressResponse {
+ if m == nil {
+ return nil
+ }
+
+ o := &ResolveAddressResponse{
+ Addr: m.Addr,
+ }
+
+ return o
+}
+
+func (m *InternalRaftRequest) Copy() *InternalRaftRequest {
+ if m == nil {
+ return nil
+ }
+
+ o := &InternalRaftRequest{
+ ID: m.ID,
+ }
+
+ if m.Action != nil {
+ o.Action = make([]*StoreAction, 0, len(m.Action))
+ for _, v := range m.Action {
+ o.Action = append(o.Action, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *StoreAction) Copy() *StoreAction {
+ if m == nil {
+ return nil
+ }
+
+ o := &StoreAction{
+ Action: m.Action,
+ }
+
+ switch m.Target.(type) {
+ case *StoreAction_Node:
+ i := &StoreAction_Node{
+ Node: m.GetNode().Copy(),
+ }
+
+ o.Target = i
+ case *StoreAction_Service:
+ i := &StoreAction_Service{
+ Service: m.GetService().Copy(),
+ }
+
+ o.Target = i
+ case *StoreAction_Task:
+ i := &StoreAction_Task{
+ Task: m.GetTask().Copy(),
+ }
+
+ o.Target = i
+ case *StoreAction_Network:
+ i := &StoreAction_Network{
+ Network: m.GetNetwork().Copy(),
+ }
+
+ o.Target = i
+ case *StoreAction_Cluster:
+ i := &StoreAction_Cluster{
+ Cluster: m.GetCluster().Copy(),
+ }
+
+ o.Target = i
+ }
+
+ return o
+}
+
+func (this *JoinRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.JoinRequest{")
+ s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *JoinResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.JoinResponse{")
+ if this.Members != nil {
+ s = append(s, "Members: "+fmt.Sprintf("%#v", this.Members)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *LeaveRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.LeaveRequest{")
+ if this.Node != nil {
+ s = append(s, "Node: "+fmt.Sprintf("%#v", this.Node)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *LeaveResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&api.LeaveResponse{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ProcessRaftMessageRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ProcessRaftMessageRequest{")
+ if this.Message != nil {
+ s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ProcessRaftMessageResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&api.ProcessRaftMessageResponse{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ResolveAddressRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ResolveAddressRequest{")
+ s = append(s, "RaftID: "+fmt.Sprintf("%#v", this.RaftID)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ResolveAddressResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ResolveAddressResponse{")
+ s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *InternalRaftRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.InternalRaftRequest{")
+ s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n")
+ if this.Action != nil {
+ s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *StoreAction) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 10)
+ s = append(s, "&api.StoreAction{")
+ s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n")
+ if this.Target != nil {
+ s = append(s, "Target: "+fmt.Sprintf("%#v", this.Target)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *StoreAction_Node) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&api.StoreAction_Node{` +
+ `Node:` + fmt.Sprintf("%#v", this.Node) + `}`}, ", ")
+ return s
+}
+func (this *StoreAction_Service) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&api.StoreAction_Service{` +
+ `Service:` + fmt.Sprintf("%#v", this.Service) + `}`}, ", ")
+ return s
+}
+func (this *StoreAction_Task) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&api.StoreAction_Task{` +
+ `Task:` + fmt.Sprintf("%#v", this.Task) + `}`}, ", ")
+ return s
+}
+func (this *StoreAction_Network) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&api.StoreAction_Network{` +
+ `Network:` + fmt.Sprintf("%#v", this.Network) + `}`}, ", ")
+ return s
+}
+func (this *StoreAction_Cluster) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&api.StoreAction_Cluster{` +
+ `Cluster:` + fmt.Sprintf("%#v", this.Cluster) + `}`}, ", ")
+ return s
+}
+func valueToGoStringRaft(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringRaft(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion2
+
+// Client API for Raft service
+
+type RaftClient interface {
+ // Join adds a RaftMember to the raft cluster.
+ Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*JoinResponse, error)
+ // Leave removes a RaftMember from the raft cluster.
+ Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*LeaveResponse, error)
+ // ProcessRaftMessage sends a raft message to be processed on a raft member. It is
+ // called by the RaftMember that wants to send a message to its destination ('To' field).
+ ProcessRaftMessage(ctx context.Context, in *ProcessRaftMessageRequest, opts ...grpc.CallOption) (*ProcessRaftMessageResponse, error)
+ // ResolveAddress returns the address where the node with the given ID can be reached.
+ ResolveAddress(ctx context.Context, in *ResolveAddressRequest, opts ...grpc.CallOption) (*ResolveAddressResponse, error)
+}
+
+type raftClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewRaftClient(cc *grpc.ClientConn) RaftClient {
+ return &raftClient{cc}
+}
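+
+// Minimal client-side sketch (hypothetical address and insecure transport,
+// for illustration only):
+//
+//	conn, err := grpc.Dial("manager.example:4242", grpc.WithInsecure())
+//	if err != nil {
+//		// handle dial error
+//	}
+//	client := NewRaftClient(conn)
+//	resp, err := client.Join(ctx, &JoinRequest{Addr: "10.0.0.2:4242"})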
+
+func (c *raftClient) Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*JoinResponse, error) {
+ out := new(JoinResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/Join", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *raftClient) Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*LeaveResponse, error) {
+ out := new(LeaveResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/Leave", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *raftClient) ProcessRaftMessage(ctx context.Context, in *ProcessRaftMessageRequest, opts ...grpc.CallOption) (*ProcessRaftMessageResponse, error) {
+ out := new(ProcessRaftMessageResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/ProcessRaftMessage", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *raftClient) ResolveAddress(ctx context.Context, in *ResolveAddressRequest, opts ...grpc.CallOption) (*ResolveAddressResponse, error) {
+ out := new(ResolveAddressResponse)
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/ResolveAddress", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Raft service
+
+type RaftServer interface {
+ // Join adds a RaftMember to the raft cluster.
+ Join(context.Context, *JoinRequest) (*JoinResponse, error)
+ // Leave removes a RaftMember from the raft cluster.
+ Leave(context.Context, *LeaveRequest) (*LeaveResponse, error)
+ // ProcessRaftMessage sends a raft message to be processed on a raft member. It is
+ // called by the RaftMember that wants to send a message to its destination ('To' field).
+ ProcessRaftMessage(context.Context, *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error)
+ // ResolveAddress returns the address where the node with the given ID can be reached.
+ ResolveAddress(context.Context, *ResolveAddressRequest) (*ResolveAddressResponse, error)
+}
+
+func RegisterRaftServer(s *grpc.Server, srv RaftServer) {
+ s.RegisterService(&_Raft_serviceDesc, srv)
+}
+
+func _Raft_Join_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(JoinRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(RaftServer).Join(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Raft/Join",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(RaftServer).Join(ctx, req.(*JoinRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Raft_Leave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(RaftServer).Leave(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Raft/Leave",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(RaftServer).Leave(ctx, req.(*LeaveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Raft_ProcessRaftMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ProcessRaftMessageRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(RaftServer).ProcessRaftMessage(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Raft/ProcessRaftMessage",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(RaftServer).ProcessRaftMessage(ctx, req.(*ProcessRaftMessageRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Raft_ResolveAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ResolveAddressRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(RaftServer).ResolveAddress(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/docker.swarmkit.v1.Raft/ResolveAddress",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(RaftServer).ResolveAddress(ctx, req.(*ResolveAddressRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Raft_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "docker.swarmkit.v1.Raft",
+ HandlerType: (*RaftServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Join",
+ Handler: _Raft_Join_Handler,
+ },
+ {
+ MethodName: "Leave",
+ Handler: _Raft_Leave_Handler,
+ },
+ {
+ MethodName: "ProcessRaftMessage",
+ Handler: _Raft_ProcessRaftMessage_Handler,
+ },
+ {
+ MethodName: "ResolveAddress",
+ Handler: _Raft_ResolveAddress_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+}
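+
+// Editorial note: a minimal usage sketch for the generated bindings above.
+// The listen/dial addresses, ctx, and myRaftServer are assumptions for
+// illustration, not part of this file:
+//
+//    srv := grpc.NewServer()
+//    RegisterRaftServer(srv, myRaftServer) // myRaftServer implements RaftServer
+//
+//    conn, err := grpc.Dial("manager-host:4242", grpc.WithInsecure())
+//    if err != nil {
+//        // handle the dial error
+//    }
+//    resp, err := NewRaftClient(conn).Join(ctx, &JoinRequest{Addr: "10.0.0.2:4242"})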
+
+func (m *JoinRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JoinRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Addr) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintRaft(data, i, uint64(len(m.Addr)))
+ i += copy(data[i:], m.Addr)
+ }
+ return i, nil
+}
+
+func (m *JoinResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JoinResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Members) > 0 {
+ for _, msg := range m.Members {
+ data[i] = 0xa
+ i++
+ i = encodeVarintRaft(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *LeaveRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LeaveRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Node != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Node.Size()))
+ n1, err := m.Node.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ return i, nil
+}
+
+func (m *LeaveResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LeaveResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *ProcessRaftMessageRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ProcessRaftMessageRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Message != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Message.Size()))
+ n2, err := m.Message.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ return i, nil
+}
+
+func (m *ProcessRaftMessageResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ProcessRaftMessageResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *ResolveAddressRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ResolveAddressRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.RaftID != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.RaftID))
+ }
+ return i, nil
+}
+
+func (m *ResolveAddressResponse) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ResolveAddressResponse) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Addr) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintRaft(data, i, uint64(len(m.Addr)))
+ i += copy(data[i:], m.Addr)
+ }
+ return i, nil
+}
+
+func (m *InternalRaftRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *InternalRaftRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.ID))
+ }
+ if len(m.Action) > 0 {
+ for _, msg := range m.Action {
+ data[i] = 0x12
+ i++
+ i = encodeVarintRaft(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *StoreAction) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *StoreAction) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Action != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Action))
+ }
+ if m.Target != nil {
+ nn3, err := m.Target.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn3
+ }
+ return i, nil
+}
+
+func (m *StoreAction_Node) MarshalTo(data []byte) (int, error) {
+ i := 0
+ if m.Node != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Node.Size()))
+ n4, err := m.Node.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
+ return i, nil
+}
+func (m *StoreAction_Service) MarshalTo(data []byte) (int, error) {
+ i := 0
+ if m.Service != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Service.Size()))
+ n5, err := m.Service.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ }
+ return i, nil
+}
+func (m *StoreAction_Task) MarshalTo(data []byte) (int, error) {
+ i := 0
+ if m.Task != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Task.Size()))
+ n6, err := m.Task.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ return i, nil
+}
+func (m *StoreAction_Network) MarshalTo(data []byte) (int, error) {
+ i := 0
+ if m.Network != nil {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Network.Size()))
+ n7, err := m.Network.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ return i, nil
+}
+func (m *StoreAction_Cluster) MarshalTo(data []byte) (int, error) {
+ i := 0
+ if m.Cluster != nil {
+ data[i] = 0x32
+ i++
+ i = encodeVarintRaft(data, i, uint64(m.Cluster.Size()))
+ n8, err := m.Cluster.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ }
+ return i, nil
+}
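+
+// Editorial note: the StoreAction_* wrappers above are the generated
+// representation of the proto `oneof target`; exactly one of them is held in
+// StoreAction.Target at a time. A minimal construction sketch (the empty
+// Node is illustrative only):
+//
+//    action := &StoreAction{
+//        Action: StoreActionKindCreate,
+//        Target: &StoreAction_Node{Node: &Node{}},
+//    }
+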
+func encodeFixed64Raft(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Raft(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintRaft(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
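+
+// Editorial note: encodeVarintRaft writes protobuf base-128 varints, low
+// seven bits first, with the high bit set on every byte except the last.
+// Worked example: 300 (0b1_0010_1100) encodes as 0xAC 0x02, because
+// 300&0x7f|0x80 == 0xAC and 300>>7 == 2.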
+
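+// raftProxyRaftServer (constructed by NewRaftProxyRaftServer below) serves a
+// request locally when this node is the raft leader and otherwise forwards
+// it to the leader over the connection chosen by connSelector, after
+// stamping the context with "redirect" metadata so a second redirect hop is
+// rejected rather than looping.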
+type raftProxyRaftServer struct {
+ local RaftServer
+ connSelector *raftpicker.ConnSelector
+ cluster raftpicker.RaftCluster
+ ctxMods []func(context.Context) (context.Context, error)
+}
+
+func NewRaftProxyRaftServer(local RaftServer, connSelector *raftpicker.ConnSelector, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) RaftServer {
+ redirectChecker := func(ctx context.Context) (context.Context, error) {
+ s, ok := transport.StreamFromContext(ctx)
+ if !ok {
+ return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ }
+ addr := s.ServerTransport().RemoteAddr().String()
+ md, ok := metadata.FromContext(ctx)
+ if ok && len(md["redirect"]) != 0 {
+ return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ }
+ if !ok {
+ md = metadata.New(map[string]string{})
+ }
+ md["redirect"] = append(md["redirect"], addr)
+ return metadata.NewContext(ctx, md), nil
+ }
+ mods := []func(context.Context) (context.Context, error){redirectChecker}
+ mods = append(mods, ctxMod)
+
+ return &raftProxyRaftServer{
+ local: local,
+ cluster: cluster,
+ connSelector: connSelector,
+ ctxMods: mods,
+ }
+}
+func (p *raftProxyRaftServer) runCtxMods(ctx context.Context) (context.Context, error) {
+ var err error
+ for _, mod := range p.ctxMods {
+ ctx, err = mod(ctx)
+ if err != nil {
+ return ctx, err
+ }
+ }
+ return ctx, nil
+}
+
+func (p *raftProxyRaftServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) {
+
+ if p.cluster.IsLeader() {
+ return p.local.Join(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewRaftClient(conn).Join(ctx, r)
+}
+
+func (p *raftProxyRaftServer) Leave(ctx context.Context, r *LeaveRequest) (*LeaveResponse, error) {
+
+ if p.cluster.IsLeader() {
+ return p.local.Leave(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewRaftClient(conn).Leave(ctx, r)
+}
+
+func (p *raftProxyRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) {
+
+ if p.cluster.IsLeader() {
+ return p.local.ProcessRaftMessage(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewRaftClient(conn).ProcessRaftMessage(ctx, r)
+}
+
+func (p *raftProxyRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) {
+
+ if p.cluster.IsLeader() {
+ return p.local.ResolveAddress(ctx, r)
+ }
+ ctx, err := p.runCtxMods(ctx)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := p.connSelector.Conn()
+ if err != nil {
+ return nil, err
+ }
+ return NewRaftClient(conn).ResolveAddress(ctx, r)
+}
+
+func (m *JoinRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Addr)
+ if l > 0 {
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+
+func (m *JoinResponse) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LeaveRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Node != nil {
+ l = m.Node.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+
+func (m *LeaveResponse) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *ProcessRaftMessageRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Message != nil {
+ l = m.Message.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+
+func (m *ProcessRaftMessageResponse) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *ResolveAddressRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.RaftID != 0 {
+ n += 1 + sovRaft(uint64(m.RaftID))
+ }
+ return n
+}
+
+func (m *ResolveAddressResponse) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Addr)
+ if l > 0 {
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+
+func (m *InternalRaftRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRaft(uint64(m.ID))
+ }
+ if len(m.Action) > 0 {
+ for _, e := range m.Action {
+ l = e.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *StoreAction) Size() (n int) {
+ var l int
+ _ = l
+ if m.Action != 0 {
+ n += 1 + sovRaft(uint64(m.Action))
+ }
+ if m.Target != nil {
+ n += m.Target.Size()
+ }
+ return n
+}
+
+func (m *StoreAction_Node) Size() (n int) {
+ var l int
+ _ = l
+ if m.Node != nil {
+ l = m.Node.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+func (m *StoreAction_Service) Size() (n int) {
+ var l int
+ _ = l
+ if m.Service != nil {
+ l = m.Service.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+func (m *StoreAction_Task) Size() (n int) {
+ var l int
+ _ = l
+ if m.Task != nil {
+ l = m.Task.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+func (m *StoreAction_Network) Size() (n int) {
+ var l int
+ _ = l
+ if m.Network != nil {
+ l = m.Network.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+func (m *StoreAction_Cluster) Size() (n int) {
+ var l int
+ _ = l
+ if m.Cluster != nil {
+ l = m.Cluster.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+
+func sovRaft(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozRaft(x uint64) (n int) {
+ return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
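+
+// Editorial note: sozRaft sizes a zigzag-encoded (sint64) value; the
+// transform (x<<1) ^ (x>>63) maps integers of small magnitude to small
+// unsigned values, e.g. 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3.
+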
+func (this *JoinRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&JoinRequest{`,
+ `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *JoinResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&JoinResponse{`,
+ `Members:` + strings.Replace(fmt.Sprintf("%v", this.Members), "RaftMember", "RaftMember", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LeaveRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LeaveRequest{`,
+ `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "RaftMember", "RaftMember", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LeaveResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LeaveResponse{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProcessRaftMessageRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ProcessRaftMessageRequest{`,
+ `Message:` + strings.Replace(fmt.Sprintf("%v", this.Message), "Message", "raftpb.Message", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProcessRaftMessageResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ProcessRaftMessageResponse{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResolveAddressRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResolveAddressRequest{`,
+ `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResolveAddressResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResolveAddressResponse{`,
+ `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *InternalRaftRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&InternalRaftRequest{`,
+ `ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+ `Action:` + strings.Replace(fmt.Sprintf("%v", this.Action), "StoreAction", "StoreAction", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StoreAction) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StoreAction{`,
+ `Action:` + fmt.Sprintf("%v", this.Action) + `,`,
+ `Target:` + fmt.Sprintf("%v", this.Target) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StoreAction_Node) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StoreAction_Node{`,
+ `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StoreAction_Service) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StoreAction_Service{`,
+ `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StoreAction_Task) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StoreAction_Task{`,
+ `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StoreAction_Network) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StoreAction_Network{`,
+ `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StoreAction_Cluster) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StoreAction_Cluster{`,
+ `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringRaft(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *JoinRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JoinRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JoinRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Addr = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JoinResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JoinResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JoinResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &RaftMember{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaveRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaveRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Node == nil {
+ m.Node = &RaftMember{}
+ }
+ if err := m.Node.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaveResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaveResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProcessRaftMessageRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProcessRaftMessageRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProcessRaftMessageRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Message == nil {
+ m.Message = &raftpb.Message{}
+ }
+ if err := m.Message.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProcessRaftMessageResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProcessRaftMessageResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProcessRaftMessageResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResolveAddressRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResolveAddressRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResolveAddressRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType)
+ }
+ m.RaftID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.RaftID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResolveAddressResponse) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResolveAddressResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResolveAddressResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Addr = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *InternalRaftRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Action = append(m.Action, &StoreAction{})
+ if err := m.Action[len(m.Action)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StoreAction) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StoreAction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StoreAction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+ }
+ m.Action = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Action |= (StoreActionKind(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &Node{}
+ if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Target = &StoreAction_Node{v}
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &Service{}
+ if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Target = &StoreAction_Service{v}
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &Task{}
+ if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Target = &StoreAction_Task{v}
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &Network{}
+ if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Target = &StoreAction_Network{v}
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &Cluster{}
+ if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Target = &StoreAction_Cluster{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipRaft(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthRaft
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipRaft(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
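+
+// Editorial note: skipRaft advances past a single unknown field according to
+// its wire type: 0 varint, 1 fixed64 (8 bytes), 2 length-delimited, 3 start
+// group (skipped recursively until the matching end group), 4 end group,
+// 5 fixed32 (4 bytes). The Unmarshal methods above use the returned offset
+// to skip over fields they do not recognize.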
+
+var (
+ ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorRaft = []byte{
+ // 757 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x95, 0xcf, 0x52, 0xd3, 0x40,
+ 0x1c, 0xc7, 0xfb, 0x27, 0xb4, 0xba, 0xe5, 0xdf, 0x2c, 0x82, 0x25, 0x32, 0x05, 0xc2, 0x41, 0x60,
+ 0x24, 0x1d, 0xeb, 0x41, 0x47, 0xbd, 0xb4, 0xa5, 0x33, 0x54, 0xa0, 0x65, 0x42, 0xab, 0xdc, 0x30,
+ 0x4d, 0x96, 0x1a, 0xdb, 0x66, 0x6b, 0x76, 0x5b, 0xc6, 0x8b, 0xc3, 0xd1, 0xe1, 0x05, 0xf4, 0xe2,
+ 0x49, 0xcf, 0x3e, 0x80, 0x4f, 0xc0, 0x78, 0xf2, 0xe8, 0x78, 0x50, 0xe1, 0x01, 0xd4, 0x47, 0x70,
+ 0x77, 0x93, 0x20, 0x96, 0xb4, 0x70, 0xd8, 0x76, 0xbb, 0xfb, 0xf9, 0x7e, 0xbf, 0x9b, 0xdd, 0xec,
+ 0xaf, 0x00, 0x38, 0xfa, 0x1e, 0x55, 0xdb, 0x0e, 0xa6, 0x18, 0x42, 0x13, 0x1b, 0x0d, 0xe4, 0xa8,
+ 0x64, 0x5f, 0x77, 0x5a, 0x0d, 0x8b, 0xaa, 0xdd, 0xdb, 0xf2, 0x08, 0xae, 0x3d, 0x47, 0x06, 0x25,
+ 0x2e, 0x22, 0x27, 0xe8, 0xcb, 0x36, 0xf2, 0x7f, 0xac, 0xd4, 0x2d, 0xfa, 0xac, 0x53, 0x53, 0x0d,
+ 0xdc, 0x4a, 0x1b, 0xd8, 0x41, 0x98, 0xa4, 0x11, 0x35, 0xcc, 0x34, 0xb7, 0x14, 0x1f, 0xed, 0x5a,
+ 0xfa, 0x9f, 0xbd, 0x7c, 0xad, 0x8e, 0xeb, 0x58, 0x74, 0xd3, 0xbc, 0xe7, 0x8d, 0x4e, 0xb4, 0x9b,
+ 0x9d, 0xba, 0x65, 0xa7, 0xdd, 0x2f, 0x77, 0x50, 0x99, 0x07, 0x89, 0x47, 0xd8, 0xb2, 0x35, 0xf4,
+ 0xa2, 0x83, 0x08, 0x85, 0x10, 0x48, 0xba, 0x69, 0x3a, 0xc9, 0xf0, 0x5c, 0x78, 0xf1, 0xaa, 0x26,
+ 0xfa, 0xca, 0x1a, 0x18, 0x76, 0x11, 0xd2, 0xc6, 0x36, 0x41, 0xf0, 0x1e, 0x88, 0xb7, 0x50, 0xab,
+ 0x86, 0x1c, 0xc2, 0xb0, 0xe8, 0x62, 0x22, 0x93, 0x52, 0xcf, 0x3f, 0x8e, 0xaa, 0xb1, 0xe5, 0x6c,
+ 0x0a, 0x4c, 0xf3, 0x71, 0x25, 0x07, 0x86, 0x37, 0x90, 0xde, 0x45, 0x7e, 0x5a, 0x06, 0x48, 0x36,
+ 0x36, 0x91, 0x48, 0xbb, 0xd8, 0x46, 0xb0, 0xca, 0x18, 0x18, 0xf1, 0x3c, 0xdc, 0xe5, 0x28, 0x1b,
+ 0x60, 0x7a, 0xcb, 0xc1, 0x06, 0x22, 0xc4, 0x65, 0x09, 0xd1, 0xeb, 0xa7, 0x09, 0x4b, 0x7c, 0xad,
+ 0x62, 0xc4, 0x0b, 0x19, 0x53, 0xdd, 0xed, 0x52, 0x7d, 0xd0, 0x9f, 0xbf, 0x2f, 0x1d, 0xbc, 0x51,
+ 0x42, 0xca, 0x0c, 0x90, 0x83, 0xdc, 0xbc, 0xac, 0x87, 0x60, 0x92, 0xf5, 0x71, 0xb3, 0x8b, 0xb2,
+ 0x6c, 0x67, 0x38, 0xe4, 0xe5, 0x2c, 0x80, 0x38, 0xf7, 0xdd, 0xb5, 0x4c, 0x91, 0x23, 0xe5, 0xc0,
+ 0xc9, 0xf7, 0xd9, 0x18, 0xb7, 0x28, 0xae, 0x6a, 0x31, 0x3e, 0x55, 0x34, 0x95, 0x5b, 0x60, 0xaa,
+ 0x57, 0xed, 0x6d, 0x69, 0xd0, 0xb6, 0xef, 0x81, 0x89, 0xa2, 0x4d, 0x91, 0x63, 0xeb, 0x4d, 0xee,
+ 0xe3, 0x27, 0x4d, 0x81, 0xc8, 0x69, 0x48, 0x8c, 0x85, 0x44, 0x58, 0x00, 0x1b, 0x81, 0x77, 0x41,
+ 0x4c, 0x37, 0xa8, 0x85, 0xed, 0x64, 0x44, 0x1c, 0xca, 0x6c, 0xd0, 0x6e, 0x6e, 0x53, 0xf6, 0xf2,
+ 0x64, 0x05, 0xa6, 0x79, 0xb8, 0xf2, 0x23, 0x02, 0x12, 0x67, 0xc6, 0xe1, 0x83, 0x53, 0x23, 0x1e,
+ 0x32, 0x9a, 0x59, 0xb8, 0xc0, 0x68, 0xdd, 0xb2, 0x4d, 0xdf, 0x0c, 0xaa, 0xde, 0x89, 0x46, 0xc4,
+ 0x66, 0x27, 0x83, 0xa4, 0x25, 0x36, 0xbf, 0x16, 0x72, 0x4f, 0x93, 0xad, 0x3a, 0x4e, 0x90, 0xd3,
+ 0xb5, 0x0c, 0x94, 0x8c, 0x0a, 0xc9, 0x8d, 0xc0, 0x34, 0x17, 0x61, 0x2a, 0x9f, 0xe6, 0x41, 0x54,
+ 0x27, 0x8d, 0xa4, 0xd4, 0x3f, 0xa8, 0xc2, 0xe6, 0x79, 0x10, 0xe7, 0x78, 0x90, 0x8d, 0xe8, 0x3e,
+ 0x76, 0x1a, 0xc9, 0xa1, 0xfe, 0x41, 0x25, 0x17, 0xe1, 0x41, 0x1e, 0xcd, 0x85, 0x46, 0xb3, 0x43,
+ 0xd8, 0x41, 0x24, 0x63, 0xfd, 0x85, 0x79, 0x17, 0xe1, 0x42, 0x8f, 0xce, 0x5d, 0x01, 0x31, 0xaa,
+ 0x3b, 0x75, 0x44, 0x97, 0x7f, 0x87, 0xc1, 0x58, 0xcf, 0x86, 0xc1, 0x9b, 0x20, 0x5e, 0x2d, 0xad,
+ 0x97, 0xca, 0x4f, 0x4a, 0xe3, 0x21, 0x59, 0x3e, 0x7c, 0x37, 0x37, 0xd5, 0x43, 0x54, 0xed, 0x86,
+ 0x8d, 0xf7, 0x6d, 0x76, 0x47, 0x26, 0xb6, 0x2b, 0x65, 0xad, 0xb0, 0x9b, 0xcd, 0x57, 0x8a, 0xe5,
+ 0xd2, 0x6e, 0x5e, 0x2b, 0x64, 0x2b, 0x85, 0xf1, 0xb0, 0x3c, 0xcd, 0x44, 0x93, 0x3d, 0xa2, 0xbc,
+ 0x83, 0x74, 0x8a, 0xce, 0x69, 0xaa, 0x5b, 0xab, 0x5c, 0x13, 0x09, 0xd4, 0x54, 0xdb, 0x66, 0x90,
+ 0x46, 0x2b, 0x6c, 0x96, 0x1f, 0x17, 0xc6, 0xa3, 0x81, 0x1a, 0x0d, 0xb5, 0x70, 0x17, 0xc9, 0xd7,
+ 0x5f, 0xbf, 0x4f, 0x85, 0x3e, 0x7d, 0x48, 0xf5, 0x3e, 0x5d, 0xe6, 0x5b, 0x14, 0x48, 0xfc, 0xa5,
+ 0x85, 0x4f, 0x81, 0xc4, 0x6b, 0x07, 0x0c, 0x7c, 0x1b, 0xcf, 0x14, 0x1e, 0x79, 0xae, 0x3f, 0xe0,
+ 0xdd, 0xbd, 0xc9, 0xcf, 0x1f, 0x7f, 0xbd, 0x8d, 0xb0, 0xeb, 0x2f, 0x88, 0x95, 0x96, 0x6e, 0xb3,
+ 0x9b, 0xe9, 0x40, 0x03, 0x0c, 0x89, 0x7a, 0x00, 0x03, 0x1d, 0xce, 0x96, 0x1b, 0x79, 0x7e, 0x00,
+ 0x31, 0x38, 0xe4, 0x30, 0x0c, 0xe0, 0xf9, 0xb2, 0x00, 0x57, 0x82, 0x0c, 0xfb, 0x16, 0x23, 0x59,
+ 0xbd, 0x2c, 0x3e, 0x78, 0x31, 0xaf, 0xc0, 0xe8, 0xff, 0x65, 0x04, 0x2e, 0x05, 0x56, 0xce, 0xa0,
+ 0x42, 0x25, 0x2f, 0x5f, 0x06, 0x1d, 0x98, 0x9f, 0x9b, 0x39, 0x3a, 0x4e, 0x85, 0xbe, 0xb2, 0xf6,
+ 0xe7, 0x38, 0x15, 0x3e, 0x38, 0x49, 0x85, 0x8f, 0x58, 0xfb, 0xc2, 0xda, 0x4f, 0xd6, 0x76, 0xa2,
+ 0x3b, 0x52, 0x2d, 0x26, 0xfe, 0x5b, 0xee, 0xfc, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xeb, 0xbf, 0x94,
+ 0x02, 0xf3, 0x06, 0x00, 0x00,
+}
diff --git a/vendor/src/github.com/docker/swarmkit/api/raft.proto b/vendor/src/github.com/docker/swarmkit/api/raft.proto
new file mode 100644
index 0000000000..4c54a16e8e
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/raft.proto
@@ -0,0 +1,100 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "objects.proto";
+import "types.proto";
+import "github.com/coreos/etcd/raft/raftpb/raft.proto";
+import weak "gogoproto/gogo.proto";
+import weak "plugin/plugin.proto";
+
+// Raft defines the RPC communication between raft nodes.
+service Raft {
+ // Join adds a RaftMember to the raft cluster.
+ rpc Join(JoinRequest) returns (JoinResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+
+ // Leave removes a RaftMember from the raft cluster.
+ rpc Leave(LeaveRequest) returns (LeaveResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+
+ // ProcessRaftMessage sends a raft message to be processed on a raft member. It is
+ // called by the RaftMember that wants to send a message to its destination (the 'To' field).
+ rpc ProcessRaftMessage(ProcessRaftMessageRequest) returns (ProcessRaftMessageResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+
+ // ResolveAddress returns the address where the node with the given ID can be reached.
+ rpc ResolveAddress(ResolveAddressRequest) returns (ResolveAddressResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+}
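+
+// Note: every RPC above carries the tls_authorization option, restricting
+// callers to peers presenting the "swarm-manager" role, so only managers
+// exchange raft traffic.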
+
+message JoinRequest {
+ // Addr specifies the address of the member
+ string addr = 1;
+}
+
+message JoinResponse {
+ repeated RaftMember members = 1;
+}
+
+message LeaveRequest {
+ RaftMember node = 1;
+}
+
+message LeaveResponse {}
+
+message ProcessRaftMessageRequest {
+ option (docker.protobuf.plugin.deepcopy) = false;
+ raftpb.Message message = 1;
+}
+
+message ProcessRaftMessageResponse {}
+
+message ResolveAddressRequest {
+ // raft_id is the ID to resolve to an address.
+ uint64 raft_id = 1 [(gogoproto.customname) = "RaftID"];
+}
+
+message ResolveAddressResponse {
+ // Addr specifies the address of the member
+ string addr = 1;
+}
+
+// InternalRaftRequest contains one or more protobuf-encoded objects to
+// replicate over the raft backend, along with a request ID used to track
+// when the action has effectively been applied.
+message InternalRaftRequest {
+ uint64 id = 1 [(gogoproto.customname) = "ID"];
+
+ repeated StoreAction action = 2;
+}
+
+// TODO(stevvooe): Storage actions may belong in another protobuf file. They
+// aren't necessarily first-class "types" in the cluster schema.
+
+// StoreActionKind defines the operation to take on the store for the target of
+// a storage action.
+enum StoreActionKind {
+ option (gogoproto.goproto_enum_prefix) = false;
+ option (gogoproto.enum_customname) = "StoreActionKind";
+ UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StoreActionKindUnknown"]; // the default value, which is invalid
+ STORE_ACTION_CREATE = 1 [(gogoproto.enumvalue_customname) = "StoreActionKindCreate"];
+ STORE_ACTION_UPDATE = 2 [(gogoproto.enumvalue_customname) = "StoreActionKindUpdate"];
+ STORE_ACTION_REMOVE = 3 [(gogoproto.enumvalue_customname) = "StoreActionKindRemove"];
+}
+
+// StoreAction defines a target and an operation to apply to the storage system.
+message StoreAction {
+ StoreActionKind action = 1;
+ oneof target {
+ Node node = 2;
+ Service service = 3;
+ Task task = 4;
+ Network network = 5;
+ Cluster cluster = 6;
+ }
+}
diff --git a/vendor/src/github.com/docker/swarmkit/api/snapshot.pb.go b/vendor/src/github.com/docker/swarmkit/api/snapshot.pb.go
new file mode 100644
index 0000000000..9bc08c0511
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/snapshot.pb.go
@@ -0,0 +1,1115 @@
+// Code generated by protoc-gen-gogo.
+// source: snapshot.proto
+// DO NOT EDIT!
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Snapshot_Version int32
+
+const (
+ // V0 is the initial version of the Snapshot message format.
+ Snapshot_V0 Snapshot_Version = 0
+)
+
+var Snapshot_Version_name = map[int32]string{
+ 0: "V0",
+}
+var Snapshot_Version_value = map[string]int32{
+ "V0": 0,
+}
+
+func (x Snapshot_Version) String() string {
+ return proto.EnumName(Snapshot_Version_name, int32(x))
+}
+func (Snapshot_Version) EnumDescriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2, 0} }
+
+// StoreSnapshot is a point-in-time snapshot of the objects in the store.
+type StoreSnapshot struct {
+ Nodes []*Node `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"`
+ Services []*Service `protobuf:"bytes,2,rep,name=services" json:"services,omitempty"`
+ Networks []*Network `protobuf:"bytes,3,rep,name=networks" json:"networks,omitempty"`
+ Tasks []*Task `protobuf:"bytes,4,rep,name=tasks" json:"tasks,omitempty"`
+ Clusters []*Cluster `protobuf:"bytes,5,rep,name=clusters" json:"clusters,omitempty"`
+}
+
+func (m *StoreSnapshot) Reset() { *m = StoreSnapshot{} }
+func (*StoreSnapshot) ProtoMessage() {}
+func (*StoreSnapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{0} }
+
+// ClusterSnapshot stores cluster membership information in snapshots.
+type ClusterSnapshot struct {
+ Members []*RaftMember `protobuf:"bytes,1,rep,name=members" json:"members,omitempty"`
+ Removed []uint64 `protobuf:"varint,2,rep,name=removed" json:"removed,omitempty"`
+}
+
+func (m *ClusterSnapshot) Reset() { *m = ClusterSnapshot{} }
+func (*ClusterSnapshot) ProtoMessage() {}
+func (*ClusterSnapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{1} }
+
+type Snapshot struct {
+ Version Snapshot_Version `protobuf:"varint,1,opt,name=version,proto3,enum=docker.swarmkit.v1.Snapshot_Version" json:"version,omitempty"`
+ Membership ClusterSnapshot `protobuf:"bytes,2,opt,name=membership" json:"membership"`
+ Store StoreSnapshot `protobuf:"bytes,3,opt,name=store" json:"store"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} }
+
+func init() {
+ proto.RegisterType((*StoreSnapshot)(nil), "docker.swarmkit.v1.StoreSnapshot")
+ proto.RegisterType((*ClusterSnapshot)(nil), "docker.swarmkit.v1.ClusterSnapshot")
+ proto.RegisterType((*Snapshot)(nil), "docker.swarmkit.v1.Snapshot")
+ proto.RegisterEnum("docker.swarmkit.v1.Snapshot_Version", Snapshot_Version_name, Snapshot_Version_value)
+}
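+
+// Editorial note: a minimal sketch of assembling and serializing a snapshot
+// with the types above (the field values are illustrative only):
+//
+//    snap := &Snapshot{
+//        Version:    Snapshot_V0,
+//        Membership: ClusterSnapshot{Removed: []uint64{3}},
+//        Store:      StoreSnapshot{},
+//    }
+//    data, err := snap.Marshal()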
+
+func (m *StoreSnapshot) Copy() *StoreSnapshot {
+ if m == nil {
+ return nil
+ }
+
+ o := &StoreSnapshot{}
+
+ if m.Nodes != nil {
+ o.Nodes = make([]*Node, 0, len(m.Nodes))
+ for _, v := range m.Nodes {
+ o.Nodes = append(o.Nodes, v.Copy())
+ }
+ }
+
+ if m.Services != nil {
+ o.Services = make([]*Service, 0, len(m.Services))
+ for _, v := range m.Services {
+ o.Services = append(o.Services, v.Copy())
+ }
+ }
+
+ if m.Networks != nil {
+ o.Networks = make([]*Network, 0, len(m.Networks))
+ for _, v := range m.Networks {
+ o.Networks = append(o.Networks, v.Copy())
+ }
+ }
+
+ if m.Tasks != nil {
+ o.Tasks = make([]*Task, 0, len(m.Tasks))
+ for _, v := range m.Tasks {
+ o.Tasks = append(o.Tasks, v.Copy())
+ }
+ }
+
+ if m.Clusters != nil {
+ o.Clusters = make([]*Cluster, 0, len(m.Clusters))
+ for _, v := range m.Clusters {
+ o.Clusters = append(o.Clusters, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *ClusterSnapshot) Copy() *ClusterSnapshot {
+ if m == nil {
+ return nil
+ }
+
+ o := &ClusterSnapshot{}
+
+ if m.Members != nil {
+ o.Members = make([]*RaftMember, 0, len(m.Members))
+ for _, v := range m.Members {
+ o.Members = append(o.Members, v.Copy())
+ }
+ }
+
+ if m.Removed != nil {
+ o.Removed = make([]uint64, 0, len(m.Removed))
+ for _, v := range m.Removed {
+ o.Removed = append(o.Removed, v)
+ }
+ }
+
+ return o
+}
+
+func (m *Snapshot) Copy() *Snapshot {
+ if m == nil {
+ return nil
+ }
+
+ o := &Snapshot{
+ Version: m.Version,
+ Membership: *m.Membership.Copy(),
+ Store: *m.Store.Copy(),
+ }
+
+ return o
+}
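+
+// Editorial note: the Copy methods above perform deep copies; Snapshot
+// copies its two struct-valued fields by dereferencing the pointer returned
+// by their own Copy, so mutating a copy never aliases the original, and a
+// nil receiver copies to nil.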
+
+func (this *StoreSnapshot) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&api.StoreSnapshot{")
+ if this.Nodes != nil {
+ s = append(s, "Nodes: "+fmt.Sprintf("%#v", this.Nodes)+",\n")
+ }
+ if this.Services != nil {
+ s = append(s, "Services: "+fmt.Sprintf("%#v", this.Services)+",\n")
+ }
+ if this.Networks != nil {
+ s = append(s, "Networks: "+fmt.Sprintf("%#v", this.Networks)+",\n")
+ }
+ if this.Tasks != nil {
+ s = append(s, "Tasks: "+fmt.Sprintf("%#v", this.Tasks)+",\n")
+ }
+ if this.Clusters != nil {
+ s = append(s, "Clusters: "+fmt.Sprintf("%#v", this.Clusters)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ClusterSnapshot) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.ClusterSnapshot{")
+ if this.Members != nil {
+ s = append(s, "Members: "+fmt.Sprintf("%#v", this.Members)+",\n")
+ }
+ s = append(s, "Removed: "+fmt.Sprintf("%#v", this.Removed)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Snapshot) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.Snapshot{")
+ s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n")
+ s = append(s, "Membership: "+strings.Replace(this.Membership.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Store: "+strings.Replace(this.Store.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringSnapshot(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringSnapshot(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+func (m *StoreSnapshot) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *StoreSnapshot) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Nodes) > 0 {
+ for _, msg := range m.Nodes {
+ data[i] = 0xa
+ i++
+ i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Services) > 0 {
+ for _, msg := range m.Services {
+ data[i] = 0x12
+ i++
+ i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Networks) > 0 {
+ for _, msg := range m.Networks {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Tasks) > 0 {
+ for _, msg := range m.Tasks {
+ data[i] = 0x22
+ i++
+ i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Clusters) > 0 {
+ for _, msg := range m.Clusters {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ClusterSnapshot) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ClusterSnapshot) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Members) > 0 {
+ for _, msg := range m.Members {
+ data[i] = 0xa
+ i++
+ i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Removed) > 0 {
+ for _, num := range m.Removed {
+ data[i] = 0x10
+ i++
+ i = encodeVarintSnapshot(data, i, uint64(num))
+ }
+ }
+ return i, nil
+}
+
+func (m *Snapshot) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Version != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintSnapshot(data, i, uint64(m.Version))
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintSnapshot(data, i, uint64(m.Membership.Size()))
+ n1, err := m.Membership.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ data[i] = 0x1a
+ i++
+ i = encodeVarintSnapshot(data, i, uint64(m.Store.Size()))
+ n2, err := m.Store.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ return i, nil
+}
+
+func encodeFixed64Snapshot(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Snapshot(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
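+
+// fixedEncodingExample is an illustrative sketch (not generated code): the
+// fixed-width helpers above lay values out little-endian, least significant
+// byte first.
+func fixedEncodingExample() []byte {
+	buf := make([]byte, 8)
+	encodeFixed64Snapshot(buf, 0, 0x0102030405060708)
+	return buf // 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01
+}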
+func encodeVarintSnapshot(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
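+
+// varintEncodingExample is an illustrative sketch (not generated code):
+// varints emit seven payload bits per byte, least significant group first,
+// with the high bit set on every byte except the last, so 300 encodes to
+// the two bytes 0xac, 0x02.
+func varintEncodingExample() []byte {
+	buf := make([]byte, 2)
+	encodeVarintSnapshot(buf, 0, 300)
+	return buf // []byte{0xac, 0x02}
+}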
+
+func (m *StoreSnapshot) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Nodes) > 0 {
+ for _, e := range m.Nodes {
+ l = e.Size()
+ n += 1 + l + sovSnapshot(uint64(l))
+ }
+ }
+ if len(m.Services) > 0 {
+ for _, e := range m.Services {
+ l = e.Size()
+ n += 1 + l + sovSnapshot(uint64(l))
+ }
+ }
+ if len(m.Networks) > 0 {
+ for _, e := range m.Networks {
+ l = e.Size()
+ n += 1 + l + sovSnapshot(uint64(l))
+ }
+ }
+ if len(m.Tasks) > 0 {
+ for _, e := range m.Tasks {
+ l = e.Size()
+ n += 1 + l + sovSnapshot(uint64(l))
+ }
+ }
+ if len(m.Clusters) > 0 {
+ for _, e := range m.Clusters {
+ l = e.Size()
+ n += 1 + l + sovSnapshot(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ClusterSnapshot) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovSnapshot(uint64(l))
+ }
+ }
+ if len(m.Removed) > 0 {
+ for _, e := range m.Removed {
+ n += 1 + sovSnapshot(uint64(e))
+ }
+ }
+ return n
+}
+
+func (m *Snapshot) Size() (n int) {
+ var l int
+ _ = l
+ if m.Version != 0 {
+ n += 1 + sovSnapshot(uint64(m.Version))
+ }
+ l = m.Membership.Size()
+ n += 1 + l + sovSnapshot(uint64(l))
+ l = m.Store.Size()
+ n += 1 + l + sovSnapshot(uint64(l))
+ return n
+}
+
+func sovSnapshot(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozSnapshot(x uint64) (n int) {
+ return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
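+
+// zigzagSizeExample is an illustrative sketch (not generated code): sozSnapshot
+// sizes zigzag-encoded values, which map -1 to 1, 1 to 2, -2 to 3, and so on,
+// so small negative values cost one varint byte instead of ten.
+func zigzagSizeExample() int {
+	v := int64(-1)
+	return sozSnapshot(uint64(v)) // 1: zigzag(-1) == 1
+}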
+func (this *StoreSnapshot) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StoreSnapshot{`,
+ `Nodes:` + strings.Replace(fmt.Sprintf("%v", this.Nodes), "Node", "Node", 1) + `,`,
+ `Services:` + strings.Replace(fmt.Sprintf("%v", this.Services), "Service", "Service", 1) + `,`,
+ `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "Network", "Network", 1) + `,`,
+ `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`,
+ `Clusters:` + strings.Replace(fmt.Sprintf("%v", this.Clusters), "Cluster", "Cluster", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterSnapshot) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterSnapshot{`,
+ `Members:` + strings.Replace(fmt.Sprintf("%v", this.Members), "RaftMember", "RaftMember", 1) + `,`,
+ `Removed:` + fmt.Sprintf("%v", this.Removed) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Snapshot) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Snapshot{`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `Membership:` + strings.Replace(strings.Replace(this.Membership.String(), "ClusterSnapshot", "ClusterSnapshot", 1), `&`, ``, 1) + `,`,
+ `Store:` + strings.Replace(strings.Replace(this.Store.String(), "StoreSnapshot", "StoreSnapshot", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringSnapshot(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *StoreSnapshot) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StoreSnapshot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StoreSnapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSnapshot
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Nodes = append(m.Nodes, &Node{})
+ if err := m.Nodes[len(m.Nodes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSnapshot
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Services = append(m.Services, &Service{})
+ if err := m.Services[len(m.Services)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSnapshot
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Networks = append(m.Networks, &Network{})
+ if err := m.Networks[len(m.Networks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSnapshot
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tasks = append(m.Tasks, &Task{})
+ if err := m.Tasks[len(m.Tasks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSnapshot
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Clusters = append(m.Clusters, &Cluster{})
+ if err := m.Clusters[len(m.Clusters)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSnapshot(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSnapshot
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterSnapshot) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterSnapshot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterSnapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSnapshot
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &RaftMember{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Removed", wireType)
+ }
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Removed = append(m.Removed, v)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSnapshot(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSnapshot
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Snapshot) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ m.Version = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Version |= (Snapshot_Version(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSnapshot
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Membership.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSnapshot
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Store.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSnapshot(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSnapshot
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipSnapshot(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthSnapshot
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnapshot
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipSnapshot(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
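+
+// skipSnapshotExample is an illustrative sketch (not generated code): for an
+// unknown field, skipSnapshot consumes the tag and its payload. 0x18 is the
+// tag for field 3 with wire type 0 (varint), so both bytes below are skipped.
+func skipSnapshotExample() (int, error) {
+	return skipSnapshot([]byte{0x18, 0x01}) // 2, nil
+}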
+
+var (
+ ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorSnapshot = []byte{
+ // 387 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x92, 0xbf, 0x4e, 0xf3, 0x30,
+ 0x14, 0xc5, 0x9b, 0xf4, 0x4f, 0x2a, 0x57, 0xed, 0xf7, 0x61, 0x31, 0x44, 0x05, 0x05, 0x08, 0x0c,
+ 0x9d, 0x02, 0x94, 0x01, 0x16, 0x18, 0xca, 0xc4, 0x40, 0x07, 0x17, 0x55, 0xac, 0x69, 0x6a, 0xda,
+ 0x50, 0x12, 0x47, 0xb6, 0x49, 0xc5, 0xc6, 0x73, 0xf0, 0x44, 0x1d, 0x19, 0x99, 0x10, 0x65, 0x61,
+ 0xe5, 0x11, 0xb0, 0xe3, 0x24, 0xaa, 0x44, 0xca, 0x70, 0x25, 0xdb, 0xfa, 0x9d, 0x73, 0x6e, 0x6e,
+ 0x2e, 0x68, 0xb1, 0xd0, 0x8d, 0xd8, 0x94, 0x70, 0x27, 0xa2, 0x84, 0x13, 0x08, 0xc7, 0xc4, 0x9b,
+ 0x61, 0xea, 0xb0, 0xb9, 0x4b, 0x83, 0x99, 0xcf, 0x9d, 0xf8, 0xb8, 0xdd, 0x24, 0xa3, 0x7b, 0xec,
+ 0x71, 0xa6, 0x90, 0x76, 0x83, 0x3f, 0x45, 0x38, 0xbb, 0x6c, 0x4e, 0xc8, 0x84, 0x24, 0xc7, 0x43,
+ 0x79, 0x52, 0xaf, 0xf6, 0x8b, 0x0e, 0x9a, 0x03, 0x4e, 0x28, 0x1e, 0xa4, 0xee, 0xd0, 0x01, 0xd5,
+ 0x90, 0x8c, 0x31, 0x33, 0xb5, 0xdd, 0x72, 0xa7, 0xd1, 0x35, 0x9d, 0xdf, 0x39, 0x4e, 0x5f, 0x00,
+ 0x48, 0x61, 0xf0, 0x14, 0xd4, 0x19, 0xa6, 0xb1, 0xef, 0x09, 0x89, 0x9e, 0x48, 0xb6, 0x8a, 0x24,
+ 0x03, 0xc5, 0xa0, 0x1c, 0x96, 0xc2, 0x10, 0xf3, 0x39, 0xa1, 0x33, 0x66, 0x96, 0xd7, 0x0b, 0xfb,
+ 0x8a, 0x41, 0x39, 0x2c, 0x3b, 0xe4, 0x2e, 0x13, 0xaa, 0xca, 0xfa, 0x0e, 0x6f, 0x04, 0x80, 0x14,
+ 0x26, 0x83, 0xbc, 0x87, 0x47, 0xc6, 0x31, 0x65, 0x66, 0x75, 0x7d, 0xd0, 0xa5, 0x62, 0x50, 0x0e,
+ 0xdb, 0x18, 0xfc, 0x4b, 0x1f, 0xf3, 0xe9, 0x9c, 0x01, 0x23, 0xc0, 0xc1, 0x48, 0x5a, 0xa9, 0xf9,
+ 0x58, 0x45, 0x56, 0xc8, 0xbd, 0xe3, 0xd7, 0x09, 0x86, 0x32, 0x1c, 0x9a, 0xc0, 0xa0, 0x38, 0x20,
+ 0x31, 0x1e, 0x27, 0x63, 0xaa, 0xa0, 0xec, 0x6a, 0x7f, 0x69, 0xa0, 0x9e, 0x07, 0x5c, 0x00, 0x23,
+ 0x16, 0xb8, 0x4f, 0x42, 0x11, 0xa0, 0x75, 0x5a, 0xdd, 0x83, 0xc2, 0x69, 0x66, 0xbb, 0x30, 0x54,
+ 0x2c, 0xca, 0x44, 0xf0, 0x0a, 0x80, 0x34, 0x71, 0xea, 0x47, 0x22, 0x49, 0x13, 0x3d, 0xee, 0xff,
+ 0xf1, 0xb9, 0x99, 0x53, 0xaf, 0xb2, 0x78, 0xdf, 0x29, 0xa1, 0x15, 0x31, 0x3c, 0x07, 0x55, 0x26,
+ 0x57, 0x43, 0xfc, 0x1d, 0xe9, 0xb2, 0x57, 0xd8, 0xc8, 0xea, 0xee, 0xa4, 0x1e, 0x4a, 0x65, 0x6f,
+ 0x00, 0x23, 0xed, 0x0e, 0xd6, 0x80, 0x3e, 0x3c, 0xfa, 0x5f, 0xea, 0x6d, 0x2f, 0x96, 0x56, 0xe9,
+ 0x4d, 0xd4, 0xf7, 0xd2, 0xd2, 0x9e, 0x3f, 0x2d, 0x6d, 0x21, 0xea, 0x55, 0xd4, 0x87, 0xa8, 0x5b,
+ 0x7d, 0x54, 0x4b, 0x96, 0xf2, 0xe4, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x1e, 0xaa, 0x44, 0xec,
+ 0x02, 0x00, 0x00,
+}
diff --git a/vendor/src/github.com/docker/swarmkit/api/snapshot.proto b/vendor/src/github.com/docker/swarmkit/api/snapshot.proto
new file mode 100644
index 0000000000..009b17d414
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/snapshot.proto
@@ -0,0 +1,40 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "objects.proto";
+import "types.proto";
+import weak "gogoproto/gogo.proto";
+
+// StoreSnapshot is used to store snapshots of the store.
+message StoreSnapshot {
+ // TODO(aaronl): The current method of assembling a StoreSnapshot
+ // structure and marshalling it is not optimal. It may be better to
+ // write out nodes, networks, tasks, etc. one at a time to an io.Writer
+ // using gogo-protobuf's io.DelimitedWriter. A new value of the version
+ // field could support this approach.
+
+ repeated Node nodes = 1;
+ repeated Service services = 2;
+ repeated Network networks = 3;
+ repeated Task tasks = 4;
+ repeated Cluster clusters = 5;
+}
+
+// ClusterSnapshot stores cluster membership information in snapshots.
+message ClusterSnapshot {
+ repeated RaftMember members = 1;
+ repeated uint64 removed = 2;
+}
+
+message Snapshot {
+ enum Version {
+ // V0 is the initial version of the StoreSnapshot message.
+ V0 = 0;
+ }
+
+ Version version = 1;
+
+ ClusterSnapshot membership = 2 [(gogoproto.nullable) = false];
+ StoreSnapshot store = 3 [(gogoproto.nullable) = false];
+}
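+
+// Illustrative sketch (not part of the schema above): a minimal Snapshot in
+// protobuf text format, with an empty store and a single removed raft
+// member, would read:
+//
+//   version: V0
+//   membership { removed: 42 }
+//   store {}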
diff --git a/vendor/src/github.com/docker/swarmkit/api/specs.pb.go b/vendor/src/github.com/docker/swarmkit/api/specs.pb.go
new file mode 100644
index 0000000000..4b9cfc05fd
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/specs.pb.go
@@ -0,0 +1,3882 @@
+// Code generated by protoc-gen-gogo.
+// source: specs.proto
+// DO NOT EDIT!
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import docker_swarmkit_v11 "github.com/docker/swarmkit/api/duration"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type NodeSpec_Membership int32
+
+const (
+ NodeMembershipPending NodeSpec_Membership = 0
+ NodeMembershipAccepted NodeSpec_Membership = 1
+)
+
+var NodeSpec_Membership_name = map[int32]string{
+ 0: "PENDING",
+ 1: "ACCEPTED",
+}
+var NodeSpec_Membership_value = map[string]int32{
+ "PENDING": 0,
+ "ACCEPTED": 1,
+}
+
+func (x NodeSpec_Membership) String() string {
+ return proto.EnumName(NodeSpec_Membership_name, int32(x))
+}
+func (NodeSpec_Membership) EnumDescriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0, 0} }
+
+type NodeSpec_Availability int32
+
+const (
+ // Active nodes.
+ NodeAvailabilityActive NodeSpec_Availability = 0
+	// Paused nodes won't be considered by the scheduler, preventing any
+	// further tasks from running on them.
+ NodeAvailabilityPause NodeSpec_Availability = 1
+ // Drained nodes are paused and any task already running on them will
+ // be evicted.
+ NodeAvailabilityDrain NodeSpec_Availability = 2
+)
+
+var NodeSpec_Availability_name = map[int32]string{
+ 0: "ACTIVE",
+ 1: "PAUSE",
+ 2: "DRAIN",
+}
+var NodeSpec_Availability_value = map[string]int32{
+ "ACTIVE": 0,
+ "PAUSE": 1,
+ "DRAIN": 2,
+}
+
+func (x NodeSpec_Availability) String() string {
+ return proto.EnumName(NodeSpec_Availability_name, int32(x))
+}
+func (NodeSpec_Availability) EnumDescriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0, 1} }
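+
+// availabilityStringExample is an illustrative sketch (not generated code):
+// String resolves through the generated name table, so the drain constant
+// renders as its proto enum name.
+func availabilityStringExample() string {
+	return NodeAvailabilityDrain.String() // "DRAIN"
+}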
+
+// ResolutionMode specifies the mode of resolution to use for
+// internal load balancing between tasks which are all within
+// the cluster. This is sometimes called the east-west data path.
+type EndpointSpec_ResolutionMode int32
+
+const (
+ // VIP resolution mode specifies that the
+ // service resolves to a logical IP and the requests
+ // are sent to that logical IP. Packets hitting that
+ // logical IP are load balanced to a chosen backend.
+ ResolutionModeVirtualIP EndpointSpec_ResolutionMode = 0
+	// DNSRR resolution mode specifies that the
+	// service resolves directly to one of the
+	// backend IPs and the client initiates a
+	// request towards the actual backend. This requires
+	// that the client does not cache the DNS responses
+	// when the DNS response TTL is 0.
+ ResolutionModeDNSRoundRobin EndpointSpec_ResolutionMode = 1
+)
+
+var EndpointSpec_ResolutionMode_name = map[int32]string{
+ 0: "VIP",
+ 1: "DNSRR",
+}
+var EndpointSpec_ResolutionMode_value = map[string]int32{
+ "VIP": 0,
+ "DNSRR": 1,
+}
+
+func (x EndpointSpec_ResolutionMode) String() string {
+ return proto.EnumName(EndpointSpec_ResolutionMode_name, int32(x))
+}
+func (EndpointSpec_ResolutionMode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorSpecs, []int{6, 0}
+}
+
+type NodeSpec struct {
+ Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"`
+ // Role defines the role the node should have.
+ Role NodeRole `protobuf:"varint,2,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
+ // Membership controls the admission of the node into the cluster.
+ Membership NodeSpec_Membership `protobuf:"varint,3,opt,name=membership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"membership,omitempty"`
+ // Availability allows a user to control the current scheduling status of a
+ // node.
+ Availability NodeSpec_Availability `protobuf:"varint,4,opt,name=availability,proto3,enum=docker.swarmkit.v1.NodeSpec_Availability" json:"availability,omitempty"`
+}
+
+func (m *NodeSpec) Reset() { *m = NodeSpec{} }
+func (*NodeSpec) ProtoMessage() {}
+func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0} }
+
+// ServiceSpec defines the properties of a service.
+//
+// A service instructs the cluster in orchestrating repeated instances of a
+// template, implemented as tasks. Based on the number of instances, scheduling
+// strategy and restart policy, a number of application-level behaviors can be
+// defined.
+type ServiceSpec struct {
+ Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"`
+ // Task defines the task template this service will spawn.
+ Task TaskSpec `protobuf:"bytes,2,opt,name=task" json:"task"`
+ // Types that are valid to be assigned to Mode:
+ // *ServiceSpec_Replicated
+ // *ServiceSpec_Global
+ Mode isServiceSpec_Mode `protobuf_oneof:"mode"`
+ // UpdateConfig controls the rate and policy of updates.
+ Update *UpdateConfig `protobuf:"bytes,6,opt,name=update" json:"update,omitempty"`
+ Networks []*ServiceSpec_NetworkAttachmentConfig `protobuf:"bytes,7,rep,name=networks" json:"networks,omitempty"`
+	// Service endpoint specifies the user-provided configuration
+	// to properly discover and load balance a service.
+ Endpoint *EndpointSpec `protobuf:"bytes,8,opt,name=endpoint" json:"endpoint,omitempty"`
+}
+
+func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
+func (*ServiceSpec) ProtoMessage() {}
+func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{1} }
+
+type isServiceSpec_Mode interface {
+ isServiceSpec_Mode()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type ServiceSpec_Replicated struct {
+ Replicated *ReplicatedService `protobuf:"bytes,3,opt,name=replicated,oneof"`
+}
+type ServiceSpec_Global struct {
+ Global *GlobalService `protobuf:"bytes,4,opt,name=global,oneof"`
+}
+
+func (*ServiceSpec_Replicated) isServiceSpec_Mode() {}
+func (*ServiceSpec_Global) isServiceSpec_Mode() {}
+
+func (m *ServiceSpec) GetMode() isServiceSpec_Mode {
+ if m != nil {
+ return m.Mode
+ }
+ return nil
+}
+
+func (m *ServiceSpec) GetReplicated() *ReplicatedService {
+ if x, ok := m.GetMode().(*ServiceSpec_Replicated); ok {
+ return x.Replicated
+ }
+ return nil
+}
+
+func (m *ServiceSpec) GetGlobal() *GlobalService {
+ if x, ok := m.GetMode().(*ServiceSpec_Global); ok {
+ return x.Global
+ }
+ return nil
+}
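+
+// replicatedModeExample is an illustrative sketch (not generated code): the
+// Mode oneof is populated by assigning one of its wrapper types, and the
+// typed getter unwraps it again.
+func replicatedModeExample() *ReplicatedService {
+	spec := &ServiceSpec{
+		Mode: &ServiceSpec_Replicated{
+			Replicated: &ReplicatedService{Replicas: 3},
+		},
+	}
+	return spec.GetReplicated() // the *ReplicatedService set above
+}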
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*ServiceSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _ServiceSpec_OneofMarshaler, _ServiceSpec_OneofUnmarshaler, _ServiceSpec_OneofSizer, []interface{}{
+ (*ServiceSpec_Replicated)(nil),
+ (*ServiceSpec_Global)(nil),
+ }
+}
+
+func _ServiceSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*ServiceSpec)
+ // mode
+ switch x := m.Mode.(type) {
+ case *ServiceSpec_Replicated:
+ _ = b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Replicated); err != nil {
+ return err
+ }
+ case *ServiceSpec_Global:
+ _ = b.EncodeVarint(4<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Global); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("ServiceSpec.Mode has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _ServiceSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*ServiceSpec)
+ switch tag {
+ case 3: // mode.replicated
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ReplicatedService)
+ err := b.DecodeMessage(msg)
+ m.Mode = &ServiceSpec_Replicated{msg}
+ return true, err
+ case 4: // mode.global
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(GlobalService)
+ err := b.DecodeMessage(msg)
+ m.Mode = &ServiceSpec_Global{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _ServiceSpec_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*ServiceSpec)
+ // mode
+ switch x := m.Mode.(type) {
+ case *ServiceSpec_Replicated:
+ s := proto.Size(x.Replicated)
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *ServiceSpec_Global:
+ s := proto.Size(x.Global)
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// NetworkAttachmentConfig specifies how a service should be attached to a particular network.
+//
+// For now, this is a simple struct, but this can include future information
+// instructing Swarm on how this service should work on the particular
+// network.
+type ServiceSpec_NetworkAttachmentConfig struct {
+	// Target specifies the target network for attachment. This value may be a
+ // network name or identifier. Only identifiers are supported at this time.
+ Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ // Aliases specifies a list of discoverable alternate names for the service on this Target.
+ Aliases []string `protobuf:"bytes,2,rep,name=aliases" json:"aliases,omitempty"`
+}
+
+func (m *ServiceSpec_NetworkAttachmentConfig) Reset() { *m = ServiceSpec_NetworkAttachmentConfig{} }
+func (*ServiceSpec_NetworkAttachmentConfig) ProtoMessage() {}
+func (*ServiceSpec_NetworkAttachmentConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptorSpecs, []int{1, 0}
+}
+
+// ReplicatedService sets the reconciliation target to a certain number of replicas.
+type ReplicatedService struct {
+ Replicas uint64 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"`
+}
+
+func (m *ReplicatedService) Reset() { *m = ReplicatedService{} }
+func (*ReplicatedService) ProtoMessage() {}
+func (*ReplicatedService) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{2} }
+
+// GlobalService represents a global service.
+type GlobalService struct {
+}
+
+func (m *GlobalService) Reset() { *m = GlobalService{} }
+func (*GlobalService) ProtoMessage() {}
+func (*GlobalService) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{3} }
+
+type TaskSpec struct {
+ // Types that are valid to be assigned to Runtime:
+ // *TaskSpec_Container
+ Runtime isTaskSpec_Runtime `protobuf_oneof:"runtime"`
+ // Resource requirements for the container.
+ Resources *ResourceRequirements `protobuf:"bytes,2,opt,name=resources" json:"resources,omitempty"`
+ // RestartPolicy specifies what to do when a task fails or finishes.
+ Restart *RestartPolicy `protobuf:"bytes,4,opt,name=restart" json:"restart,omitempty"`
+	// Placement specifies node selection constraints.
+ Placement *Placement `protobuf:"bytes,5,opt,name=placement" json:"placement,omitempty"`
+}
+
+func (m *TaskSpec) Reset() { *m = TaskSpec{} }
+func (*TaskSpec) ProtoMessage() {}
+func (*TaskSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{4} }
+
+type isTaskSpec_Runtime interface {
+ isTaskSpec_Runtime()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type TaskSpec_Container struct {
+ Container *ContainerSpec `protobuf:"bytes,1,opt,name=container,oneof"`
+}
+
+func (*TaskSpec_Container) isTaskSpec_Runtime() {}
+
+func (m *TaskSpec) GetRuntime() isTaskSpec_Runtime {
+ if m != nil {
+ return m.Runtime
+ }
+ return nil
+}
+
+func (m *TaskSpec) GetContainer() *ContainerSpec {
+ if x, ok := m.GetRuntime().(*TaskSpec_Container); ok {
+ return x.Container
+ }
+ return nil
+}
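+
+// containerRuntimeExample is an illustrative sketch (not generated code):
+// Runtime currently has a single container variant; the image reference
+// below is purely hypothetical.
+func containerRuntimeExample() *TaskSpec {
+	return &TaskSpec{
+		Runtime: &TaskSpec_Container{
+			Container: &ContainerSpec{Image: "example/image:latest"},
+		},
+	}
+}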
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*TaskSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _TaskSpec_OneofMarshaler, _TaskSpec_OneofUnmarshaler, _TaskSpec_OneofSizer, []interface{}{
+ (*TaskSpec_Container)(nil),
+ }
+}
+
+func _TaskSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*TaskSpec)
+ // runtime
+ switch x := m.Runtime.(type) {
+ case *TaskSpec_Container:
+ _ = b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Container); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("TaskSpec.Runtime has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _TaskSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*TaskSpec)
+ switch tag {
+ case 1: // runtime.container
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ContainerSpec)
+ err := b.DecodeMessage(msg)
+ m.Runtime = &TaskSpec_Container{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _TaskSpec_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*TaskSpec)
+ // runtime
+ switch x := m.Runtime.(type) {
+ case *TaskSpec_Container:
+ s := proto.Size(x.Container)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Container specifies runtime parameters for a container.
+type ContainerSpec struct {
+ // image defines the image reference, as specified in the
+ // distribution/reference package. This may include a registry host, name,
+ // tag or digest.
+ //
+	// The field is passed directly to the engine when pulling the image.
+	// Well-behaved service definitions will use immutable references, either
+	// through tags that don't change or verifiable digests.
+ Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"`
+ // Labels defines labels to be added to the container at creation time. If
+ // collisions with system labels occur, these labels will be overridden.
+ //
+ // This field *must* remain compatible with the Labels field of
+ // Annotations.
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Command to run in the container. The first element is a path to the
+ // executable and the following elements are treated as arguments.
+ //
+ // If command is empty, execution will fall back to the image's entrypoint.
+ Command []string `protobuf:"bytes,3,rep,name=command" json:"command,omitempty"`
+ // Args specifies arguments provided to the image's entrypoint.
+ // Ignored if command is specified.
+ Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"`
+ // Env specifies the environment variables for the container in NAME=VALUE
+ // format. These must be compliant with [IEEE Std
+ // 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html).
+ Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"`
+ // Dir defines the working directory to set for the container process.
+ Dir string `protobuf:"bytes,6,opt,name=dir,proto3" json:"dir,omitempty"`
+ // User specifies the user that should be employed to run the container.
+ User string `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"`
+ Mounts []Mount `protobuf:"bytes,8,rep,name=mounts" json:"mounts"`
+	// StopGracePeriod is the grace period for stopping the container before
+	// it is forcefully killed.
+ StopGracePeriod *docker_swarmkit_v11.Duration `protobuf:"bytes,9,opt,name=stop_grace_period,json=stopGracePeriod" json:"stop_grace_period,omitempty"`
+}
+
+func (m *ContainerSpec) Reset() { *m = ContainerSpec{} }
+func (*ContainerSpec) ProtoMessage() {}
+func (*ContainerSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{5} }
+
+// EndpointSpec defines the properties that can be configured to
+// access and load balance the service.
+type EndpointSpec struct {
+ Mode EndpointSpec_ResolutionMode `protobuf:"varint,1,opt,name=mode,proto3,enum=docker.swarmkit.v1.EndpointSpec_ResolutionMode" json:"mode,omitempty"`
+ // List of exposed ports that this service is accessible from
+ // external to the cluster.
+ Ports []*PortConfig `protobuf:"bytes,2,rep,name=ports" json:"ports,omitempty"`
+}
+
+func (m *EndpointSpec) Reset() { *m = EndpointSpec{} }
+func (*EndpointSpec) ProtoMessage() {}
+func (*EndpointSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{6} }
+
+// NetworkSpec specifies user defined network parameters.
+type NetworkSpec struct {
+ Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"`
+	// DriverConfig holds driver-specific configuration consumed by the
+	// network driver.
+ DriverConfig *Driver `protobuf:"bytes,2,opt,name=driver_config,json=driverConfig" json:"driver_config,omitempty"`
+ // IPv6Enabled enables support for IPv6 on the network.
+ Ipv6Enabled bool `protobuf:"varint,3,opt,name=ipv6_enabled,json=ipv6Enabled,proto3" json:"ipv6_enabled,omitempty"`
+	// Internal restricts external access to the network. This may be
+	// accomplished by disabling the default gateway or through other means.
+ Internal bool `protobuf:"varint,4,opt,name=internal,proto3" json:"internal,omitempty"`
+ IPAM *IPAMOptions `protobuf:"bytes,5,opt,name=ipam" json:"ipam,omitempty"`
+}
+
+func (m *NetworkSpec) Reset() { *m = NetworkSpec{} }
+func (*NetworkSpec) ProtoMessage() {}
+func (*NetworkSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{7} }
+
+// ClusterSpec specifies global cluster settings.
+type ClusterSpec struct {
+ Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"`
+ // AcceptancePolicy defines the certificate issuance policy.
+ AcceptancePolicy AcceptancePolicy `protobuf:"bytes,2,opt,name=acceptance_policy,json=acceptancePolicy" json:"acceptance_policy"`
+ // Orchestration defines cluster-level orchestration settings.
+ Orchestration OrchestrationConfig `protobuf:"bytes,3,opt,name=orchestration" json:"orchestration"`
+ // Raft defines the cluster's raft settings.
+ Raft RaftConfig `protobuf:"bytes,4,opt,name=raft" json:"raft"`
+ // Dispatcher defines cluster-level dispatcher settings.
+ Dispatcher DispatcherConfig `protobuf:"bytes,5,opt,name=dispatcher" json:"dispatcher"`
+ // CAConfig defines cluster-level certificate authority settings.
+ CAConfig CAConfig `protobuf:"bytes,6,opt,name=ca_config,json=caConfig" json:"ca_config"`
+}
+
+func (m *ClusterSpec) Reset() { *m = ClusterSpec{} }
+func (*ClusterSpec) ProtoMessage() {}
+func (*ClusterSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{8} }
+
+func init() {
+ proto.RegisterType((*NodeSpec)(nil), "docker.swarmkit.v1.NodeSpec")
+ proto.RegisterType((*ServiceSpec)(nil), "docker.swarmkit.v1.ServiceSpec")
+ proto.RegisterType((*ServiceSpec_NetworkAttachmentConfig)(nil), "docker.swarmkit.v1.ServiceSpec.NetworkAttachmentConfig")
+ proto.RegisterType((*ReplicatedService)(nil), "docker.swarmkit.v1.ReplicatedService")
+ proto.RegisterType((*GlobalService)(nil), "docker.swarmkit.v1.GlobalService")
+ proto.RegisterType((*TaskSpec)(nil), "docker.swarmkit.v1.TaskSpec")
+ proto.RegisterType((*ContainerSpec)(nil), "docker.swarmkit.v1.ContainerSpec")
+ proto.RegisterType((*EndpointSpec)(nil), "docker.swarmkit.v1.EndpointSpec")
+ proto.RegisterType((*NetworkSpec)(nil), "docker.swarmkit.v1.NetworkSpec")
+ proto.RegisterType((*ClusterSpec)(nil), "docker.swarmkit.v1.ClusterSpec")
+ proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Membership", NodeSpec_Membership_name, NodeSpec_Membership_value)
+ proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Availability", NodeSpec_Availability_name, NodeSpec_Availability_value)
+ proto.RegisterEnum("docker.swarmkit.v1.EndpointSpec_ResolutionMode", EndpointSpec_ResolutionMode_name, EndpointSpec_ResolutionMode_value)
+}
+
+func (m *NodeSpec) Copy() *NodeSpec {
+ if m == nil {
+ return nil
+ }
+
+ o := &NodeSpec{
+ Annotations: *m.Annotations.Copy(),
+ Role: m.Role,
+ Membership: m.Membership,
+ Availability: m.Availability,
+ }
+
+ return o
+}
+
+func (m *ServiceSpec) Copy() *ServiceSpec {
+ if m == nil {
+ return nil
+ }
+
+ o := &ServiceSpec{
+ Annotations: *m.Annotations.Copy(),
+ Task: *m.Task.Copy(),
+ Update: m.Update.Copy(),
+ Endpoint: m.Endpoint.Copy(),
+ }
+
+ if m.Networks != nil {
+ o.Networks = make([]*ServiceSpec_NetworkAttachmentConfig, 0, len(m.Networks))
+ for _, v := range m.Networks {
+ o.Networks = append(o.Networks, v.Copy())
+ }
+ }
+
+ switch m.Mode.(type) {
+ case *ServiceSpec_Replicated:
+ i := &ServiceSpec_Replicated{
+ Replicated: m.GetReplicated().Copy(),
+ }
+
+ o.Mode = i
+ case *ServiceSpec_Global:
+ i := &ServiceSpec_Global{
+ Global: m.GetGlobal().Copy(),
+ }
+
+ o.Mode = i
+ }
+
+ return o
+}
+
+func (m *ServiceSpec_NetworkAttachmentConfig) Copy() *ServiceSpec_NetworkAttachmentConfig {
+ if m == nil {
+ return nil
+ }
+
+ o := &ServiceSpec_NetworkAttachmentConfig{
+ Target: m.Target,
+ }
+
+ if m.Aliases != nil {
+ o.Aliases = make([]string, 0, len(m.Aliases))
+ for _, v := range m.Aliases {
+ o.Aliases = append(o.Aliases, v)
+ }
+ }
+
+ return o
+}
+
+func (m *ReplicatedService) Copy() *ReplicatedService {
+ if m == nil {
+ return nil
+ }
+
+ o := &ReplicatedService{
+ Replicas: m.Replicas,
+ }
+
+ return o
+}
+
+func (m *GlobalService) Copy() *GlobalService {
+ if m == nil {
+ return nil
+ }
+
+ o := &GlobalService{}
+
+ return o
+}
+
+func (m *TaskSpec) Copy() *TaskSpec {
+ if m == nil {
+ return nil
+ }
+
+ o := &TaskSpec{
+ Resources: m.Resources.Copy(),
+ Restart: m.Restart.Copy(),
+ Placement: m.Placement.Copy(),
+ }
+
+ switch m.Runtime.(type) {
+ case *TaskSpec_Container:
+ i := &TaskSpec_Container{
+ Container: m.GetContainer().Copy(),
+ }
+
+ o.Runtime = i
+ }
+
+ return o
+}
+
+func (m *ContainerSpec) Copy() *ContainerSpec {
+ if m == nil {
+ return nil
+ }
+
+ o := &ContainerSpec{
+ Image: m.Image,
+ Dir: m.Dir,
+ User: m.User,
+ StopGracePeriod: m.StopGracePeriod.Copy(),
+ }
+
+ if m.Labels != nil {
+ o.Labels = make(map[string]string)
+ for k, v := range m.Labels {
+ o.Labels[k] = v
+ }
+ }
+
+ if m.Command != nil {
+ o.Command = make([]string, 0, len(m.Command))
+ for _, v := range m.Command {
+ o.Command = append(o.Command, v)
+ }
+ }
+
+ if m.Args != nil {
+ o.Args = make([]string, 0, len(m.Args))
+ for _, v := range m.Args {
+ o.Args = append(o.Args, v)
+ }
+ }
+
+ if m.Env != nil {
+ o.Env = make([]string, 0, len(m.Env))
+ for _, v := range m.Env {
+ o.Env = append(o.Env, v)
+ }
+ }
+
+ if m.Mounts != nil {
+ o.Mounts = make([]Mount, 0, len(m.Mounts))
+ for _, v := range m.Mounts {
+ o.Mounts = append(o.Mounts, *v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *EndpointSpec) Copy() *EndpointSpec {
+ if m == nil {
+ return nil
+ }
+
+ o := &EndpointSpec{
+ Mode: m.Mode,
+ }
+
+ if m.Ports != nil {
+ o.Ports = make([]*PortConfig, 0, len(m.Ports))
+ for _, v := range m.Ports {
+ o.Ports = append(o.Ports, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *NetworkSpec) Copy() *NetworkSpec {
+ if m == nil {
+ return nil
+ }
+
+ o := &NetworkSpec{
+ Annotations: *m.Annotations.Copy(),
+ DriverConfig: m.DriverConfig.Copy(),
+ Ipv6Enabled: m.Ipv6Enabled,
+ Internal: m.Internal,
+ IPAM: m.IPAM.Copy(),
+ }
+
+ return o
+}
+
+func (m *ClusterSpec) Copy() *ClusterSpec {
+ if m == nil {
+ return nil
+ }
+
+ o := &ClusterSpec{
+ Annotations: *m.Annotations.Copy(),
+ AcceptancePolicy: *m.AcceptancePolicy.Copy(),
+ Orchestration: *m.Orchestration.Copy(),
+ Raft: *m.Raft.Copy(),
+ Dispatcher: *m.Dispatcher.Copy(),
+ CAConfig: *m.CAConfig.Copy(),
+ }
+
+ return o
+}
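+
+// copySemanticsExample is an illustrative sketch (not generated code): the
+// Copy methods above produce deep copies, so mutating a clone's slices does
+// not affect the original.
+func copySemanticsExample() bool {
+	orig := &ContainerSpec{Env: []string{"A=1"}}
+	clone := orig.Copy()
+	clone.Env[0] = "A=2"
+	return orig.Env[0] == "A=1" // true: the env slices share no backing array
+}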
+
+func (this *NodeSpec) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&api.NodeSpec{")
+ s = append(s, "Annotations: "+strings.Replace(this.Annotations.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Role: "+fmt.Sprintf("%#v", this.Role)+",\n")
+ s = append(s, "Membership: "+fmt.Sprintf("%#v", this.Membership)+",\n")
+ s = append(s, "Availability: "+fmt.Sprintf("%#v", this.Availability)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ServiceSpec) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 11)
+ s = append(s, "&api.ServiceSpec{")
+ s = append(s, "Annotations: "+strings.Replace(this.Annotations.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Task: "+strings.Replace(this.Task.GoString(), `&`, ``, 1)+",\n")
+ if this.Mode != nil {
+ s = append(s, "Mode: "+fmt.Sprintf("%#v", this.Mode)+",\n")
+ }
+ if this.Update != nil {
+ s = append(s, "Update: "+fmt.Sprintf("%#v", this.Update)+",\n")
+ }
+ if this.Networks != nil {
+ s = append(s, "Networks: "+fmt.Sprintf("%#v", this.Networks)+",\n")
+ }
+ if this.Endpoint != nil {
+ s = append(s, "Endpoint: "+fmt.Sprintf("%#v", this.Endpoint)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ServiceSpec_Replicated) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&api.ServiceSpec_Replicated{` +
+ `Replicated:` + fmt.Sprintf("%#v", this.Replicated) + `}`}, ", ")
+ return s
+}
+func (this *ServiceSpec_Global) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&api.ServiceSpec_Global{` +
+ `Global:` + fmt.Sprintf("%#v", this.Global) + `}`}, ", ")
+ return s
+}
+func (this *ServiceSpec_NetworkAttachmentConfig) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.ServiceSpec_NetworkAttachmentConfig{")
+ s = append(s, "Target: "+fmt.Sprintf("%#v", this.Target)+",\n")
+ s = append(s, "Aliases: "+fmt.Sprintf("%#v", this.Aliases)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ReplicatedService) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ReplicatedService{")
+ s = append(s, "Replicas: "+fmt.Sprintf("%#v", this.Replicas)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GlobalService) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&api.GlobalService{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TaskSpec) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&api.TaskSpec{")
+ if this.Runtime != nil {
+ s = append(s, "Runtime: "+fmt.Sprintf("%#v", this.Runtime)+",\n")
+ }
+ if this.Resources != nil {
+ s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n")
+ }
+ if this.Restart != nil {
+ s = append(s, "Restart: "+fmt.Sprintf("%#v", this.Restart)+",\n")
+ }
+ if this.Placement != nil {
+ s = append(s, "Placement: "+fmt.Sprintf("%#v", this.Placement)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TaskSpec_Container) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&api.TaskSpec_Container{` +
+ `Container:` + fmt.Sprintf("%#v", this.Container) + `}`}, ", ")
+ return s
+}
+func (this *ContainerSpec) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 13)
+ s = append(s, "&api.ContainerSpec{")
+ s = append(s, "Image: "+fmt.Sprintf("%#v", this.Image)+",\n")
+ keysForLabels := make([]string, 0, len(this.Labels))
+	for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ if this.Labels != nil {
+ s = append(s, "Labels: "+mapStringForLabels+",\n")
+ }
+ s = append(s, "Command: "+fmt.Sprintf("%#v", this.Command)+",\n")
+ s = append(s, "Args: "+fmt.Sprintf("%#v", this.Args)+",\n")
+ s = append(s, "Env: "+fmt.Sprintf("%#v", this.Env)+",\n")
+ s = append(s, "Dir: "+fmt.Sprintf("%#v", this.Dir)+",\n")
+ s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n")
+ if this.Mounts != nil {
+ s = append(s, "Mounts: "+fmt.Sprintf("%#v", this.Mounts)+",\n")
+ }
+ if this.StopGracePeriod != nil {
+ s = append(s, "StopGracePeriod: "+fmt.Sprintf("%#v", this.StopGracePeriod)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *EndpointSpec) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.EndpointSpec{")
+ s = append(s, "Mode: "+fmt.Sprintf("%#v", this.Mode)+",\n")
+ if this.Ports != nil {
+ s = append(s, "Ports: "+fmt.Sprintf("%#v", this.Ports)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *NetworkSpec) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&api.NetworkSpec{")
+ s = append(s, "Annotations: "+strings.Replace(this.Annotations.GoString(), `&`, ``, 1)+",\n")
+ if this.DriverConfig != nil {
+ s = append(s, "DriverConfig: "+fmt.Sprintf("%#v", this.DriverConfig)+",\n")
+ }
+ s = append(s, "Ipv6Enabled: "+fmt.Sprintf("%#v", this.Ipv6Enabled)+",\n")
+ s = append(s, "Internal: "+fmt.Sprintf("%#v", this.Internal)+",\n")
+ if this.IPAM != nil {
+ s = append(s, "IPAM: "+fmt.Sprintf("%#v", this.IPAM)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ClusterSpec) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 10)
+ s = append(s, "&api.ClusterSpec{")
+ s = append(s, "Annotations: "+strings.Replace(this.Annotations.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "AcceptancePolicy: "+strings.Replace(this.AcceptancePolicy.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Orchestration: "+strings.Replace(this.Orchestration.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Raft: "+strings.Replace(this.Raft.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Dispatcher: "+strings.Replace(this.Dispatcher.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "CAConfig: "+strings.Replace(this.CAConfig.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringSpecs(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringSpecs(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+func (m *NodeSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Annotations.Size()))
+ n1, err := m.Annotations.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ if m.Role != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Role))
+ }
+ if m.Membership != 0 {
+ data[i] = 0x18
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Membership))
+ }
+ if m.Availability != 0 {
+ data[i] = 0x20
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Availability))
+ }
+ return i, nil
+}
+
+func (m *ServiceSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ServiceSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Annotations.Size()))
+ n2, err := m.Annotations.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ data[i] = 0x12
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Task.Size()))
+ n3, err := m.Task.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ if m.Mode != nil {
+ nn4, err := m.Mode.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn4
+ }
+ if m.Update != nil {
+ data[i] = 0x32
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Update.Size()))
+ n5, err := m.Update.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ }
+ if len(m.Networks) > 0 {
+ for _, msg := range m.Networks {
+ data[i] = 0x3a
+ i++
+ i = encodeVarintSpecs(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.Endpoint != nil {
+ data[i] = 0x42
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Endpoint.Size()))
+ n6, err := m.Endpoint.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ return i, nil
+}
+
+func (m *ServiceSpec_Replicated) MarshalTo(data []byte) (int, error) {
+ i := 0
+ if m.Replicated != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Replicated.Size()))
+ n7, err := m.Replicated.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ return i, nil
+}
+func (m *ServiceSpec_Global) MarshalTo(data []byte) (int, error) {
+ i := 0
+ if m.Global != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Global.Size()))
+ n8, err := m.Global.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ }
+ return i, nil
+}
+func (m *ServiceSpec_NetworkAttachmentConfig) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ServiceSpec_NetworkAttachmentConfig) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Target) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintSpecs(data, i, uint64(len(m.Target)))
+ i += copy(data[i:], m.Target)
+ }
+ if len(m.Aliases) > 0 {
+ for _, s := range m.Aliases {
+ data[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *ReplicatedService) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ReplicatedService) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Replicas != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Replicas))
+ }
+ return i, nil
+}
+
+func (m *GlobalService) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GlobalService) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *TaskSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *TaskSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Runtime != nil {
+ nn9, err := m.Runtime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn9
+ }
+ if m.Resources != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Resources.Size()))
+ n10, err := m.Resources.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ }
+ if m.Restart != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Restart.Size()))
+ n11, err := m.Restart.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ }
+ if m.Placement != nil {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Placement.Size()))
+ n12, err := m.Placement.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ }
+ return i, nil
+}
+
+func (m *TaskSpec_Container) MarshalTo(data []byte) (int, error) {
+ i := 0
+ if m.Container != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Container.Size()))
+ n13, err := m.Container.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ }
+ return i, nil
+}
+func (m *ContainerSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ContainerSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Image) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintSpecs(data, i, uint64(len(m.Image)))
+ i += copy(data[i:], m.Image)
+ }
+ if len(m.Labels) > 0 {
+		for k := range m.Labels {
+ data[i] = 0x12
+ i++
+ v := m.Labels[k]
+ mapSize := 1 + len(k) + sovSpecs(uint64(len(k))) + 1 + len(v) + sovSpecs(uint64(len(v)))
+ i = encodeVarintSpecs(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintSpecs(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintSpecs(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ if len(m.Command) > 0 {
+ for _, s := range m.Command {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintSpecs(data, i, uint64(len(s)))
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Args) > 0 {
+ for _, s := range m.Args {
+ data[i] = 0x22
+ i++
+ i = encodeVarintSpecs(data, i, uint64(len(s)))
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Env) > 0 {
+ for _, s := range m.Env {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintSpecs(data, i, uint64(len(s)))
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Dir) > 0 {
+ data[i] = 0x32
+ i++
+ i = encodeVarintSpecs(data, i, uint64(len(m.Dir)))
+ i += copy(data[i:], m.Dir)
+ }
+ if len(m.User) > 0 {
+ data[i] = 0x3a
+ i++
+ i = encodeVarintSpecs(data, i, uint64(len(m.User)))
+ i += copy(data[i:], m.User)
+ }
+ if len(m.Mounts) > 0 {
+ for _, msg := range m.Mounts {
+ data[i] = 0x42
+ i++
+ i = encodeVarintSpecs(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.StopGracePeriod != nil {
+ data[i] = 0x4a
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.StopGracePeriod.Size()))
+ n14, err := m.StopGracePeriod.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ }
+ return i, nil
+}
+
+func (m *EndpointSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EndpointSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Mode != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Mode))
+ }
+ if len(m.Ports) > 0 {
+ for _, msg := range m.Ports {
+ data[i] = 0x12
+ i++
+ i = encodeVarintSpecs(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *NetworkSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
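+// Annotations is a value (non-pointer) field, so it is written
+// unconditionally; the optional sub-messages are pointers and only
+// written when non-nil. Bools go out as a single varint byte, 0 or 1.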
+func (m *NetworkSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Annotations.Size()))
+ n15, err := m.Annotations.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n15
+ if m.DriverConfig != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.DriverConfig.Size()))
+ n16, err := m.DriverConfig.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ }
+ if m.Ipv6Enabled {
+ data[i] = 0x18
+ i++
+ if m.Ipv6Enabled {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ if m.Internal {
+ data[i] = 0x20
+ i++
+ if m.Internal {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ if m.IPAM != nil {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.IPAM.Size()))
+ n17, err := m.IPAM.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n17
+ }
+ return i, nil
+}
+
+func (m *ClusterSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
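+// Every ClusterSpec sub-config is a value field and therefore always
+// emitted, with tags ascending from 0xa (field 1) to 0x32 (field 6).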
+func (m *ClusterSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Annotations.Size()))
+ n18, err := m.Annotations.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n18
+ data[i] = 0x12
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.AcceptancePolicy.Size()))
+ n19, err := m.AcceptancePolicy.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n19
+ data[i] = 0x1a
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Orchestration.Size()))
+ n20, err := m.Orchestration.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n20
+ data[i] = 0x22
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Raft.Size()))
+ n21, err := m.Raft.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n21
+ data[i] = 0x2a
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.Dispatcher.Size()))
+ n22, err := m.Dispatcher.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n22
+ data[i] = 0x32
+ i++
+ i = encodeVarintSpecs(data, i, uint64(m.CAConfig.Size()))
+ n23, err := m.CAConfig.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n23
+ return i, nil
+}
+
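+// The fixed-width helpers below write values little-endian; the
+// generator appears to emit them unconditionally, even when no
+// fixed64/fixed32 field in this file uses them.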
+func encodeFixed64Specs(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Specs(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
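+// encodeVarintSpecs writes v as a base-128 varint: seven bits per byte,
+// high bit set on every byte except the last. Example: 300 (0b100101100)
+// encodes as 0xac 0x02.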
+func encodeVarintSpecs(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+
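+// Size computes the exact encoded length so Marshal can allocate once:
+// a length-delimited field costs 1 (tag) + sovSpecs(l) (length varint)
+// + l (payload); a varint field costs 1 (tag) + sovSpecs(value).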
+func (m *NodeSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = m.Annotations.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ if m.Role != 0 {
+ n += 1 + sovSpecs(uint64(m.Role))
+ }
+ if m.Membership != 0 {
+ n += 1 + sovSpecs(uint64(m.Membership))
+ }
+ if m.Availability != 0 {
+ n += 1 + sovSpecs(uint64(m.Availability))
+ }
+ return n
+}
+
+func (m *ServiceSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = m.Annotations.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ l = m.Task.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ if m.Mode != nil {
+ n += m.Mode.Size()
+ }
+ if m.Update != nil {
+ l = m.Update.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ if len(m.Networks) > 0 {
+ for _, e := range m.Networks {
+ l = e.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ }
+ if m.Endpoint != nil {
+ l = m.Endpoint.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ return n
+}
+
+func (m *ServiceSpec_Replicated) Size() (n int) {
+ var l int
+ _ = l
+ if m.Replicated != nil {
+ l = m.Replicated.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ return n
+}
+func (m *ServiceSpec_Global) Size() (n int) {
+ var l int
+ _ = l
+ if m.Global != nil {
+ l = m.Global.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ return n
+}
+func (m *ServiceSpec_NetworkAttachmentConfig) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Target)
+ if l > 0 {
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ if len(m.Aliases) > 0 {
+ for _, s := range m.Aliases {
+ l = len(s)
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ReplicatedService) Size() (n int) {
+ var l int
+ _ = l
+ if m.Replicas != 0 {
+ n += 1 + sovSpecs(uint64(m.Replicas))
+ }
+ return n
+}
+
+func (m *GlobalService) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *TaskSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Runtime != nil {
+ n += m.Runtime.Size()
+ }
+ if m.Resources != nil {
+ l = m.Resources.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ if m.Restart != nil {
+ l = m.Restart.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ if m.Placement != nil {
+ l = m.Placement.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ return n
+}
+
+func (m *TaskSpec_Container) Size() (n int) {
+ var l int
+ _ = l
+ if m.Container != nil {
+ l = m.Container.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ return n
+}
+func (m *ContainerSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Image)
+ if l > 0 {
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ mapEntrySize := 1 + len(k) + sovSpecs(uint64(len(k))) + 1 + len(v) + sovSpecs(uint64(len(v)))
+ n += mapEntrySize + 1 + sovSpecs(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Command) > 0 {
+ for _, s := range m.Command {
+ l = len(s)
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ }
+ if len(m.Args) > 0 {
+ for _, s := range m.Args {
+ l = len(s)
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ }
+ if len(m.Env) > 0 {
+ for _, s := range m.Env {
+ l = len(s)
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ }
+ l = len(m.Dir)
+ if l > 0 {
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ l = len(m.User)
+ if l > 0 {
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ if len(m.Mounts) > 0 {
+ for _, e := range m.Mounts {
+ l = e.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ }
+ if m.StopGracePeriod != nil {
+ l = m.StopGracePeriod.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ return n
+}
+
+func (m *EndpointSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Mode != 0 {
+ n += 1 + sovSpecs(uint64(m.Mode))
+ }
+ if len(m.Ports) > 0 {
+ for _, e := range m.Ports {
+ l = e.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NetworkSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = m.Annotations.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ if m.DriverConfig != nil {
+ l = m.DriverConfig.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ if m.Ipv6Enabled {
+ n += 2
+ }
+ if m.Internal {
+ n += 2
+ }
+ if m.IPAM != nil {
+ l = m.IPAM.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ }
+ return n
+}
+
+func (m *ClusterSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = m.Annotations.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ l = m.AcceptancePolicy.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ l = m.Orchestration.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ l = m.Raft.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ l = m.Dispatcher.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ l = m.CAConfig.Size()
+ n += 1 + l + sovSpecs(uint64(l))
+ return n
+}
+
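+// sovSpecs is the byte count of x as a varint (one byte per 7 bits).
+// sozSpecs is the same for zigzag-encoded (sint) values, where
+// (x<<1)^(x>>63) maps small negative numbers to small unsigned ones.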
+func sovSpecs(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozSpecs(x uint64) (n int) {
+ return sovSpecs(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *NodeSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NodeSpec{`,
+ `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`,
+ `Role:` + fmt.Sprintf("%v", this.Role) + `,`,
+ `Membership:` + fmt.Sprintf("%v", this.Membership) + `,`,
+ `Availability:` + fmt.Sprintf("%v", this.Availability) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceSpec{`,
+ `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`,
+ `Task:` + strings.Replace(strings.Replace(this.Task.String(), "TaskSpec", "TaskSpec", 1), `&`, ``, 1) + `,`,
+ `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`,
+ `Update:` + strings.Replace(fmt.Sprintf("%v", this.Update), "UpdateConfig", "UpdateConfig", 1) + `,`,
+ `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "ServiceSpec_NetworkAttachmentConfig", "ServiceSpec_NetworkAttachmentConfig", 1) + `,`,
+ `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "EndpointSpec", "EndpointSpec", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceSpec_Replicated) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceSpec_Replicated{`,
+ `Replicated:` + strings.Replace(fmt.Sprintf("%v", this.Replicated), "ReplicatedService", "ReplicatedService", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceSpec_Global) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceSpec_Global{`,
+ `Global:` + strings.Replace(fmt.Sprintf("%v", this.Global), "GlobalService", "GlobalService", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceSpec_NetworkAttachmentConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceSpec_NetworkAttachmentConfig{`,
+ `Target:` + fmt.Sprintf("%v", this.Target) + `,`,
+ `Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicatedService) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReplicatedService{`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GlobalService) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GlobalService{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TaskSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TaskSpec{`,
+ `Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`,
+ `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "ResourceRequirements", 1) + `,`,
+ `Restart:` + strings.Replace(fmt.Sprintf("%v", this.Restart), "RestartPolicy", "RestartPolicy", 1) + `,`,
+ `Placement:` + strings.Replace(fmt.Sprintf("%v", this.Placement), "Placement", "Placement", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TaskSpec_Container) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TaskSpec_Container{`,
+ `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerSpec", "ContainerSpec", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ContainerSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&ContainerSpec{`,
+ `Image:` + fmt.Sprintf("%v", this.Image) + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `Command:` + fmt.Sprintf("%v", this.Command) + `,`,
+ `Args:` + fmt.Sprintf("%v", this.Args) + `,`,
+ `Env:` + fmt.Sprintf("%v", this.Env) + `,`,
+ `Dir:` + fmt.Sprintf("%v", this.Dir) + `,`,
+ `User:` + fmt.Sprintf("%v", this.User) + `,`,
+ `Mounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "Mount", 1), `&`, ``, 1) + `,`,
+ `StopGracePeriod:` + strings.Replace(fmt.Sprintf("%v", this.StopGracePeriod), "Duration", "docker_swarmkit_v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EndpointSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EndpointSpec{`,
+ `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`,
+ `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NetworkSpec{`,
+ `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`,
+ `DriverConfig:` + strings.Replace(fmt.Sprintf("%v", this.DriverConfig), "Driver", "Driver", 1) + `,`,
+ `Ipv6Enabled:` + fmt.Sprintf("%v", this.Ipv6Enabled) + `,`,
+ `Internal:` + fmt.Sprintf("%v", this.Internal) + `,`,
+ `IPAM:` + strings.Replace(fmt.Sprintf("%v", this.IPAM), "IPAMOptions", "IPAMOptions", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterSpec{`,
+ `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`,
+ `AcceptancePolicy:` + strings.Replace(strings.Replace(this.AcceptancePolicy.String(), "AcceptancePolicy", "AcceptancePolicy", 1), `&`, ``, 1) + `,`,
+ `Orchestration:` + strings.Replace(strings.Replace(this.Orchestration.String(), "OrchestrationConfig", "OrchestrationConfig", 1), `&`, ``, 1) + `,`,
+ `Raft:` + strings.Replace(strings.Replace(this.Raft.String(), "RaftConfig", "RaftConfig", 1), `&`, ``, 1) + `,`,
+ `Dispatcher:` + strings.Replace(strings.Replace(this.Dispatcher.String(), "DispatcherConfig", "DispatcherConfig", 1), `&`, ``, 1) + `,`,
+ `CAConfig:` + strings.Replace(strings.Replace(this.CAConfig.String(), "CAConfig", "CAConfig", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringSpecs(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
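+// Each Unmarshal loops over the buffer: read a varint tag, split it into
+// field number and wire type, decode the known fields, and hand anything
+// unrecognized to skipSpecs so unknown fields are tolerated.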
+func (m *NodeSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Annotations.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ m.Role = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Role |= (NodeRole(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType)
+ }
+ m.Membership = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Membership |= (NodeSpec_Membership(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Availability", wireType)
+ }
+ m.Availability = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Availability |= (NodeSpec_Availability(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSpecs(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
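+// ServiceSpec's mode is a oneof: fields 3 (replicated) and 4 (global)
+// both decode into m.Mode, each wrapped in its oneof container type,
+// so the last one seen on the wire wins.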
+func (m *ServiceSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Annotations.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Task.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicated", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &ReplicatedService{}
+ if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Mode = &ServiceSpec_Replicated{v}
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Global", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &GlobalService{}
+ if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Mode = &ServiceSpec_Global{v}
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Update", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Update == nil {
+ m.Update = &UpdateConfig{}
+ }
+ if err := m.Update.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Networks = append(m.Networks, &ServiceSpec_NetworkAttachmentConfig{})
+ if err := m.Networks[len(m.Networks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Endpoint == nil {
+ m.Endpoint = &EndpointSpec{}
+ }
+ if err := m.Endpoint.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSpecs(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceSpec_NetworkAttachmentConfig) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkAttachmentConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkAttachmentConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Target = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Aliases = append(m.Aliases, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSpecs(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicatedService) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicatedService: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicatedService: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Replicas |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSpecs(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GlobalService) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GlobalService: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GlobalService: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSpecs(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TaskSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TaskSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TaskSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &ContainerSpec{}
+ if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Runtime = &TaskSpec_Container{v}
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Resources == nil {
+ m.Resources = &ResourceRequirements{}
+ }
+ if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Restart", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Restart == nil {
+ m.Restart = &RestartPolicy{}
+ }
+ if err := m.Restart.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Placement", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Placement == nil {
+ m.Placement = &Placement{}
+ }
+ if err := m.Placement.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSpecs(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
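+// The Labels map is decoded entry by entry: each entry is a nested
+// message whose field 1 is the key and field 2 is the value, read here
+// with inline varint loops for the key/value tags and lengths.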
+func (m *ContainerSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ContainerSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ContainerSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Image = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Command = append(m.Command, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Args = append(m.Args, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Env = append(m.Env, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Dir = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.User = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Mounts = append(m.Mounts, Mount{})
+ if err := m.Mounts[len(m.Mounts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StopGracePeriod", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.StopGracePeriod == nil {
+ m.StopGracePeriod = &docker_swarmkit_v11.Duration{}
+ }
+ if err := m.StopGracePeriod.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSpecs(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
+ }
+ m.Mode = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Mode |= (EndpointSpec_ResolutionMode(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ports = append(m.Ports, &PortConfig{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSpecs(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Annotations.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DriverConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DriverConfig == nil {
+ m.DriverConfig = &Driver{}
+ }
+ if err := m.DriverConfig.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ipv6Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Ipv6Enabled = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Internal = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IPAM", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.IPAM == nil {
+ m.IPAM = &IPAMOptions{}
+ }
+ if err := m.IPAM.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSpecs(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Annotations.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AcceptancePolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.AcceptancePolicy.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Orchestration", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Orchestration.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Raft", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Raft.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dispatcher", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Dispatcher.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CAConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.CAConfig.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSpecs(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSpecs
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
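+// skipSpecs returns how many bytes the next field occupies, based on its
+// wire type: 0 varint, 1 fixed64, 2 length-delimited, 5 fixed32; type 3
+// (start group) recurses until the matching end group (type 4).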
+func skipSpecs(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthSpecs
+ }
+ iNdEx += length
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ start := iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSpecs
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipSpecs(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthSpecs = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowSpecs = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorSpecs = []byte{
+ // 1224 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x56, 0x4d, 0x6f, 0x1b, 0x45,
+ 0x18, 0x8e, 0xe3, 0x8d, 0xb3, 0x9e, 0x4d, 0x5a, 0x77, 0x54, 0x5a, 0xd7, 0x2d, 0x49, 0x6a, 0x15,
+ 0x28, 0x48, 0x38, 0x60, 0x50, 0x5b, 0xbe, 0x84, 0xb6, 0xb6, 0x49, 0x4d, 0x89, 0x6b, 0x4d, 0xda,
+ 0x70, 0x8c, 0xc6, 0xbb, 0x53, 0x67, 0x95, 0xf5, 0xce, 0x32, 0x3b, 0x76, 0x95, 0x1b, 0xc7, 0x8a,
+ 0x03, 0xe2, 0xc2, 0x05, 0x89, 0x13, 0xff, 0x81, 0xdf, 0x90, 0x23, 0x37, 0x38, 0x55, 0xb4, 0xbf,
+ 0x00, 0x89, 0x3f, 0xc0, 0x3b, 0xb3, 0x63, 0x7b, 0x4d, 0x37, 0xed, 0x25, 0x87, 0x95, 0xe6, 0xe3,
+ 0x79, 0x9e, 0x99, 0x79, 0x3f, 0x17, 0x39, 0x49, 0xcc, 0xbc, 0xa4, 0x11, 0x0b, 0x2e, 0x39, 0xc6,
+ 0x3e, 0xf7, 0x8e, 0x98, 0x68, 0x24, 0x4f, 0xa8, 0x18, 0x1d, 0x05, 0xb2, 0x31, 0xf9, 0xb0, 0xe6,
+ 0xc8, 0xe3, 0x98, 0x19, 0x40, 0xed, 0xe2, 0x90, 0x0f, 0xb9, 0x1e, 0x6e, 0xab, 0x91, 0x59, 0xbd,
+ 0xec, 0x8f, 0x05, 0x95, 0x01, 0x8f, 0xb6, 0xa7, 0x83, 0x74, 0xa3, 0xfe, 0xa3, 0x85, 0xec, 0x1e,
+ 0xf7, 0xd9, 0x1e, 0x9c, 0x81, 0x77, 0x90, 0x43, 0xa3, 0x88, 0x4b, 0x0d, 0x48, 0xaa, 0x85, 0xad,
+ 0xc2, 0x4d, 0xa7, 0xb9, 0xd9, 0x78, 0xf9, 0xc8, 0x86, 0x3b, 0x87, 0xdd, 0xb5, 0x4e, 0x9e, 0x6d,
+ 0x2e, 0x91, 0x2c, 0x13, 0x7f, 0x80, 0x2c, 0xc1, 0x43, 0x56, 0x5d, 0x06, 0x85, 0x73, 0xcd, 0x6b,
+ 0x79, 0x0a, 0xea, 0x50, 0x02, 0x18, 0xa2, 0x91, 0x70, 0x34, 0x1a, 0xb1, 0xd1, 0x80, 0x89, 0xe4,
+ 0x30, 0x88, 0xab, 0x45, 0xcd, 0x7b, 0xe7, 0x34, 0x9e, 0xba, 0x6c, 0x63, 0x77, 0x06, 0x27, 0x19,
+ 0x2a, 0xde, 0x45, 0x6b, 0x74, 0x42, 0x83, 0x90, 0x0e, 0x82, 0x30, 0x90, 0xc7, 0x55, 0x4b, 0x4b,
+ 0xbd, 0xfb, 0x4a, 0x29, 0x37, 0x43, 0x20, 0x0b, 0xf4, 0xba, 0x8f, 0xd0, 0xfc, 0x20, 0xfc, 0x36,
+ 0x5a, 0xed, 0x77, 0x7a, 0xed, 0x6e, 0x6f, 0xa7, 0xb2, 0x54, 0xbb, 0xf2, 0xc3, 0xaf, 0x5b, 0x6f,
+ 0x28, 0x8d, 0x39, 0xa0, 0xcf, 0x22, 0x3f, 0x88, 0x86, 0xf8, 0x26, 0xb2, 0xdd, 0x56, 0xab, 0xd3,
+ 0x7f, 0xd8, 0x69, 0x57, 0x0a, 0xb5, 0x1a, 0x00, 0x2f, 0x2d, 0x02, 0x5d, 0xcf, 0x63, 0xb1, 0x64,
+ 0x7e, 0xcd, 0x7a, 0xfa, 0xdb, 0xc6, 0x52, 0xfd, 0x69, 0x01, 0xad, 0x65, 0x2f, 0x01, 0x07, 0x95,
+ 0xdc, 0xd6, 0xc3, 0xee, 0x7e, 0x07, 0xce, 0x99, 0xd1, 0xb3, 0x08, 0xd7, 0x93, 0xc1, 0x84, 0xe1,
+ 0x1b, 0x68, 0xa5, 0xef, 0x3e, 0xda, 0xeb, 0xc0, 0x29, 0xb3, 0xeb, 0x64, 0x61, 0x7d, 0x3a, 0x4e,
+ 0x34, 0xaa, 0x4d, 0xdc, 0x6e, 0xaf, 0xb2, 0x9c, 0x8f, 0x6a, 0x0b, 0x1a, 0x44, 0xe6, 0x2a, 0xbf,
+ 0x5b, 0xc8, 0xd9, 0x63, 0x62, 0x12, 0x78, 0x67, 0x1c, 0x13, 0xb7, 0x90, 0x25, 0x69, 0x72, 0xa4,
+ 0x63, 0xc2, 0xc9, 0x8f, 0x89, 0x87, 0xb0, 0xaf, 0x0e, 0x35, 0x74, 0x8d, 0x57, 0x91, 0x21, 0x58,
+ 0x1c, 0x06, 0x1e, 0x05, 0x7b, 0xe9, 0xc8, 0x70, 0x9a, 0x6f, 0xe5, 0xb1, 0xc9, 0x0c, 0x65, 0xee,
+ 0x7f, 0x6f, 0x89, 0x64, 0xa8, 0xf8, 0x33, 0x54, 0x1a, 0x86, 0x7c, 0x40, 0x43, 0x1d, 0x13, 0x4e,
+ 0xf3, 0x7a, 0x9e, 0xc8, 0x8e, 0x46, 0xcc, 0x05, 0x0c, 0x05, 0xdf, 0x41, 0xa5, 0x71, 0xec, 0x83,
+ 0x4e, 0xb5, 0xa4, 0xc9, 0x5b, 0x79, 0xe4, 0x47, 0x1a, 0xd1, 0xe2, 0xd1, 0xe3, 0x60, 0x48, 0x0c,
+ 0x1e, 0xef, 0x21, 0x3b, 0x62, 0xf2, 0x09, 0x17, 0x47, 0x49, 0x75, 0x75, 0xab, 0x08, 0xdc, 0xdb,
+ 0x79, 0xdc, 0x8c, 0xcd, 0x1b, 0xbd, 0x14, 0xef, 0x4a, 0x49, 0xbd, 0xc3, 0x11, 0x8b, 0xa4, 0x91,
+ 0x9c, 0x09, 0xe1, 0xcf, 0x91, 0x0d, 0xa1, 0x16, 0xf3, 0x20, 0x92, 0x55, 0xfb, 0xf4, 0x0b, 0x75,
+ 0x0c, 0x46, 0xa9, 0x92, 0x19, 0xa3, 0x76, 0x1f, 0x5d, 0x3e, 0xe5, 0x08, 0x7c, 0x09, 0x95, 0x24,
+ 0x15, 0x43, 0x26, 0xb5, 0xa7, 0xcb, 0xc4, 0xcc, 0x70, 0x15, 0xad, 0xd2, 0x30, 0xa0, 0x09, 0x4b,
+ 0xc0, 0x81, 0x45, 0xd8, 0x98, 0x4e, 0xef, 0x96, 0x90, 0x35, 0x82, 0x78, 0xaa, 0x6f, 0xa3, 0x0b,
+ 0x2f, 0x79, 0x00, 0xd7, 0x90, 0x6d, 0x3c, 0x90, 0x86, 0x8e, 0x45, 0x66, 0xf3, 0xfa, 0x79, 0xb4,
+ 0xbe, 0x60, 0xed, 0xfa, 0x2f, 0xcb, 0xc8, 0x9e, 0x86, 0x00, 0x76, 0x51, 0xd9, 0xe3, 0x91, 0x84,
+ 0xc0, 0x64, 0xc2, 0x44, 0x5d, 0xae, 0xc3, 0x5a, 0x53, 0x90, 0x62, 0x81, 0xc3, 0xe6, 0x2c, 0xfc,
+ 0x15, 0x2a, 0x0b, 0x96, 0xf0, 0xb1, 0xf0, 0xf4, 0xad, 0x95, 0xc4, 0xcd, 0xfc, 0xc0, 0x49, 0x41,
+ 0x84, 0x7d, 0x37, 0x0e, 0x04, 0x53, 0xd6, 0x48, 0xc8, 0x9c, 0x0a, 0x81, 0xb3, 0x0a, 0x13, 0x30,
+ 0x84, 0x7c, 0x55, 0xe4, 0x90, 0x14, 0xd2, 0xe7, 0xf0, 0xba, 0x63, 0x32, 0x65, 0x00, 0xb9, 0x1c,
+ 0x87, 0xd4, 0xd3, 0xaa, 0xd5, 0x15, 0x4d, 0x7f, 0x33, 0x8f, 0xde, 0x9f, 0x82, 0xc8, 0x1c, 0x7f,
+ 0xb7, 0x0c, 0x27, 0x8f, 0x23, 0x19, 0x8c, 0x58, 0xfd, 0xa7, 0x22, 0x5a, 0x5f, 0x78, 0x2b, 0xbe,
+ 0x88, 0x56, 0x82, 0x11, 0x1d, 0x32, 0xe3, 0xa9, 0x74, 0x82, 0x3b, 0xa8, 0x04, 0x69, 0xcd, 0xc2,
+ 0xd4, 0x4f, 0x4e, 0xf3, 0xfd, 0xd7, 0x1a, 0xad, 0xf1, 0x8d, 0xc6, 0x77, 0x22, 0x29, 0x8e, 0x89,
+ 0x21, 0x2b, 0x7f, 0x7b, 0x7c, 0x34, 0xa2, 0x91, 0x4a, 0x39, 0xed, 0x6f, 0x33, 0xc5, 0x18, 0x59,
+ 0x10, 0x12, 0x09, 0x98, 0x42, 0x2d, 0xeb, 0x31, 0xae, 0xa0, 0x22, 0x8b, 0x26, 0xf0, 0x3c, 0xb5,
+ 0xa4, 0x86, 0x6a, 0xc5, 0x0f, 0x84, 0x4e, 0x16, 0x58, 0x81, 0xa1, 0xe2, 0x41, 0x2d, 0x12, 0x90,
+ 0x03, 0x6a, 0x49, 0x8f, 0xf1, 0x6d, 0x54, 0x1a, 0x71, 0x78, 0x60, 0x02, 0x41, 0xac, 0x2e, 0x7b,
+ 0x25, 0xef, 0xb2, 0xbb, 0x0a, 0x61, 0x4a, 0x82, 0x81, 0xe3, 0x7b, 0xe8, 0x42, 0x22, 0x79, 0x7c,
+ 0x30, 0x14, 0x60, 0xaa, 0x83, 0x98, 0x89, 0x80, 0xfb, 0xd5, 0xf2, 0xe9, 0x95, 0xa5, 0x6d, 0xba,
+ 0x1e, 0x39, 0xaf, 0x68, 0x3b, 0x8a, 0xd5, 0xd7, 0xa4, 0xda, 0x27, 0xc8, 0xc9, 0xbc, 0x5f, 0xdd,
+ 0xfb, 0x88, 0x1d, 0x1b, 0x93, 0xaa, 0xa1, 0x32, 0xf3, 0x84, 0x86, 0xe3, 0xb4, 0x99, 0x81, 0x99,
+ 0xf5, 0xe4, 0xd3, 0xe5, 0x3b, 0x85, 0xfa, 0xbf, 0x50, 0xb5, 0xb3, 0x19, 0x86, 0x5b, 0x69, 0x2a,
+ 0x68, 0xf6, 0xb9, 0xe6, 0xf6, 0xeb, 0x32, 0x52, 0x07, 0x5e, 0x38, 0x56, 0xf7, 0xda, 0x55, 0xdd,
+ 0x50, 0x93, 0xf1, 0xc7, 0x68, 0x25, 0xe6, 0x42, 0x4e, 0xfd, 0xb7, 0x91, 0x1b, 0x2c, 0x00, 0x30,
+ 0x35, 0x21, 0x05, 0xd7, 0x0f, 0xd1, 0xb9, 0x45, 0x35, 0x28, 0xfa, 0xc5, 0xfd, 0x6e, 0x1f, 0xfa,
+ 0xc7, 0x55, 0x28, 0xf9, 0x97, 0x17, 0x37, 0xf7, 0x03, 0x21, 0xc7, 0x34, 0xec, 0xf6, 0xf1, 0x7b,
+ 0xd0, 0x1a, 0x7a, 0x7b, 0x84, 0x40, 0x03, 0xd9, 0x04, 0xdc, 0xd5, 0x45, 0x9c, 0xda, 0x02, 0x83,
+ 0xfb, 0x84, 0x0f, 0x66, 0x0d, 0xe2, 0xe7, 0x65, 0xe4, 0x98, 0xea, 0x71, 0xb6, 0x0d, 0xe2, 0x4b,
+ 0xb4, 0xee, 0x0b, 0x68, 0x6a, 0xe2, 0xc0, 0xd3, 0x4f, 0x33, 0x29, 0x5b, 0xcb, 0xf5, 0xa7, 0x06,
+ 0x92, 0xb5, 0x94, 0x60, 0x6a, 0xd7, 0x75, 0xb4, 0x16, 0xc4, 0x93, 0x5b, 0x07, 0x2c, 0xa2, 0x83,
+ 0xd0, 0xf4, 0x0a, 0x9b, 0x38, 0x6a, 0xad, 0x93, 0x2e, 0xa9, 0x7a, 0x04, 0xc6, 0x67, 0x22, 0x32,
+ 0x5d, 0xc0, 0x26, 0xb3, 0x39, 0xfe, 0x02, 0x59, 0x41, 0x4c, 0x47, 0x26, 0x49, 0x73, 0x5f, 0xd0,
+ 0xed, 0xbb, 0xbb, 0x0f, 0xe2, 0xf4, 0x05, 0xf6, 0x8b, 0x67, 0x9b, 0x96, 0x5a, 0x20, 0x9a, 0x56,
+ 0xff, 0xb3, 0x88, 0x9c, 0x56, 0x38, 0x4e, 0xa4, 0x49, 0xcf, 0x33, 0xb3, 0xcb, 0xb7, 0xe8, 0x02,
+ 0xd5, 0xbf, 0x0b, 0x34, 0x52, 0xb1, 0xae, 0xeb, 0x8b, 0xb1, 0xcd, 0x8d, 0x5c, 0xb9, 0x19, 0x38,
+ 0xad, 0x45, 0x46, 0xb3, 0x42, 0xff, 0xb7, 0x0e, 0x9d, 0x69, 0x9d, 0x0b, 0xef, 0x10, 0x0a, 0x55,
+ 0x9a, 0x1c, 0xa6, 0xb9, 0xe6, 0xfe, 0x76, 0x3d, 0xc8, 0x02, 0x53, 0x7b, 0x1b, 0xdd, 0x45, 0x0d,
+ 0x68, 0x94, 0x96, 0xa0, 0x8f, 0xa7, 0x95, 0x32, 0x37, 0x7a, 0x09, 0xec, 0x2f, 0x48, 0x68, 0x06,
+ 0xfe, 0x1a, 0x21, 0x3f, 0x48, 0x62, 0x2a, 0x41, 0x4e, 0x18, 0x2f, 0xe4, 0x3e, 0xb0, 0x3d, 0x43,
+ 0x2d, 0xa8, 0x64, 0xd8, 0xf8, 0x3e, 0x74, 0x0f, 0x3a, 0x8d, 0xa3, 0xd2, 0xe9, 0x75, 0xa1, 0xe5,
+ 0x1a, 0x89, 0x8a, 0x92, 0x00, 0x8f, 0xda, 0xd3, 0x15, 0x62, 0x7b, 0xd4, 0xec, 0x5d, 0x3b, 0x79,
+ 0xbe, 0xb1, 0xf4, 0x17, 0x7c, 0xff, 0x3c, 0xdf, 0x28, 0x7c, 0xff, 0x62, 0xa3, 0x70, 0x02, 0xdf,
+ 0x1f, 0xf0, 0xfd, 0x0d, 0xdf, 0xa0, 0xa4, 0x7f, 0xa4, 0x3f, 0xfa, 0x2f, 0x00, 0x00, 0xff, 0xff,
+ 0x4e, 0xfb, 0xdd, 0xf6, 0xa7, 0x0b, 0x00, 0x00,
+}
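
The skip helpers above walk protobuf's wire format by repeatedly decoding varints. As a minimal standalone sketch of that decoding loop (a hypothetical main package, not part of the vendored code):

    package main

    import "fmt"

    // decodeVarint mirrors the inner loop of skipSpecs: accumulate 7 bits per
    // byte, least-significant group first, until a byte with the high bit
    // clear terminates the value.
    func decodeVarint(data []byte) (v uint64, n int, err error) {
    	for shift := uint(0); ; shift += 7 {
    		if shift >= 64 {
    			return 0, 0, fmt.Errorf("integer overflow")
    		}
    		if n >= len(data) {
    			return 0, 0, fmt.Errorf("unexpected EOF")
    		}
    		b := data[n]
    		n++
    		v |= (uint64(b) & 0x7F) << shift
    		if b < 0x80 {
    			return v, n, nil
    		}
    	}
    }

    func main() {
    	v, n, _ := decodeVarint([]byte{0xAC, 0x02}) // 300 on the wire
    	fmt.Println(v, n)                           // 300 2
    }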
diff --git a/vendor/src/github.com/docker/swarmkit/api/specs.proto b/vendor/src/github.com/docker/swarmkit/api/specs.proto
new file mode 100644
index 0000000000..1f560271c0
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/specs.proto
@@ -0,0 +1,231 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "types.proto";
+import "gogoproto/gogo.proto";
+import "duration/duration.proto"; // TODO(stevvooe): use our own until we fix gogoproto/deepcopy
+
+// Specs are container objects for user provided input. All creations and
+// updates are done through spec types. As a convention, user input from a spec
+// is never touched in created objects. This allows one to verify that the
+// user's intent has not been modified.
+//
+// Put differently, spec types can be said to represent the desired state of
+// the system. In situations where modifications need to be made to a
+// particular component, API objects will either contain a copy of the spec
+// component or a different representation to reflect allocation or resolution.
+
+message NodeSpec {
+ Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+ enum Membership {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ PENDING = 0 [(gogoproto.enumvalue_customname) = "NodeMembershipPending"];
+ ACCEPTED = 1 [(gogoproto.enumvalue_customname) = "NodeMembershipAccepted"];
+ }
+
+ enum Availability {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ // Active nodes.
+ ACTIVE = 0 [(gogoproto.enumvalue_customname) = "NodeAvailabilityActive"];
+
+ // Paused nodes won't be considered by the scheduler, preventing any
+ // further tasks from running on them.
+ PAUSE = 1 [(gogoproto.enumvalue_customname) = "NodeAvailabilityPause"];
+
+ // Drained nodes are paused and any task already running on them will
+ // be evicted.
+ DRAIN = 2 [(gogoproto.enumvalue_customname) = "NodeAvailabilityDrain"];
+ }
+
+ // Role defines the role the node should have.
+ NodeRole role = 2;
+
+ // Membership controls the admission of the node into the cluster.
+ Membership membership = 3;
+
+ // Availability allows a user to control the current scheduling status of a
+ // node.
+ Availability availability = 4;
+}
+
+// ServiceSpec defines the properties of a service.
+//
+// A service instructs the cluster in orchestrating repeated instances of a
+// template, implemented as tasks. Based on the number of instances, scheduling
+// strategy and restart policy, a number of application-level behaviors can be
+// defined.
+message ServiceSpec {
+ Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+ // Task defines the task template this service will spawn.
+ TaskSpec task = 2 [(gogoproto.nullable) = false];
+
+ oneof mode {
+ ReplicatedService replicated = 3;
+ GlobalService global = 4;
+ }
+
+ // UpdateConfig controls the rate and policy of updates.
+ UpdateConfig update = 6;
+
+ // NetworkAttachmentConfig specifies how a service should be attached to a particular network.
+ //
+ // For now, this is a simple struct, but this can include future information
+ // instructing Swarm on how this service should work on the particular
+ // network.
+ message NetworkAttachmentConfig {
+ // Target specifies the target network for attachment. This value may be a
+ // network name or identifier. Only identifiers are supported at this time.
+ string target = 1;
+ // Aliases specifies a list of discoverable alternate names for the service on this Target.
+ repeated string aliases = 2;
+ }
+ repeated NetworkAttachmentConfig networks = 7;
+
+ // Service endpoint specifies the user provided configuration
+ // to properly discover and load balance a service.
+ EndpointSpec endpoint = 8;
+}
+
+// ReplicatedService sets the reconciliation target to a certain number of replicas.
+message ReplicatedService {
+ uint64 replicas = 1;
+}
+
+// GlobalService represents a global service.
+message GlobalService {
+ // Empty message for now.
+}
+
+message TaskSpec {
+ oneof runtime {
+ ContainerSpec container = 1;
+ }
+
+ // Resource requirements for the container.
+ ResourceRequirements resources = 2;
+
+ // RestartPolicy specifies what to do when a task fails or finishes.
+ RestartPolicy restart = 4;
+
+ // Placement specifies node selection constraints
+ Placement placement = 5;
+}
+
+// ContainerSpec specifies runtime parameters for a container.
+message ContainerSpec {
+ // image defines the image reference, as specified in the
+ // distribution/reference package. This may include a registry host, name,
+ // tag or digest.
+ //
+ // The field will be passed directly to the engine when pulling. Well-behaved
+ // service definitions will use immutable references, either through tags
+ // that don't change or verifiable digests.
+ string image = 1;
+
+ // Labels defines labels to be added to the container at creation time. If
+ // collisions with system labels occur, these labels will be overridden.
+ //
+ // This field *must* remain compatible with the Labels field of
+ // Annotations.
+ map<string, string> labels = 2;
+
+ // Command to run in the container. The first element is a path to the
+ // executable and the following elements are treated as arguments.
+ //
+ // If command is empty, execution will fall back to the image's entrypoint.
+ repeated string command = 3;
+
+ // Args specifies arguments provided to the image's entrypoint.
+ // Ignored if command is specified.
+ repeated string args = 4;
+
+ // Env specifies the environment variables for the container in NAME=VALUE
+ // format. These must be compliant with [IEEE Std
+ // 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html).
+ repeated string env = 5;
+
+ // Dir defines the working directory to set for the container process.
+ string dir = 6;
+
+ // User specifies the user that should be employed to run the container.
+ string user = 7;
+
+ repeated Mount mounts = 8 [(gogoproto.nullable) = false];
+
+ // StopGracePeriod is the grace period for stopping the container before
+ // forcefully killing the container.
+ Duration stop_grace_period = 9;
+}
+
+// EndpointSpec defines the properties that can be configured to
+// access and load-balance the service.
+message EndpointSpec {
+ // ResolutionMode specifies the mode of resolution to use for
+ // internal load balancing between tasks which are all within
+ // the cluster. This is sometimes called the east-west data path.
+ enum ResolutionMode {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ // VIP resolution mode specifies that the
+ // service resolves to a logical IP and the requests
+ // are sent to that logical IP. Packets hitting that
+ // logical IP are load balanced to a chosen backend.
+ VIP = 0 [(gogoproto.enumvalue_customname) = "ResolutionModeVirtualIP"];
+
+ // DNSRR resolution mode specifies that the
+ // service directly gets resolved to one of the
+ // backend IPs and the client directly initiates a
+ // request towards the actual backend. This requires
+ // that the client does not cache the DNS responses
+ // when the DNS response TTL is 0.
+ DNSRR = 1 [(gogoproto.enumvalue_customname) = "ResolutionModeDNSRoundRobin"];
+ }
+
+ ResolutionMode mode = 1;
+
+ // List of exposed ports that this service is accessible from
+ // external to the cluster.
+ repeated PortConfig ports = 2;
+}
+
+// NetworkSpec specifies user defined network parameters.
+message NetworkSpec {
+ Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+ // DriverConfig specifies driver-specific configuration consumed by the network driver.
+ Driver driver_config = 2;
+
+ // IPv6Enabled enables support for IPv6 on the network.
+ bool ipv6_enabled = 3;
+
+ // internal restricts external access to the network. This may be
+ // accomplished by disabling the default gateway or through other means.
+ bool internal = 4;
+
+ IPAMOptions ipam = 5 [(gogoproto.customname) = "IPAM"];
+}
+
+// ClusterSpec specifies global cluster settings.
+message ClusterSpec {
+ Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+ // AcceptancePolicy defines the certificate issuance policy.
+ AcceptancePolicy acceptance_policy = 2 [(gogoproto.nullable) = false];
+
+ // Orchestration defines cluster-level orchestration settings.
+ OrchestrationConfig orchestration = 3 [(gogoproto.nullable) = false];
+
+ // Raft defines the cluster's raft settings.
+ RaftConfig raft = 4 [(gogoproto.nullable) = false];
+
+ // Dispatcher defines cluster-level dispatcher settings.
+ DispatcherConfig dispatcher = 5 [(gogoproto.nullable) = false];
+
+ // CAConfig defines cluster-level certificate authority settings.
+ CAConfig ca_config = 6 [(gogoproto.nullable) = false, (gogoproto.customname) = "CAConfig"];
+}
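
As a hedged sketch of how these spec messages compose from Go (assuming the generated api package in this vendor tree; the oneof wrapper names TaskSpec_Container and ServiceSpec_Replicated follow the gogoproto conventions visible in types.pb.go and are assumptions):

    package main

    import "github.com/docker/swarmkit/api"

    // A replicated service spec: three instances of an nginx task.
    var webSpec = api.ServiceSpec{
    	Annotations: api.Annotations{Name: "web"},
    	Task: api.TaskSpec{
    		Runtime: &api.TaskSpec_Container{
    			Container: &api.ContainerSpec{Image: "nginx:1.10"},
    		},
    	},
    	Mode: &api.ServiceSpec_Replicated{
    		Replicated: &api.ReplicatedService{Replicas: 3},
    	},
    }

    func main() {}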
diff --git a/vendor/src/github.com/docker/swarmkit/api/timestamp/gen.go b/vendor/src/github.com/docker/swarmkit/api/timestamp/gen.go
new file mode 100644
index 0000000000..75fc639d6e
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/timestamp/gen.go
@@ -0,0 +1,3 @@
+//go:generate protoc -I.:../../vendor:../../vendor/github.com/gogo/protobuf --gogoswarm_out=plugins=grpc+deepcopy+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api/timestamp,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. timestamp.proto
+
+package timestamp
diff --git a/vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.pb.go b/vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.pb.go
new file mode 100644
index 0000000000..d305ee3237
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.pb.go
@@ -0,0 +1,469 @@
+// Code generated by protoc-gen-gogo.
+// source: timestamp.proto
+// DO NOT EDIT!
+
+/*
+ Package timestamp is a generated protocol buffer package.
+
+ It is generated from these files:
+ timestamp.proto
+
+ It has these top-level messages:
+ Timestamp
+*/
+package timestamp
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+const _ = proto.GoGoProtoPackageIsVersion1
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// now = time.time()
+// seconds = int(now)
+// nanos = int((now - seconds) * 10**9)
+// timestamp = Timestamp(seconds=seconds, nanos=nanos)
+//
+//
+type Timestamp struct {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *Timestamp) Reset() { *m = Timestamp{} }
+func (*Timestamp) ProtoMessage() {}
+func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorTimestamp, []int{0} }
+
+func init() {
+ proto.RegisterType((*Timestamp)(nil), "docker.swarmkit.v1.Timestamp")
+}
+
+func (m *Timestamp) Copy() *Timestamp {
+ if m == nil {
+ return nil
+ }
+
+ o := &Timestamp{
+ Seconds: m.Seconds,
+ Nanos: m.Nanos,
+ }
+
+ return o
+}
+
+func (this *Timestamp) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&timestamp.Timestamp{")
+ s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n")
+ s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringTimestamp(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringTimestamp(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+func (m *Timestamp) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Timestamp) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Seconds != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTimestamp(data, i, uint64(m.Seconds))
+ }
+ if m.Nanos != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintTimestamp(data, i, uint64(m.Nanos))
+ }
+ return i, nil
+}
+
+func encodeFixed64Timestamp(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Timestamp(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintTimestamp(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+
+func (m *Timestamp) Size() (n int) {
+ var l int
+ _ = l
+ if m.Seconds != 0 {
+ n += 1 + sovTimestamp(uint64(m.Seconds))
+ }
+ if m.Nanos != 0 {
+ n += 1 + sovTimestamp(uint64(m.Nanos))
+ }
+ return n
+}
+
+func sovTimestamp(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozTimestamp(x uint64) (n int) {
+ return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Timestamp) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Timestamp{`,
+ `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`,
+ `Nanos:` + fmt.Sprintf("%v", this.Nanos) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringTimestamp(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *Timestamp) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTimestamp
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Timestamp: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
+ }
+ m.Seconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTimestamp
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Seconds |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
+ }
+ m.Nanos = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTimestamp
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Nanos |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTimestamp(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTimestamp
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTimestamp(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTimestamp
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTimestamp
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTimestamp
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthTimestamp
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTimestamp
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipTimestamp(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorTimestamp = []byte{
+ // 197 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0xc9, 0xcc, 0x4d,
+ 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xc9, 0x4f,
+ 0xce, 0x4e, 0x2d, 0xd2, 0x2b, 0x2e, 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0x54,
+ 0xb2, 0xe6, 0xe2, 0x0c, 0x81, 0x29, 0x13, 0x92, 0xe0, 0x62, 0x2f, 0x4e, 0x4d, 0xce, 0xcf, 0x4b,
+ 0x29, 0x96, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0e, 0x82, 0x71, 0x85, 0x44, 0xb8, 0x58, 0xf3, 0x12,
+ 0xf3, 0xf2, 0x8b, 0x25, 0x98, 0x80, 0xe2, 0xac, 0x41, 0x10, 0x8e, 0x53, 0xc1, 0x89, 0x87, 0x72,
+ 0x0c, 0x37, 0x80, 0xf8, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x80, 0xf8, 0x02,
+ 0x10, 0x3f, 0x00, 0x62, 0x2e, 0xe1, 0xe4, 0xfc, 0x5c, 0xbd, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54,
+ 0x88, 0x03, 0x92, 0x4a, 0xd3, 0x9c, 0xf8, 0xe0, 0xb6, 0x05, 0x80, 0x84, 0x02, 0x18, 0x17, 0x30,
+ 0x32, 0xfe, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2,
+ 0x3e, 0x00, 0xaa, 0x5e, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0xa4, 0xb2,
+ 0x20, 0xb5, 0x38, 0x89, 0x0d, 0x6c, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x26, 0xaa, 0x11,
+ 0xd7, 0xdc, 0x00, 0x00, 0x00,
+}
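
A minimal round-trip sketch using the generated Marshal and Unmarshal methods above (an editor's sketch, assuming the vendored import path):

    package main

    import (
    	"fmt"

    	"github.com/docker/swarmkit/api/timestamp"
    )

    func main() {
    	in := &timestamp.Timestamp{Seconds: 1465887355, Nanos: 500000000}

    	data, err := in.Marshal() // fields 1 and 2, varint-encoded
    	if err != nil {
    		panic(err)
    	}

    	var out timestamp.Timestamp
    	if err := out.Unmarshal(data); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Seconds, out.Nanos) // 1465887355 500000000
    }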
diff --git a/vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.proto b/vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.proto
new file mode 100644
index 0000000000..f33b56f33c
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.proto
@@ -0,0 +1,121 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+// TODO(stevvooe): We cannot use the google version because of the naive size
+// and deepcopy extensions. For now, we just generate this ourselves. This can
+// be fixed.
+// package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+
+// TODO(stevvooe): Commenting this out because of the maddening behavior of google's
+// Go protobuf implementation.
+
+//option go_package = "github.com/golang/protobuf/ptypes/timestamp";
+
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// now = time.time()
+// seconds = int(now)
+// nanos = int((now - seconds) * 10**9)
+// timestamp = Timestamp(seconds=seconds, nanos=nanos)
+//
+//
+message Timestamp {
+
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ int64 seconds = 1;
+
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ int32 nanos = 2;
+}
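
Mirroring the comment's examples, a Go equivalent would be (an editor's sketch against the generated package, not from the source):

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/docker/swarmkit/api/timestamp"
    )

    func main() {
    	// Compute Timestamp from current time in Go.
    	now := time.Now()
    	ts := &timestamp.Timestamp{
    		Seconds: now.Unix(),
    		Nanos:   int32(now.Nanosecond()),
    	}
    	fmt.Println(ts.Seconds, ts.Nanos)
    }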
diff --git a/vendor/src/github.com/docker/swarmkit/api/types.pb.go b/vendor/src/github.com/docker/swarmkit/api/types.pb.go
new file mode 100644
index 0000000000..1824a746c1
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/types.pb.go
@@ -0,0 +1,10052 @@
+// Code generated by protoc-gen-gogo.
+// source: types.proto
+// DO NOT EDIT!
+
+/*
+ Package api is a generated protocol buffer package.
+
+ It is generated from these files:
+ types.proto
+ specs.proto
+ objects.proto
+ control.proto
+ dispatcher.proto
+ ca.proto
+ snapshot.proto
+ raft.proto
+
+ It has these top-level messages:
+ Version
+ Annotations
+ Resources
+ ResourceRequirements
+ Platform
+ PluginDescription
+ EngineDescription
+ NodeDescription
+ RaftMemberStatus
+ NodeStatus
+ Image
+ Mount
+ RestartPolicy
+ UpdateConfig
+ ContainerStatus
+ TaskStatus
+ IPAMConfig
+ PortConfig
+ Driver
+ IPAMOptions
+ Peer
+ WeightedPeer
+ IssuanceStatus
+ AcceptancePolicy
+ CAConfig
+ OrchestrationConfig
+ DispatcherConfig
+ RaftConfig
+ RaftMember
+ Placement
+ RootCA
+ Certificate
+ EncryptionKey
+ ManagerStatus
+ NodeSpec
+ ServiceSpec
+ ReplicatedService
+ GlobalService
+ TaskSpec
+ ContainerSpec
+ EndpointSpec
+ NetworkSpec
+ ClusterSpec
+ Meta
+ Node
+ Service
+ Endpoint
+ Task
+ NetworkAttachment
+ Network
+ Cluster
+ GetNodeRequest
+ GetNodeResponse
+ ListNodesRequest
+ ListNodesResponse
+ UpdateNodeRequest
+ UpdateNodeResponse
+ RemoveNodeRequest
+ RemoveNodeResponse
+ GetTaskRequest
+ GetTaskResponse
+ RemoveTaskRequest
+ RemoveTaskResponse
+ ListTasksRequest
+ ListTasksResponse
+ CreateServiceRequest
+ CreateServiceResponse
+ GetServiceRequest
+ GetServiceResponse
+ UpdateServiceRequest
+ UpdateServiceResponse
+ RemoveServiceRequest
+ RemoveServiceResponse
+ ListServicesRequest
+ ListServicesResponse
+ CreateNetworkRequest
+ CreateNetworkResponse
+ GetNetworkRequest
+ GetNetworkResponse
+ RemoveNetworkRequest
+ RemoveNetworkResponse
+ ListNetworksRequest
+ ListNetworksResponse
+ RemoveManagerResponse
+ GetClusterRequest
+ GetClusterResponse
+ ListClustersRequest
+ ListClustersResponse
+ UpdateClusterRequest
+ UpdateClusterResponse
+ SessionRequest
+ SessionMessage
+ HeartbeatRequest
+ HeartbeatResponse
+ UpdateTaskStatusRequest
+ UpdateTaskStatusResponse
+ TasksRequest
+ TasksMessage
+ NodeCertificateStatusRequest
+ NodeCertificateStatusResponse
+ IssueNodeCertificateRequest
+ IssueNodeCertificateResponse
+ GetRootCACertificateRequest
+ GetRootCACertificateResponse
+ StoreSnapshot
+ ClusterSnapshot
+ Snapshot
+ JoinRequest
+ JoinResponse
+ LeaveRequest
+ LeaveResponse
+ ProcessRaftMessageRequest
+ ProcessRaftMessageResponse
+ ResolveAddressRequest
+ ResolveAddressResponse
+ InternalRaftRequest
+ StoreAction
+*/
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import docker_swarmkit_v1 "github.com/docker/swarmkit/api/timestamp"
+import docker_swarmkit_v11 "github.com/docker/swarmkit/api/duration"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+const _ = proto.GoGoProtoPackageIsVersion1
+
+// TaskState enumerates the states that a task progresses through within an
+// agent. States are designed to be monotonically increasing, such that if two
+// states are seen by a task, the greater of the two represents the true state.
+type TaskState int32
+
+const (
+ TaskStateNew TaskState = 0
+ TaskStateAllocated TaskState = 64
+ TaskStatePending TaskState = 128
+ TaskStateAssigned TaskState = 192
+ TaskStateAccepted TaskState = 256
+ TaskStatePreparing TaskState = 320
+ TaskStateReady TaskState = 384
+ TaskStateStarting TaskState = 448
+ TaskStateRunning TaskState = 512
+ TaskStateCompleted TaskState = 576
+ TaskStateShutdown TaskState = 640
+ TaskStateFailed TaskState = 704
+ TaskStateRejected TaskState = 768
+)
+
+var TaskState_name = map[int32]string{
+ 0: "NEW",
+ 64: "ALLOCATED",
+ 128: "PENDING",
+ 192: "ASSIGNED",
+ 256: "ACCEPTED",
+ 320: "PREPARING",
+ 384: "READY",
+ 448: "STARTING",
+ 512: "RUNNING",
+ 576: "COMPLETE",
+ 640: "SHUTDOWN",
+ 704: "FAILED",
+ 768: "REJECTED",
+}
+var TaskState_value = map[string]int32{
+ "NEW": 0,
+ "ALLOCATED": 64,
+ "PENDING": 128,
+ "ASSIGNED": 192,
+ "ACCEPTED": 256,
+ "PREPARING": 320,
+ "READY": 384,
+ "STARTING": 448,
+ "RUNNING": 512,
+ "COMPLETE": 576,
+ "SHUTDOWN": 640,
+ "FAILED": 704,
+ "REJECTED": 768,
+}
+
+func (x TaskState) String() string {
+ return proto.EnumName(TaskState_name, int32(x))
+}
+func (TaskState) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} }
+
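+// Editor's sketch (not generated output): because TaskState values are
+// monotonically increasing, reconciling two observations of a task keeps
+// the numerically greater state, per the comment on TaskState above.
+func mergeTaskState(observed, incoming TaskState) TaskState {
+ if incoming > observed {
+ return incoming
+ }
+ return observed
+}
+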
+type NodeRole int32
+
+const (
+ NodeRoleWorker NodeRole = 0
+ NodeRoleManager NodeRole = 1
+)
+
+var NodeRole_name = map[int32]string{
+ 0: "WORKER",
+ 1: "MANAGER",
+}
+var NodeRole_value = map[string]int32{
+ "WORKER": 0,
+ "MANAGER": 1,
+}
+
+func (x NodeRole) String() string {
+ return proto.EnumName(NodeRole_name, int32(x))
+}
+func (NodeRole) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} }
+
+type RaftMemberStatus_Reachability int32
+
+const (
+ // Unknown indicates that the manager state cannot be resolved
+ RaftMemberStatus_UNKNOWN RaftMemberStatus_Reachability = 0
+ // Unreachable indicates that the node cannot be contacted by other
+ // raft cluster members.
+ RaftMemberStatus_UNREACHABLE RaftMemberStatus_Reachability = 1
+ // Reachable indicates that the node is healthy and reachable
+ // by other members.
+ RaftMemberStatus_REACHABLE RaftMemberStatus_Reachability = 2
+)
+
+var RaftMemberStatus_Reachability_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "UNREACHABLE",
+ 2: "REACHABLE",
+}
+var RaftMemberStatus_Reachability_value = map[string]int32{
+ "UNKNOWN": 0,
+ "UNREACHABLE": 1,
+ "REACHABLE": 2,
+}
+
+func (x RaftMemberStatus_Reachability) String() string {
+ return proto.EnumName(RaftMemberStatus_Reachability_name, int32(x))
+}
+func (RaftMemberStatus_Reachability) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorTypes, []int{8, 0}
+}
+
+// TODO(aluzzardi) These should be using `gogoproto.enumvalue_customname`.
+type NodeStatus_State int32
+
+const (
+ // Unknown indicates the node state cannot be resolved.
+ NodeStatus_UNKNOWN NodeStatus_State = 0
+ // Down indicates the node is down.
+ NodeStatus_DOWN NodeStatus_State = 1
+ // Ready indicates the node is ready to accept tasks.
+ NodeStatus_READY NodeStatus_State = 2
+ // Disconnected indicates the node is currently trying to find a new manager.
+ NodeStatus_DISCONNECTED NodeStatus_State = 3
+)
+
+var NodeStatus_State_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "DOWN",
+ 2: "READY",
+ 3: "DISCONNECTED",
+}
+var NodeStatus_State_value = map[string]int32{
+ "UNKNOWN": 0,
+ "DOWN": 1,
+ "READY": 2,
+ "DISCONNECTED": 3,
+}
+
+func (x NodeStatus_State) String() string {
+ return proto.EnumName(NodeStatus_State_name, int32(x))
+}
+func (NodeStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{9, 0} }
+
+type Mount_MountType int32
+
+const (
+ MountTypeBind Mount_MountType = 0
+ MountTypeVolume Mount_MountType = 1
+)
+
+var Mount_MountType_name = map[int32]string{
+ 0: "BIND",
+ 1: "VOLUME",
+}
+var Mount_MountType_value = map[string]int32{
+ "BIND": 0,
+ "VOLUME": 1,
+}
+
+func (x Mount_MountType) String() string {
+ return proto.EnumName(Mount_MountType_name, int32(x))
+}
+func (Mount_MountType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11, 0} }
+
+type Mount_BindOptions_MountPropagation int32
+
+const (
+ MountPropagationRPrivate Mount_BindOptions_MountPropagation = 0
+ MountPropagationPrivate Mount_BindOptions_MountPropagation = 1
+ MountPropagationRShared Mount_BindOptions_MountPropagation = 2
+ MountPropagationShared Mount_BindOptions_MountPropagation = 3
+ MountPropagationRSlave Mount_BindOptions_MountPropagation = 4
+ MountPropagationSlave Mount_BindOptions_MountPropagation = 5
+)
+
+var Mount_BindOptions_MountPropagation_name = map[int32]string{
+ 0: "RPRIVATE",
+ 1: "PRIVATE",
+ 2: "RSHARED",
+ 3: "SHARED",
+ 4: "RSLAVE",
+ 5: "SLAVE",
+}
+var Mount_BindOptions_MountPropagation_value = map[string]int32{
+ "RPRIVATE": 0,
+ "PRIVATE": 1,
+ "RSHARED": 2,
+ "SHARED": 3,
+ "RSLAVE": 4,
+ "SLAVE": 5,
+}
+
+func (x Mount_BindOptions_MountPropagation) String() string {
+ return proto.EnumName(Mount_BindOptions_MountPropagation_name, int32(x))
+}
+func (Mount_BindOptions_MountPropagation) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorTypes, []int{11, 0, 0}
+}
+
+type RestartPolicy_RestartCondition int32
+
+const (
+ RestartOnNone RestartPolicy_RestartCondition = 0
+ RestartOnFailure RestartPolicy_RestartCondition = 1
+ RestartOnAny RestartPolicy_RestartCondition = 2
+)
+
+var RestartPolicy_RestartCondition_name = map[int32]string{
+ 0: "NONE",
+ 1: "ON_FAILURE",
+ 2: "ANY",
+}
+var RestartPolicy_RestartCondition_value = map[string]int32{
+ "NONE": 0,
+ "ON_FAILURE": 1,
+ "ANY": 2,
+}
+
+func (x RestartPolicy_RestartCondition) String() string {
+ return proto.EnumName(RestartPolicy_RestartCondition_name, int32(x))
+}
+func (RestartPolicy_RestartCondition) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorTypes, []int{12, 0}
+}
+
+// AddressFamily specifies the network address family that
+// this IPAMConfig belongs to.
+type IPAMConfig_AddressFamily int32
+
+const (
+ IPAMConfig_UNKNOWN IPAMConfig_AddressFamily = 0
+ IPAMConfig_IPV4 IPAMConfig_AddressFamily = 4
+ IPAMConfig_IPV6 IPAMConfig_AddressFamily = 6
+)
+
+var IPAMConfig_AddressFamily_name = map[int32]string{
+ 0: "UNKNOWN",
+ 4: "IPV4",
+ 6: "IPV6",
+}
+var IPAMConfig_AddressFamily_value = map[string]int32{
+ "UNKNOWN": 0,
+ "IPV4": 4,
+ "IPV6": 6,
+}
+
+func (x IPAMConfig_AddressFamily) String() string {
+ return proto.EnumName(IPAMConfig_AddressFamily_name, int32(x))
+}
+func (IPAMConfig_AddressFamily) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorTypes, []int{16, 0}
+}
+
+type PortConfig_Protocol int32
+
+const (
+ ProtocolTCP PortConfig_Protocol = 0
+ ProtocolUDP PortConfig_Protocol = 1
+)
+
+var PortConfig_Protocol_name = map[int32]string{
+ 0: "TCP",
+ 1: "UDP",
+}
+var PortConfig_Protocol_value = map[string]int32{
+ "TCP": 0,
+ "UDP": 1,
+}
+
+func (x PortConfig_Protocol) String() string {
+ return proto.EnumName(PortConfig_Protocol_name, int32(x))
+}
+func (PortConfig_Protocol) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{17, 0} }
+
+type IssuanceStatus_State int32
+
+const (
+ IssuanceStateUnknown IssuanceStatus_State = 0
+ IssuanceStateRenew IssuanceStatus_State = 1
+ IssuanceStatePending IssuanceStatus_State = 2
+ IssuanceStateIssued IssuanceStatus_State = 3
+ IssuanceStateFailed IssuanceStatus_State = 4
+)
+
+var IssuanceStatus_State_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "RENEW",
+ 2: "PENDING",
+ 3: "ISSUED",
+ 4: "FAILED",
+}
+var IssuanceStatus_State_value = map[string]int32{
+ "UNKNOWN": 0,
+ "RENEW": 1,
+ "PENDING": 2,
+ "ISSUED": 3,
+ "FAILED": 4,
+}
+
+func (x IssuanceStatus_State) String() string {
+ return proto.EnumName(IssuanceStatus_State_name, int32(x))
+}
+func (IssuanceStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{22, 0} }
+
+// Encryption algorithm that can be implemented using this key
+type EncryptionKey_Algorithm int32
+
+const (
+ AES_128_GCM EncryptionKey_Algorithm = 0
+)
+
+var EncryptionKey_Algorithm_name = map[int32]string{
+ 0: "AES_128_GCM",
+}
+var EncryptionKey_Algorithm_value = map[string]int32{
+ "AES_128_GCM": 0,
+}
+
+func (x EncryptionKey_Algorithm) String() string {
+ return proto.EnumName(EncryptionKey_Algorithm_name, int32(x))
+}
+func (EncryptionKey_Algorithm) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorTypes, []int{32, 0}
+}
+
+// Version tracks the last time an object in the store was updated.
+type Version struct {
+ Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
+}
+
+func (m *Version) Reset() { *m = Version{} }
+func (*Version) ProtoMessage() {}
+func (*Version) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} }
+
+// Annotations provide useful information to identify API objects. They are
+// common to all API specs.
+type Annotations struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *Annotations) Reset() { *m = Annotations{} }
+func (*Annotations) ProtoMessage() {}
+func (*Annotations) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} }
+
+type Resources struct {
+ // Amount of CPUs (e.g. 2000000000 = 2 CPU cores)
+ NanoCPUs int64 `protobuf:"varint,1,opt,name=nano_cpus,json=nanoCpus,proto3" json:"nano_cpus,omitempty"`
+ // Amount of memory in bytes.
+ MemoryBytes int64 `protobuf:"varint,2,opt,name=memory_bytes,json=memoryBytes,proto3" json:"memory_bytes,omitempty"`
+}
+
+func (m *Resources) Reset() { *m = Resources{} }
+func (*Resources) ProtoMessage() {}
+func (*Resources) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} }
+
+type ResourceRequirements struct {
+ Limits *Resources `protobuf:"bytes,1,opt,name=limits" json:"limits,omitempty"`
+ Reservations *Resources `protobuf:"bytes,2,opt,name=reservations" json:"reservations,omitempty"`
+}
+
+func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} }
+func (*ResourceRequirements) ProtoMessage() {}
+func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{3} }
+
+type Platform struct {
+ // Architecture (e.g. x86_64)
+ Architecture string `protobuf:"bytes,1,opt,name=architecture,proto3" json:"architecture,omitempty"`
+ // Operating System (e.g. linux)
+ OS string `protobuf:"bytes,2,opt,name=os,proto3" json:"os,omitempty"`
+}
+
+func (m *Platform) Reset() { *m = Platform{} }
+func (*Platform) ProtoMessage() {}
+func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{4} }
+
+// PluginDescription describes an engine plugin.
+type PluginDescription struct {
+ // Type of plugin. Canonical values for existing types are
+ // Volume, Network, and Authorization. More types could be
+ // supported in the future.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // Name of the plugin
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *PluginDescription) Reset() { *m = PluginDescription{} }
+func (*PluginDescription) ProtoMessage() {}
+func (*PluginDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{5} }
+
+type EngineDescription struct {
+ // Docker daemon version running on the node.
+ EngineVersion string `protobuf:"bytes,1,opt,name=engine_version,json=engineVersion,proto3" json:"engine_version,omitempty"`
+ // Labels attached to the engine.
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Volume, Network, and Auth plugins
+ Plugins []PluginDescription `protobuf:"bytes,3,rep,name=plugins" json:"plugins"`
+}
+
+func (m *EngineDescription) Reset() { *m = EngineDescription{} }
+func (*EngineDescription) ProtoMessage() {}
+func (*EngineDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{6} }
+
+type NodeDescription struct {
+ // Hostname of the node as reported by the agent.
+ // This is different from spec.meta.name which is user-defined.
+ Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"`
+ // Platform of the node.
+ Platform *Platform `protobuf:"bytes,2,opt,name=platform" json:"platform,omitempty"`
+ // Total resources on the node.
+ Resources *Resources `protobuf:"bytes,3,opt,name=resources" json:"resources,omitempty"`
+ // Information about the Docker Engine on the node.
+ Engine *EngineDescription `protobuf:"bytes,4,opt,name=engine" json:"engine,omitempty"`
+}
+
+func (m *NodeDescription) Reset() { *m = NodeDescription{} }
+func (*NodeDescription) ProtoMessage() {}
+func (*NodeDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{7} }
+
+type RaftMemberStatus struct {
+ Leader bool `protobuf:"varint,1,opt,name=leader,proto3" json:"leader,omitempty"`
+ Reachability RaftMemberStatus_Reachability `protobuf:"varint,2,opt,name=reachability,proto3,enum=docker.swarmkit.v1.RaftMemberStatus_Reachability" json:"reachability,omitempty"`
+ Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (m *RaftMemberStatus) Reset() { *m = RaftMemberStatus{} }
+func (*RaftMemberStatus) ProtoMessage() {}
+func (*RaftMemberStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{8} }
+
+type NodeStatus struct {
+ State NodeStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.NodeStatus_State" json:"state,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (m *NodeStatus) Reset() { *m = NodeStatus{} }
+func (*NodeStatus) ProtoMessage() {}
+func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{9} }
+
+type Image struct {
+ // reference is a docker image reference. This can include a repository, tag
+ // or be fully qualified with a digest. The format is specified in the
+ // distribution/reference package.
+ Reference string `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"`
+}
+
+func (m *Image) Reset() { *m = Image{} }
+func (*Image) ProtoMessage() {}
+func (*Image) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{10} }
+
+// Mount describes volume mounts for a container.
+//
+// The Mount type follows the structure of the mount syscall, including a type,
+// source, target. Top-level flags, such as writable, are common to all kinds
+// of mounts, where we also provide options that are specific to a type of
+// mount. This corresponds to flags and data, respectively, in the syscall.
+type Mount struct {
+ // Type defines the nature of the mount.
+ Type Mount_MountType `protobuf:"varint,1,opt,name=type,proto3,enum=docker.swarmkit.v1.Mount_MountType" json:"type,omitempty"`
+ // Source path to mount
+ Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
+ // Target path in container
+ Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
+ // Writable should be set to true if the mount should be writable from the
+ // container.
+ Writable bool `protobuf:"varint,4,opt,name=writable,proto3" json:"writable,omitempty"`
+ // BindOptions configures properties of a bind mount type.
+ BindOptions *Mount_BindOptions `protobuf:"bytes,5,opt,name=bind_options,json=bindOptions" json:"bind_options,omitempty"`
+ // VolumeOptions configures the properties specific to a volume mount type.
+ VolumeOptions *Mount_VolumeOptions `protobuf:"bytes,6,opt,name=volume_options,json=volumeOptions" json:"volume_options,omitempty"`
+}
+
+func (m *Mount) Reset() { *m = Mount{} }
+func (*Mount) ProtoMessage() {}
+func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11} }
+
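+// Editor's sketch (not generated output): a writable bind mount, showing the
+// type/source/target split plus a top-level flag, as described on Mount above.
+var exampleBindMount = Mount{
+ Type:     MountTypeBind,
+ Source:   "/var/run/docker.sock",
+ Target:   "/var/run/docker.sock",
+ Writable: true,
+}
+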
+// BindOptions specifies options that are specific to a bind mount.
+type Mount_BindOptions struct {
+ // Propagation mode of mount.
+ Propagation Mount_BindOptions_MountPropagation `protobuf:"varint,1,opt,name=propagation,proto3,enum=docker.swarmkit.v1.Mount_BindOptions_MountPropagation" json:"propagation,omitempty"`
+}
+
+func (m *Mount_BindOptions) Reset() { *m = Mount_BindOptions{} }
+func (*Mount_BindOptions) ProtoMessage() {}
+func (*Mount_BindOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11, 0} }
+
+// VolumeOptions contains parameters for mounting the volume.
+type Mount_VolumeOptions struct {
+ // populate volume with data from target
+ Populate bool `protobuf:"varint,1,opt,name=populate,proto3" json:"populate,omitempty"`
+ // labels to apply to the volume if creating
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // DriverConfig specifies the options that may be passed to the driver
+ // if the volume is created.
+ //
+ // If this is empty, no volume will be created if the volume is missing.
+ DriverConfig *Driver `protobuf:"bytes,3,opt,name=driver_config,json=driverConfig" json:"driver_config,omitempty"`
+}
+
+func (m *Mount_VolumeOptions) Reset() { *m = Mount_VolumeOptions{} }
+func (*Mount_VolumeOptions) ProtoMessage() {}
+func (*Mount_VolumeOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11, 1} }
+
+type RestartPolicy struct {
+ Condition RestartPolicy_RestartCondition `protobuf:"varint,1,opt,name=condition,proto3,enum=docker.swarmkit.v1.RestartPolicy_RestartCondition" json:"condition,omitempty"`
+ // Delay between restart attempts
+ Delay *docker_swarmkit_v11.Duration `protobuf:"bytes,2,opt,name=delay" json:"delay,omitempty"`
+ // MaxAttempts is the maximum number of restarts to attempt on an
+ // instance before giving up. Ignored if 0.
+ MaxAttempts uint64 `protobuf:"varint,3,opt,name=max_attempts,json=maxAttempts,proto3" json:"max_attempts,omitempty"`
+ // Window is the time window used to evaluate the restart policy.
+ // The time window is unbounded if this is 0.
+ Window *docker_swarmkit_v11.Duration `protobuf:"bytes,4,opt,name=window" json:"window,omitempty"`
+}
+
+func (m *RestartPolicy) Reset() { *m = RestartPolicy{} }
+func (*RestartPolicy) ProtoMessage() {}
+func (*RestartPolicy) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{12} }
+
+// UpdateConfig specifies the rate and policy of updates.
+// TODO(aluzzardi): Consider making this a oneof with RollingStrategy and LockstepStrategy.
+type UpdateConfig struct {
+ // Maximum number of tasks to be updated in one iteration.
+ // 0 means unlimited parallelism.
+ Parallelism uint64 `protobuf:"varint,1,opt,name=parallelism,proto3" json:"parallelism,omitempty"`
+ // Amount of time between updates.
+ Delay docker_swarmkit_v11.Duration `protobuf:"bytes,2,opt,name=delay" json:"delay"`
+}
+
+func (m *UpdateConfig) Reset() { *m = UpdateConfig{} }
+func (*UpdateConfig) ProtoMessage() {}
+func (*UpdateConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{13} }
+
+// Container specific status.
+type ContainerStatus struct {
+ ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+ PID int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+ ExitCode int32 `protobuf:"varint,3,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"`
+}
+
+func (m *ContainerStatus) Reset() { *m = ContainerStatus{} }
+func (*ContainerStatus) ProtoMessage() {}
+func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{14} }
+
+type TaskStatus struct {
+ Timestamp *docker_swarmkit_v1.Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
+ // State expresses the current state of the task.
+ State TaskState `protobuf:"varint,2,opt,name=state,proto3,enum=docker.swarmkit.v1.TaskState" json:"state,omitempty"`
+ // Message reports a message for the task status. This should provide a
+ // human-readable message that can point to how the task actually arrived
+ // at its current state.
+ //
+ // As a convention, we place a small message here describing what led to
+ // the current state. For example, if the task is in the ready state
+ // because it was prepared, we'd place "prepared" in this field. If we
+ // skipped preparation because the task was already prepared, we would put
+ // "already prepared" in this field.
+ Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
+ // Err is set if the task is in an error state.
+ //
+ // The following states should report a companion error:
+ //
+ // FAILED, REJECTED
+ //
+ // TODO(stevvooe) Integrate this field with the error interface.
+ Err string `protobuf:"bytes,4,opt,name=err,proto3" json:"err,omitempty"`
+ // Container status contains container specific status information.
+ //
+ // Types that are valid to be assigned to RuntimeStatus:
+ // *TaskStatus_Container
+ RuntimeStatus isTaskStatus_RuntimeStatus `protobuf_oneof:"runtime_status"`
+}
+
+func (m *TaskStatus) Reset() { *m = TaskStatus{} }
+func (*TaskStatus) ProtoMessage() {}
+func (*TaskStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{15} }
+
+type isTaskStatus_RuntimeStatus interface {
+ isTaskStatus_RuntimeStatus()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type TaskStatus_Container struct {
+ Container *ContainerStatus `protobuf:"bytes,5,opt,name=container,oneof"`
+}
+
+func (*TaskStatus_Container) isTaskStatus_RuntimeStatus() {}
+
+func (m *TaskStatus) GetRuntimeStatus() isTaskStatus_RuntimeStatus {
+ if m != nil {
+ return m.RuntimeStatus
+ }
+ return nil
+}
+
+func (m *TaskStatus) GetContainer() *ContainerStatus {
+ if x, ok := m.GetRuntimeStatus().(*TaskStatus_Container); ok {
+ return x.Container
+ }
+ return nil
+}
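+
+// exampleRuntimeStatus is an editor's illustrative sketch, not generated
+// code: setting the runtime_status oneof means assigning one of its wrapper
+// types, and GetContainer returns nil when another (or no) variant is set,
+// so callers can branch on it safely.
+func exampleRuntimeStatus() string {
+ st := &TaskStatus{
+ RuntimeStatus: &TaskStatus_Container{
+ Container: &ContainerStatus{ContainerID: "abc123"},
+ },
+ }
+ if c := st.GetContainer(); c != nil {
+ return c.ContainerID // "abc123"
+ }
+ return ""
+}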
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*TaskStatus) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _TaskStatus_OneofMarshaler, _TaskStatus_OneofUnmarshaler, _TaskStatus_OneofSizer, []interface{}{
+ (*TaskStatus_Container)(nil),
+ }
+}
+
+func _TaskStatus_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*TaskStatus)
+ // runtime_status
+ switch x := m.RuntimeStatus.(type) {
+ case *TaskStatus_Container:
+ _ = b.EncodeVarint(5<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Container); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("TaskStatus.RuntimeStatus has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _TaskStatus_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*TaskStatus)
+ switch tag {
+ case 5: // runtime_status.container
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ContainerStatus)
+ err := b.DecodeMessage(msg)
+ m.RuntimeStatus = &TaskStatus_Container{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _TaskStatus_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*TaskStatus)
+ // runtime_status
+ switch x := m.RuntimeStatus.(type) {
+ case *TaskStatus_Container:
+ s := proto.Size(x.Container)
+ n += proto.SizeVarint(5<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
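+
+// Editor's note: the literal 5<<3|proto.WireBytes used above is the protobuf
+// field key for field number 5 with wire type 2 (length-delimited):
+// (5 << 3) | 2 == 0x2a, the same tag byte written elsewhere in this file for
+// field-5 embedded messages. The sizer mirrors the marshaler exactly: key
+// varint, length varint, then the message bytes.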
+
+// IPAMConfig specifies parameters for IP Address Management.
+type IPAMConfig struct {
+ Family IPAMConfig_AddressFamily `protobuf:"varint,1,opt,name=family,proto3,enum=docker.swarmkit.v1.IPAMConfig_AddressFamily" json:"family,omitempty"`
+ // Subnet defines a network as a CIDR address (i.e. a network and mask,
+ // such as 192.168.0.0/24).
+ Subnet string `protobuf:"bytes,2,opt,name=subnet,proto3" json:"subnet,omitempty"`
+ // Range defines the portion of the subnet to allocate to tasks. This is
+ // defined as a subnet within the primary subnet.
+ Range string `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
+ // Gateway address within the subnet.
+ Gateway string `protobuf:"bytes,4,opt,name=gateway,proto3" json:"gateway,omitempty"`
+ // Reserved is a list of addresses from the master pool that should *not* be
+ // allocated. These addresses may have already been allocated or may be
+ // reserved for another allocation manager.
+ Reserved map[string]string `protobuf:"bytes,5,rep,name=reserved" json:"reserved,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *IPAMConfig) Reset() { *m = IPAMConfig{} }
+func (*IPAMConfig) ProtoMessage() {}
+func (*IPAMConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16} }
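+
+// exampleIPAMConfig is an editor's illustrative sketch, not generated code;
+// the addresses are assumptions chosen only to show how the fields relate.
+func exampleIPAMConfig() *IPAMConfig {
+ return &IPAMConfig{
+ Subnet: "192.168.0.0/24", // the network in CIDR form
+ Range: "192.168.0.128/25", // sub-range of Subnet allocated to tasks
+ Gateway: "192.168.0.1", // gateway address inside Subnet
+ Reserved: map[string]string{
+ "192.168.0.2": "held by an external allocator", // must not be allocated
+ },
+ }
+}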
+
+ // PortConfig specifies an exposed port which can be
+ // addressed using the given name. It can later be queried
+ // using a service discovery API or a DNS SRV query. The
+ // published port specifies a port that can be used to address
+ // this service from outside the cluster by sending a connection
+ // request to that port on any node in the cluster.
+type PortConfig struct {
+ // Name for the port. If provided, the port information can
+ // be queried using the name, as in a DNS SRV query.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Protocol for the port which is exposed.
+ Protocol PortConfig_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=docker.swarmkit.v1.PortConfig_Protocol" json:"protocol,omitempty"`
+ // The port the application exposes and is bound to.
+ TargetPort uint32 `protobuf:"varint,3,opt,name=target_port,json=targetPort,proto3" json:"target_port,omitempty"`
+ // PublishedPort specifies the port on which the service is
+ // exposed. If specified, the port must be
+ // within the available range. If not specified, an available
+ // port is automatically assigned.
+ PublishedPort uint32 `protobuf:"varint,4,opt,name=published_port,json=publishedPort,proto3" json:"published_port,omitempty"`
+}
+
+func (m *PortConfig) Reset() { *m = PortConfig{} }
+func (*PortConfig) ProtoMessage() {}
+func (*PortConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{17} }
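+
+// examplePortConfig is an editor's illustrative sketch, not generated code:
+// leaving PublishedPort at 0 requests automatic assignment, per the field
+// comment above.
+func examplePortConfig() *PortConfig {
+ return &PortConfig{
+ Name: "http",
+ TargetPort: 8080, // port the application is bound to
+ // PublishedPort omitted: an available port is assigned automatically.
+ }
+}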
+
+// Driver is a generic driver type to be used throughout the API. For now, a
+// driver is simply a name and set of options. The field contents depend on the
+// target use case and driver application. For example, a network driver may
+// have different rules than a volume driver.
+type Driver struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Options map[string]string `protobuf:"bytes,2,rep,name=options" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *Driver) Reset() { *m = Driver{} }
+func (*Driver) ProtoMessage() {}
+func (*Driver) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{18} }
+
+type IPAMOptions struct {
+ Driver *Driver `protobuf:"bytes,1,opt,name=driver" json:"driver,omitempty"`
+ Configs []*IPAMConfig `protobuf:"bytes,3,rep,name=configs" json:"configs,omitempty"`
+}
+
+func (m *IPAMOptions) Reset() { *m = IPAMOptions{} }
+func (*IPAMOptions) ProtoMessage() {}
+func (*IPAMOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{19} }
+
+ // Peer should be used anywhere we describe a remote peer.
+type Peer struct {
+ NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
+}
+
+func (m *Peer) Reset() { *m = Peer{} }
+func (*Peer) ProtoMessage() {}
+func (*Peer) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{20} }
+
+ // WeightedPeer should be used anywhere we describe a remote peer with a
+ // weight.
+type WeightedPeer struct {
+ Peer *Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"`
+ Weight int64 `protobuf:"varint,2,opt,name=weight,proto3" json:"weight,omitempty"`
+}
+
+func (m *WeightedPeer) Reset() { *m = WeightedPeer{} }
+func (*WeightedPeer) ProtoMessage() {}
+func (*WeightedPeer) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{21} }
+
+type IssuanceStatus struct {
+ State IssuanceStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.IssuanceStatus_State" json:"state,omitempty"`
+ // Err is set if the Certificate Issuance is in an error state.
+ // The following states should report a companion error:
+ // FAILED
+ Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
+}
+
+func (m *IssuanceStatus) Reset() { *m = IssuanceStatus{} }
+func (*IssuanceStatus) ProtoMessage() {}
+func (*IssuanceStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{22} }
+
+type AcceptancePolicy struct {
+ Policies []*AcceptancePolicy_RoleAdmissionPolicy `protobuf:"bytes,1,rep,name=policies" json:"policies,omitempty"`
+}
+
+func (m *AcceptancePolicy) Reset() { *m = AcceptancePolicy{} }
+func (*AcceptancePolicy) ProtoMessage() {}
+func (*AcceptancePolicy) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{23} }
+
+type AcceptancePolicy_RoleAdmissionPolicy struct {
+ Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
+ // Autoaccept controls whether certificates for this role are issued
+ // automatically, without administrator intervention.
+ Autoaccept bool `protobuf:"varint,2,opt,name=autoaccept,proto3" json:"autoaccept,omitempty"`
+ // Secret represents a user-provided string that is necessary for new
+ // nodes to join the cluster.
+ Secret *AcceptancePolicy_RoleAdmissionPolicy_HashedSecret `protobuf:"bytes,3,opt,name=secret" json:"secret,omitempty"`
+}
+
+func (m *AcceptancePolicy_RoleAdmissionPolicy) Reset() { *m = AcceptancePolicy_RoleAdmissionPolicy{} }
+func (*AcceptancePolicy_RoleAdmissionPolicy) ProtoMessage() {}
+func (*AcceptancePolicy_RoleAdmissionPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptorTypes, []int{23, 0}
+}
+
+type AcceptancePolicy_RoleAdmissionPolicy_HashedSecret struct {
+ // The actual hashed content
+ Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+ // The type of hash we are using
+ Alg string `protobuf:"bytes,2,opt,name=alg,proto3" json:"alg,omitempty"`
+}
+
+func (m *AcceptancePolicy_RoleAdmissionPolicy_HashedSecret) Reset() {
+ *m = AcceptancePolicy_RoleAdmissionPolicy_HashedSecret{}
+}
+func (*AcceptancePolicy_RoleAdmissionPolicy_HashedSecret) ProtoMessage() {}
+func (*AcceptancePolicy_RoleAdmissionPolicy_HashedSecret) Descriptor() ([]byte, []int) {
+ return fileDescriptorTypes, []int{23, 0, 0}
+}
+
+type CAConfig struct {
+ // NodeCertExpiry is the duration for which certificates should be issued.
+ NodeCertExpiry *docker_swarmkit_v11.Duration `protobuf:"bytes,1,opt,name=node_cert_expiry,json=nodeCertExpiry" json:"node_cert_expiry,omitempty"`
+}
+
+func (m *CAConfig) Reset() { *m = CAConfig{} }
+func (*CAConfig) ProtoMessage() {}
+func (*CAConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{24} }
+
+// OrchestrationConfig defines cluster-level orchestration settings.
+type OrchestrationConfig struct {
+ // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+ // node. If negative, never remove completed or failed tasks.
+ TaskHistoryRetentionLimit int64 `protobuf:"varint,1,opt,name=task_history_retention_limit,json=taskHistoryRetentionLimit,proto3" json:"task_history_retention_limit,omitempty"`
+}
+
+func (m *OrchestrationConfig) Reset() { *m = OrchestrationConfig{} }
+func (*OrchestrationConfig) ProtoMessage() {}
+func (*OrchestrationConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{25} }
+
+// DispatcherConfig defines cluster-level dispatcher settings.
+type DispatcherConfig struct {
+ // HeartbeatPeriod defines how often an agent should send heartbeats to
+ // the dispatcher.
+ HeartbeatPeriod uint64 `protobuf:"varint,1,opt,name=heartbeat_period,json=heartbeatPeriod,proto3" json:"heartbeat_period,omitempty"`
+}
+
+func (m *DispatcherConfig) Reset() { *m = DispatcherConfig{} }
+func (*DispatcherConfig) ProtoMessage() {}
+func (*DispatcherConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{26} }
+
+// RaftConfig defines raft settings for the cluster.
+type RaftConfig struct {
+ // SnapshotInterval is the number of log entries between snapshots.
+ SnapshotInterval uint64 `protobuf:"varint,1,opt,name=snapshot_interval,json=snapshotInterval,proto3" json:"snapshot_interval,omitempty"`
+ // KeepOldSnapshots is the number of snapshots to keep beyond the
+ // current snapshot.
+ KeepOldSnapshots uint64 `protobuf:"varint,2,opt,name=keep_old_snapshots,json=keepOldSnapshots,proto3" json:"keep_old_snapshots,omitempty"`
+ // LogEntriesForSlowFollowers is the number of log entries to keep
+ // around to sync up slow followers after a snapshot is created.
+ LogEntriesForSlowFollowers uint64 `protobuf:"varint,3,opt,name=log_entries_for_slow_followers,json=logEntriesForSlowFollowers,proto3" json:"log_entries_for_slow_followers,omitempty"`
+ // HeartbeatTick defines the number of ticks (in seconds) between
+ // heartbeat messages sent to other members for health checks.
+ HeartbeatTick uint32 `protobuf:"varint,4,opt,name=heartbeat_tick,json=heartbeatTick,proto3" json:"heartbeat_tick,omitempty"`
+ // ElectionTick defines the number of ticks (in seconds) that must elapse
+ // without a leader before a new election is triggered.
+ ElectionTick uint32 `protobuf:"varint,5,opt,name=election_tick,json=electionTick,proto3" json:"election_tick,omitempty"`
+}
+
+func (m *RaftConfig) Reset() { *m = RaftConfig{} }
+func (*RaftConfig) ProtoMessage() {}
+func (*RaftConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{27} }
+
+type RaftMember struct {
+ // RaftID specifies the internal ID used by the manager in a raft context;
+ // it can never be modified and is used only for informational purposes.
+ RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"`
+ // Addr specifies the address of the member
+ Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
+ // Status provides the current status of the manager from the perspective of another manager.
+ Status RaftMemberStatus `protobuf:"bytes,3,opt,name=status" json:"status"`
+}
+
+func (m *RaftMember) Reset() { *m = RaftMember{} }
+func (*RaftMember) ProtoMessage() {}
+func (*RaftMember) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{28} }
+
+// Placement specifies task distribution constraints.
+type Placement struct {
+ // Constraints specifies a set of requirements a node must meet for a task
+ // to be scheduled onto it.
+ Constraints []string `protobuf:"bytes,1,rep,name=constraints" json:"constraints,omitempty"`
+}
+
+func (m *Placement) Reset() { *m = Placement{} }
+func (*Placement) ProtoMessage() {}
+func (*Placement) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{29} }
+
+type RootCA struct {
+ // CAKey is the root CA private key.
+ CAKey []byte `protobuf:"bytes,1,opt,name=ca_key,json=caKey,proto3" json:"ca_key,omitempty"`
+ // CACert is the root CA certificate.
+ CACert []byte `protobuf:"bytes,2,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"`
+ // CACertHash is the digest of the CA Certificate.
+ CACertHash string `protobuf:"bytes,3,opt,name=ca_cert_hash,json=caCertHash,proto3" json:"ca_cert_hash,omitempty"`
+}
+
+func (m *RootCA) Reset() { *m = RootCA{} }
+func (*RootCA) ProtoMessage() {}
+func (*RootCA) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{30} }
+
+type Certificate struct {
+ Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
+ CSR []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"`
+ Status IssuanceStatus `protobuf:"bytes,3,opt,name=status" json:"status"`
+ Certificate []byte `protobuf:"bytes,4,opt,name=certificate,proto3" json:"certificate,omitempty"`
+ // CN represents the node ID.
+ CN string `protobuf:"bytes,5,opt,name=cn,proto3" json:"cn,omitempty"`
+}
+
+func (m *Certificate) Reset() { *m = Certificate{} }
+func (*Certificate) ProtoMessage() {}
+func (*Certificate) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{31} }
+
+// Symmetric keys to encrypt inter-agent communication.
+type EncryptionKey struct {
+ // Agent subsystem the key is intended for. Example:
+ // networking:gossip
+ Subsystem string `protobuf:"bytes,1,opt,name=subsystem,proto3" json:"subsystem,omitempty"`
+ Algorithm EncryptionKey_Algorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=docker.swarmkit.v1.EncryptionKey_Algorithm" json:"algorithm,omitempty"`
+ Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+ // Timestamp from the Lamport clock of the key allocator, used to
+ // identify the relative age of the key.
+ LamportTime uint64 `protobuf:"varint,4,opt,name=lamport_time,json=lamportTime,proto3" json:"lamport_time,omitempty"`
+}
+
+func (m *EncryptionKey) Reset() { *m = EncryptionKey{} }
+func (*EncryptionKey) ProtoMessage() {}
+func (*EncryptionKey) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{32} }
+
+// ManagerStatus provides information about the status of a manager in the cluster.
+type ManagerStatus struct {
+ Raft RaftMember `protobuf:"bytes,1,opt,name=raft" json:"raft"`
+}
+
+func (m *ManagerStatus) Reset() { *m = ManagerStatus{} }
+func (*ManagerStatus) ProtoMessage() {}
+func (*ManagerStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{33} }
+
+func init() {
+ proto.RegisterType((*Version)(nil), "docker.swarmkit.v1.Version")
+ proto.RegisterType((*Annotations)(nil), "docker.swarmkit.v1.Annotations")
+ proto.RegisterType((*Resources)(nil), "docker.swarmkit.v1.Resources")
+ proto.RegisterType((*ResourceRequirements)(nil), "docker.swarmkit.v1.ResourceRequirements")
+ proto.RegisterType((*Platform)(nil), "docker.swarmkit.v1.Platform")
+ proto.RegisterType((*PluginDescription)(nil), "docker.swarmkit.v1.PluginDescription")
+ proto.RegisterType((*EngineDescription)(nil), "docker.swarmkit.v1.EngineDescription")
+ proto.RegisterType((*NodeDescription)(nil), "docker.swarmkit.v1.NodeDescription")
+ proto.RegisterType((*RaftMemberStatus)(nil), "docker.swarmkit.v1.RaftMemberStatus")
+ proto.RegisterType((*NodeStatus)(nil), "docker.swarmkit.v1.NodeStatus")
+ proto.RegisterType((*Image)(nil), "docker.swarmkit.v1.Image")
+ proto.RegisterType((*Mount)(nil), "docker.swarmkit.v1.Mount")
+ proto.RegisterType((*Mount_BindOptions)(nil), "docker.swarmkit.v1.Mount.BindOptions")
+ proto.RegisterType((*Mount_VolumeOptions)(nil), "docker.swarmkit.v1.Mount.VolumeOptions")
+ proto.RegisterType((*RestartPolicy)(nil), "docker.swarmkit.v1.RestartPolicy")
+ proto.RegisterType((*UpdateConfig)(nil), "docker.swarmkit.v1.UpdateConfig")
+ proto.RegisterType((*ContainerStatus)(nil), "docker.swarmkit.v1.ContainerStatus")
+ proto.RegisterType((*TaskStatus)(nil), "docker.swarmkit.v1.TaskStatus")
+ proto.RegisterType((*IPAMConfig)(nil), "docker.swarmkit.v1.IPAMConfig")
+ proto.RegisterType((*PortConfig)(nil), "docker.swarmkit.v1.PortConfig")
+ proto.RegisterType((*Driver)(nil), "docker.swarmkit.v1.Driver")
+ proto.RegisterType((*IPAMOptions)(nil), "docker.swarmkit.v1.IPAMOptions")
+ proto.RegisterType((*Peer)(nil), "docker.swarmkit.v1.Peer")
+ proto.RegisterType((*WeightedPeer)(nil), "docker.swarmkit.v1.WeightedPeer")
+ proto.RegisterType((*IssuanceStatus)(nil), "docker.swarmkit.v1.IssuanceStatus")
+ proto.RegisterType((*AcceptancePolicy)(nil), "docker.swarmkit.v1.AcceptancePolicy")
+ proto.RegisterType((*AcceptancePolicy_RoleAdmissionPolicy)(nil), "docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy")
+ proto.RegisterType((*AcceptancePolicy_RoleAdmissionPolicy_HashedSecret)(nil), "docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy.HashedSecret")
+ proto.RegisterType((*CAConfig)(nil), "docker.swarmkit.v1.CAConfig")
+ proto.RegisterType((*OrchestrationConfig)(nil), "docker.swarmkit.v1.OrchestrationConfig")
+ proto.RegisterType((*DispatcherConfig)(nil), "docker.swarmkit.v1.DispatcherConfig")
+ proto.RegisterType((*RaftConfig)(nil), "docker.swarmkit.v1.RaftConfig")
+ proto.RegisterType((*RaftMember)(nil), "docker.swarmkit.v1.RaftMember")
+ proto.RegisterType((*Placement)(nil), "docker.swarmkit.v1.Placement")
+ proto.RegisterType((*RootCA)(nil), "docker.swarmkit.v1.RootCA")
+ proto.RegisterType((*Certificate)(nil), "docker.swarmkit.v1.Certificate")
+ proto.RegisterType((*EncryptionKey)(nil), "docker.swarmkit.v1.EncryptionKey")
+ proto.RegisterType((*ManagerStatus)(nil), "docker.swarmkit.v1.ManagerStatus")
+ proto.RegisterEnum("docker.swarmkit.v1.TaskState", TaskState_name, TaskState_value)
+ proto.RegisterEnum("docker.swarmkit.v1.NodeRole", NodeRole_name, NodeRole_value)
+ proto.RegisterEnum("docker.swarmkit.v1.RaftMemberStatus_Reachability", RaftMemberStatus_Reachability_name, RaftMemberStatus_Reachability_value)
+ proto.RegisterEnum("docker.swarmkit.v1.NodeStatus_State", NodeStatus_State_name, NodeStatus_State_value)
+ proto.RegisterEnum("docker.swarmkit.v1.Mount_MountType", Mount_MountType_name, Mount_MountType_value)
+ proto.RegisterEnum("docker.swarmkit.v1.Mount_BindOptions_MountPropagation", Mount_BindOptions_MountPropagation_name, Mount_BindOptions_MountPropagation_value)
+ proto.RegisterEnum("docker.swarmkit.v1.RestartPolicy_RestartCondition", RestartPolicy_RestartCondition_name, RestartPolicy_RestartCondition_value)
+ proto.RegisterEnum("docker.swarmkit.v1.IPAMConfig_AddressFamily", IPAMConfig_AddressFamily_name, IPAMConfig_AddressFamily_value)
+ proto.RegisterEnum("docker.swarmkit.v1.PortConfig_Protocol", PortConfig_Protocol_name, PortConfig_Protocol_value)
+ proto.RegisterEnum("docker.swarmkit.v1.IssuanceStatus_State", IssuanceStatus_State_name, IssuanceStatus_State_value)
+ proto.RegisterEnum("docker.swarmkit.v1.EncryptionKey_Algorithm", EncryptionKey_Algorithm_name, EncryptionKey_Algorithm_value)
+}
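+
+// Editor's note: registration makes these messages discoverable by their
+// fully qualified proto names through the proto package's registry; for
+// example, proto.MessageType("docker.swarmkit.v1.Version") returns the
+// reflect.Type of *Version.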
+
+func (m *Version) Copy() *Version {
+ if m == nil {
+ return nil
+ }
+
+ o := &Version{
+ Index: m.Index,
+ }
+
+ return o
+}
+
+func (m *Annotations) Copy() *Annotations {
+ if m == nil {
+ return nil
+ }
+
+ o := &Annotations{
+ Name: m.Name,
+ }
+
+ if m.Labels != nil {
+ o.Labels = make(map[string]string)
+ for k, v := range m.Labels {
+ o.Labels[k] = v
+ }
+ }
+
+ return o
+}
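+
+// exampleAnnotationsCopy is an editor's illustrative sketch, not generated
+// code: Copy deep-copies the Labels map, so mutating the clone never aliases
+// the original.
+func exampleAnnotationsCopy() string {
+ a := &Annotations{Name: "web", Labels: map[string]string{"tier": "front"}}
+ b := a.Copy()
+ b.Labels["tier"] = "back"
+ return a.Labels["tier"] // still "front"
+}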
+
+func (m *Resources) Copy() *Resources {
+ if m == nil {
+ return nil
+ }
+
+ o := &Resources{
+ NanoCPUs: m.NanoCPUs,
+ MemoryBytes: m.MemoryBytes,
+ }
+
+ return o
+}
+
+func (m *ResourceRequirements) Copy() *ResourceRequirements {
+ if m == nil {
+ return nil
+ }
+
+ o := &ResourceRequirements{
+ Limits: m.Limits.Copy(),
+ Reservations: m.Reservations.Copy(),
+ }
+
+ return o
+}
+
+func (m *Platform) Copy() *Platform {
+ if m == nil {
+ return nil
+ }
+
+ o := &Platform{
+ Architecture: m.Architecture,
+ OS: m.OS,
+ }
+
+ return o
+}
+
+func (m *PluginDescription) Copy() *PluginDescription {
+ if m == nil {
+ return nil
+ }
+
+ o := &PluginDescription{
+ Type: m.Type,
+ Name: m.Name,
+ }
+
+ return o
+}
+
+func (m *EngineDescription) Copy() *EngineDescription {
+ if m == nil {
+ return nil
+ }
+
+ o := &EngineDescription{
+ EngineVersion: m.EngineVersion,
+ }
+
+ if m.Labels != nil {
+ o.Labels = make(map[string]string)
+ for k, v := range m.Labels {
+ o.Labels[k] = v
+ }
+ }
+
+ if m.Plugins != nil {
+ o.Plugins = make([]PluginDescription, 0, len(m.Plugins))
+ for _, v := range m.Plugins {
+ o.Plugins = append(o.Plugins, *v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *NodeDescription) Copy() *NodeDescription {
+ if m == nil {
+ return nil
+ }
+
+ o := &NodeDescription{
+ Hostname: m.Hostname,
+ Platform: m.Platform.Copy(),
+ Resources: m.Resources.Copy(),
+ Engine: m.Engine.Copy(),
+ }
+
+ return o
+}
+
+func (m *RaftMemberStatus) Copy() *RaftMemberStatus {
+ if m == nil {
+ return nil
+ }
+
+ o := &RaftMemberStatus{
+ Leader: m.Leader,
+ Reachability: m.Reachability,
+ Message: m.Message,
+ }
+
+ return o
+}
+
+func (m *NodeStatus) Copy() *NodeStatus {
+ if m == nil {
+ return nil
+ }
+
+ o := &NodeStatus{
+ State: m.State,
+ Message: m.Message,
+ }
+
+ return o
+}
+
+func (m *Image) Copy() *Image {
+ if m == nil {
+ return nil
+ }
+
+ o := &Image{
+ Reference: m.Reference,
+ }
+
+ return o
+}
+
+func (m *Mount) Copy() *Mount {
+ if m == nil {
+ return nil
+ }
+
+ o := &Mount{
+ Type: m.Type,
+ Source: m.Source,
+ Target: m.Target,
+ Writable: m.Writable,
+ BindOptions: m.BindOptions.Copy(),
+ VolumeOptions: m.VolumeOptions.Copy(),
+ }
+
+ return o
+}
+
+func (m *Mount_BindOptions) Copy() *Mount_BindOptions {
+ if m == nil {
+ return nil
+ }
+
+ o := &Mount_BindOptions{
+ Propagation: m.Propagation,
+ }
+
+ return o
+}
+
+func (m *Mount_VolumeOptions) Copy() *Mount_VolumeOptions {
+ if m == nil {
+ return nil
+ }
+
+ o := &Mount_VolumeOptions{
+ Populate: m.Populate,
+ DriverConfig: m.DriverConfig.Copy(),
+ }
+
+ if m.Labels != nil {
+ o.Labels = make(map[string]string)
+ for k, v := range m.Labels {
+ o.Labels[k] = v
+ }
+ }
+
+ return o
+}
+
+func (m *RestartPolicy) Copy() *RestartPolicy {
+ if m == nil {
+ return nil
+ }
+
+ o := &RestartPolicy{
+ Condition: m.Condition,
+ Delay: m.Delay.Copy(),
+ MaxAttempts: m.MaxAttempts,
+ Window: m.Window.Copy(),
+ }
+
+ return o
+}
+
+func (m *UpdateConfig) Copy() *UpdateConfig {
+ if m == nil {
+ return nil
+ }
+
+ o := &UpdateConfig{
+ Parallelism: m.Parallelism,
+ Delay: *m.Delay.Copy(),
+ }
+
+ return o
+}
+
+func (m *ContainerStatus) Copy() *ContainerStatus {
+ if m == nil {
+ return nil
+ }
+
+ o := &ContainerStatus{
+ ContainerID: m.ContainerID,
+ PID: m.PID,
+ ExitCode: m.ExitCode,
+ }
+
+ return o
+}
+
+func (m *TaskStatus) Copy() *TaskStatus {
+ if m == nil {
+ return nil
+ }
+
+ o := &TaskStatus{
+ Timestamp: m.Timestamp.Copy(),
+ State: m.State,
+ Message: m.Message,
+ Err: m.Err,
+ }
+
+ switch m.RuntimeStatus.(type) {
+ case *TaskStatus_Container:
+ i := &TaskStatus_Container{
+ Container: m.GetContainer().Copy(),
+ }
+
+ o.RuntimeStatus = i
+ }
+
+ return o
+}
+
+func (m *IPAMConfig) Copy() *IPAMConfig {
+ if m == nil {
+ return nil
+ }
+
+ o := &IPAMConfig{
+ Family: m.Family,
+ Subnet: m.Subnet,
+ Range: m.Range,
+ Gateway: m.Gateway,
+ }
+
+ if m.Reserved != nil {
+ o.Reserved = make(map[string]string)
+ for k, v := range m.Reserved {
+ o.Reserved[k] = v
+ }
+ }
+
+ return o
+}
+
+func (m *PortConfig) Copy() *PortConfig {
+ if m == nil {
+ return nil
+ }
+
+ o := &PortConfig{
+ Name: m.Name,
+ Protocol: m.Protocol,
+ TargetPort: m.TargetPort,
+ PublishedPort: m.PublishedPort,
+ }
+
+ return o
+}
+
+func (m *Driver) Copy() *Driver {
+ if m == nil {
+ return nil
+ }
+
+ o := &Driver{
+ Name: m.Name,
+ }
+
+ if m.Options != nil {
+ o.Options = make(map[string]string)
+ for k, v := range m.Options {
+ o.Options[k] = v
+ }
+ }
+
+ return o
+}
+
+func (m *IPAMOptions) Copy() *IPAMOptions {
+ if m == nil {
+ return nil
+ }
+
+ o := &IPAMOptions{
+ Driver: m.Driver.Copy(),
+ }
+
+ if m.Configs != nil {
+ o.Configs = make([]*IPAMConfig, 0, len(m.Configs))
+ for _, v := range m.Configs {
+ o.Configs = append(o.Configs, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *Peer) Copy() *Peer {
+ if m == nil {
+ return nil
+ }
+
+ o := &Peer{
+ NodeID: m.NodeID,
+ Addr: m.Addr,
+ }
+
+ return o
+}
+
+func (m *WeightedPeer) Copy() *WeightedPeer {
+ if m == nil {
+ return nil
+ }
+
+ o := &WeightedPeer{
+ Peer: m.Peer.Copy(),
+ Weight: m.Weight,
+ }
+
+ return o
+}
+
+func (m *IssuanceStatus) Copy() *IssuanceStatus {
+ if m == nil {
+ return nil
+ }
+
+ o := &IssuanceStatus{
+ State: m.State,
+ Err: m.Err,
+ }
+
+ return o
+}
+
+func (m *AcceptancePolicy) Copy() *AcceptancePolicy {
+ if m == nil {
+ return nil
+ }
+
+ o := &AcceptancePolicy{}
+
+ if m.Policies != nil {
+ o.Policies = make([]*AcceptancePolicy_RoleAdmissionPolicy, 0, len(m.Policies))
+ for _, v := range m.Policies {
+ o.Policies = append(o.Policies, v.Copy())
+ }
+ }
+
+ return o
+}
+
+func (m *AcceptancePolicy_RoleAdmissionPolicy) Copy() *AcceptancePolicy_RoleAdmissionPolicy {
+ if m == nil {
+ return nil
+ }
+
+ o := &AcceptancePolicy_RoleAdmissionPolicy{
+ Role: m.Role,
+ Autoaccept: m.Autoaccept,
+ Secret: m.Secret.Copy(),
+ }
+
+ return o
+}
+
+func (m *AcceptancePolicy_RoleAdmissionPolicy_HashedSecret) Copy() *AcceptancePolicy_RoleAdmissionPolicy_HashedSecret {
+ if m == nil {
+ return nil
+ }
+
+ o := &AcceptancePolicy_RoleAdmissionPolicy_HashedSecret{
+ Data: m.Data,
+ Alg: m.Alg,
+ }
+
+ return o
+}
+
+func (m *CAConfig) Copy() *CAConfig {
+ if m == nil {
+ return nil
+ }
+
+ o := &CAConfig{
+ NodeCertExpiry: m.NodeCertExpiry.Copy(),
+ }
+
+ return o
+}
+
+func (m *OrchestrationConfig) Copy() *OrchestrationConfig {
+ if m == nil {
+ return nil
+ }
+
+ o := &OrchestrationConfig{
+ TaskHistoryRetentionLimit: m.TaskHistoryRetentionLimit,
+ }
+
+ return o
+}
+
+func (m *DispatcherConfig) Copy() *DispatcherConfig {
+ if m == nil {
+ return nil
+ }
+
+ o := &DispatcherConfig{
+ HeartbeatPeriod: m.HeartbeatPeriod,
+ }
+
+ return o
+}
+
+func (m *RaftConfig) Copy() *RaftConfig {
+ if m == nil {
+ return nil
+ }
+
+ o := &RaftConfig{
+ SnapshotInterval: m.SnapshotInterval,
+ KeepOldSnapshots: m.KeepOldSnapshots,
+ LogEntriesForSlowFollowers: m.LogEntriesForSlowFollowers,
+ HeartbeatTick: m.HeartbeatTick,
+ ElectionTick: m.ElectionTick,
+ }
+
+ return o
+}
+
+func (m *RaftMember) Copy() *RaftMember {
+ if m == nil {
+ return nil
+ }
+
+ o := &RaftMember{
+ RaftID: m.RaftID,
+ Addr: m.Addr,
+ Status: *m.Status.Copy(),
+ }
+
+ return o
+}
+
+func (m *Placement) Copy() *Placement {
+ if m == nil {
+ return nil
+ }
+
+ o := &Placement{}
+
+ if m.Constraints != nil {
+ o.Constraints = make([]string, 0, len(m.Constraints))
+ for _, v := range m.Constraints {
+ o.Constraints = append(o.Constraints, v)
+ }
+ }
+
+ return o
+}
+
+func (m *RootCA) Copy() *RootCA {
+ if m == nil {
+ return nil
+ }
+
+ o := &RootCA{
+ CAKey: m.CAKey,
+ CACert: m.CACert,
+ CACertHash: m.CACertHash,
+ }
+
+ return o
+}
+
+func (m *Certificate) Copy() *Certificate {
+ if m == nil {
+ return nil
+ }
+
+ o := &Certificate{
+ Role: m.Role,
+ CSR: m.CSR,
+ Status: *m.Status.Copy(),
+ Certificate: m.Certificate,
+ CN: m.CN,
+ }
+
+ return o
+}
+
+func (m *EncryptionKey) Copy() *EncryptionKey {
+ if m == nil {
+ return nil
+ }
+
+ o := &EncryptionKey{
+ Subsystem: m.Subsystem,
+ Algorithm: m.Algorithm,
+ Key: m.Key,
+ LamportTime: m.LamportTime,
+ }
+
+ return o
+}
+
+func (m *ManagerStatus) Copy() *ManagerStatus {
+ if m == nil {
+ return nil
+ }
+
+ o := &ManagerStatus{
+ Raft: *m.Raft.Copy(),
+ }
+
+ return o
+}
+
+func (this *Version) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.Version{")
+ s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
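+
+// Editor's note: GoString implements fmt.GoStringer, so these strings are
+// what fmt's %#v verb produces for pointers to these types; for example,
+// fmt.Sprintf("%#v", &Version{Index: 3}) yields "&api.Version{Index: 0x3,\n}"
+// (uint64 values print in hex under %#v).
+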
+func (this *Annotations) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.Annotations{")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ if this.Labels != nil {
+ s = append(s, "Labels: "+mapStringForLabels+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Resources) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.Resources{")
+ s = append(s, "NanoCPUs: "+fmt.Sprintf("%#v", this.NanoCPUs)+",\n")
+ s = append(s, "MemoryBytes: "+fmt.Sprintf("%#v", this.MemoryBytes)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ResourceRequirements) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.ResourceRequirements{")
+ if this.Limits != nil {
+ s = append(s, "Limits: "+fmt.Sprintf("%#v", this.Limits)+",\n")
+ }
+ if this.Reservations != nil {
+ s = append(s, "Reservations: "+fmt.Sprintf("%#v", this.Reservations)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Platform) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.Platform{")
+ s = append(s, "Architecture: "+fmt.Sprintf("%#v", this.Architecture)+",\n")
+ s = append(s, "OS: "+fmt.Sprintf("%#v", this.OS)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *PluginDescription) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.PluginDescription{")
+ s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *EngineDescription) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.EngineDescription{")
+ s = append(s, "EngineVersion: "+fmt.Sprintf("%#v", this.EngineVersion)+",\n")
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ if this.Labels != nil {
+ s = append(s, "Labels: "+mapStringForLabels+",\n")
+ }
+ if this.Plugins != nil {
+ s = append(s, "Plugins: "+fmt.Sprintf("%#v", this.Plugins)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *NodeDescription) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&api.NodeDescription{")
+ s = append(s, "Hostname: "+fmt.Sprintf("%#v", this.Hostname)+",\n")
+ if this.Platform != nil {
+ s = append(s, "Platform: "+fmt.Sprintf("%#v", this.Platform)+",\n")
+ }
+ if this.Resources != nil {
+ s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n")
+ }
+ if this.Engine != nil {
+ s = append(s, "Engine: "+fmt.Sprintf("%#v", this.Engine)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RaftMemberStatus) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.RaftMemberStatus{")
+ s = append(s, "Leader: "+fmt.Sprintf("%#v", this.Leader)+",\n")
+ s = append(s, "Reachability: "+fmt.Sprintf("%#v", this.Reachability)+",\n")
+ s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *NodeStatus) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.NodeStatus{")
+ s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n")
+ s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Image) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.Image{")
+ s = append(s, "Reference: "+fmt.Sprintf("%#v", this.Reference)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Mount) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 10)
+ s = append(s, "&api.Mount{")
+ s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
+ s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n")
+ s = append(s, "Target: "+fmt.Sprintf("%#v", this.Target)+",\n")
+ s = append(s, "Writable: "+fmt.Sprintf("%#v", this.Writable)+",\n")
+ if this.BindOptions != nil {
+ s = append(s, "BindOptions: "+fmt.Sprintf("%#v", this.BindOptions)+",\n")
+ }
+ if this.VolumeOptions != nil {
+ s = append(s, "VolumeOptions: "+fmt.Sprintf("%#v", this.VolumeOptions)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Mount_BindOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.Mount_BindOptions{")
+ s = append(s, "Propagation: "+fmt.Sprintf("%#v", this.Propagation)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Mount_VolumeOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.Mount_VolumeOptions{")
+ s = append(s, "Populate: "+fmt.Sprintf("%#v", this.Populate)+",\n")
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ if this.Labels != nil {
+ s = append(s, "Labels: "+mapStringForLabels+",\n")
+ }
+ if this.DriverConfig != nil {
+ s = append(s, "DriverConfig: "+fmt.Sprintf("%#v", this.DriverConfig)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RestartPolicy) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&api.RestartPolicy{")
+ s = append(s, "Condition: "+fmt.Sprintf("%#v", this.Condition)+",\n")
+ if this.Delay != nil {
+ s = append(s, "Delay: "+fmt.Sprintf("%#v", this.Delay)+",\n")
+ }
+ s = append(s, "MaxAttempts: "+fmt.Sprintf("%#v", this.MaxAttempts)+",\n")
+ if this.Window != nil {
+ s = append(s, "Window: "+fmt.Sprintf("%#v", this.Window)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UpdateConfig) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.UpdateConfig{")
+ s = append(s, "Parallelism: "+fmt.Sprintf("%#v", this.Parallelism)+",\n")
+ s = append(s, "Delay: "+strings.Replace(this.Delay.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ContainerStatus) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.ContainerStatus{")
+ s = append(s, "ContainerID: "+fmt.Sprintf("%#v", this.ContainerID)+",\n")
+ s = append(s, "PID: "+fmt.Sprintf("%#v", this.PID)+",\n")
+ s = append(s, "ExitCode: "+fmt.Sprintf("%#v", this.ExitCode)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TaskStatus) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&api.TaskStatus{")
+ if this.Timestamp != nil {
+ s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
+ }
+ s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n")
+ s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n")
+ s = append(s, "Err: "+fmt.Sprintf("%#v", this.Err)+",\n")
+ if this.RuntimeStatus != nil {
+ s = append(s, "RuntimeStatus: "+fmt.Sprintf("%#v", this.RuntimeStatus)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TaskStatus_Container) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&api.TaskStatus_Container{` +
+ `Container:` + fmt.Sprintf("%#v", this.Container) + `}`}, ", ")
+ return s
+}
+func (this *IPAMConfig) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&api.IPAMConfig{")
+ s = append(s, "Family: "+fmt.Sprintf("%#v", this.Family)+",\n")
+ s = append(s, "Subnet: "+fmt.Sprintf("%#v", this.Subnet)+",\n")
+ s = append(s, "Range: "+fmt.Sprintf("%#v", this.Range)+",\n")
+ s = append(s, "Gateway: "+fmt.Sprintf("%#v", this.Gateway)+",\n")
+ keysForReserved := make([]string, 0, len(this.Reserved))
+ for k := range this.Reserved {
+ keysForReserved = append(keysForReserved, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForReserved)
+ mapStringForReserved := "map[string]string{"
+ for _, k := range keysForReserved {
+ mapStringForReserved += fmt.Sprintf("%#v: %#v,", k, this.Reserved[k])
+ }
+ mapStringForReserved += "}"
+ if this.Reserved != nil {
+ s = append(s, "Reserved: "+mapStringForReserved+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *PortConfig) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&api.PortConfig{")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ s = append(s, "Protocol: "+fmt.Sprintf("%#v", this.Protocol)+",\n")
+ s = append(s, "TargetPort: "+fmt.Sprintf("%#v", this.TargetPort)+",\n")
+ s = append(s, "PublishedPort: "+fmt.Sprintf("%#v", this.PublishedPort)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Driver) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.Driver{")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ keysForOptions := make([]string, 0, len(this.Options))
+ for k := range this.Options {
+ keysForOptions = append(keysForOptions, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
+ mapStringForOptions := "map[string]string{"
+ for _, k := range keysForOptions {
+ mapStringForOptions += fmt.Sprintf("%#v: %#v,", k, this.Options[k])
+ }
+ mapStringForOptions += "}"
+ if this.Options != nil {
+ s = append(s, "Options: "+mapStringForOptions+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *IPAMOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.IPAMOptions{")
+ if this.Driver != nil {
+ s = append(s, "Driver: "+fmt.Sprintf("%#v", this.Driver)+",\n")
+ }
+ if this.Configs != nil {
+ s = append(s, "Configs: "+fmt.Sprintf("%#v", this.Configs)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Peer) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.Peer{")
+ s = append(s, "NodeID: "+fmt.Sprintf("%#v", this.NodeID)+",\n")
+ s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *WeightedPeer) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.WeightedPeer{")
+ if this.Peer != nil {
+ s = append(s, "Peer: "+fmt.Sprintf("%#v", this.Peer)+",\n")
+ }
+ s = append(s, "Weight: "+fmt.Sprintf("%#v", this.Weight)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *IssuanceStatus) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.IssuanceStatus{")
+ s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n")
+ s = append(s, "Err: "+fmt.Sprintf("%#v", this.Err)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *AcceptancePolicy) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.AcceptancePolicy{")
+ if this.Policies != nil {
+ s = append(s, "Policies: "+fmt.Sprintf("%#v", this.Policies)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *AcceptancePolicy_RoleAdmissionPolicy) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.AcceptancePolicy_RoleAdmissionPolicy{")
+ s = append(s, "Role: "+fmt.Sprintf("%#v", this.Role)+",\n")
+ s = append(s, "Autoaccept: "+fmt.Sprintf("%#v", this.Autoaccept)+",\n")
+ if this.Secret != nil {
+ s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *AcceptancePolicy_RoleAdmissionPolicy_HashedSecret) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&api.AcceptancePolicy_RoleAdmissionPolicy_HashedSecret{")
+ s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n")
+ s = append(s, "Alg: "+fmt.Sprintf("%#v", this.Alg)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *CAConfig) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.CAConfig{")
+ if this.NodeCertExpiry != nil {
+ s = append(s, "NodeCertExpiry: "+fmt.Sprintf("%#v", this.NodeCertExpiry)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *OrchestrationConfig) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.OrchestrationConfig{")
+ s = append(s, "TaskHistoryRetentionLimit: "+fmt.Sprintf("%#v", this.TaskHistoryRetentionLimit)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *DispatcherConfig) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.DispatcherConfig{")
+ s = append(s, "HeartbeatPeriod: "+fmt.Sprintf("%#v", this.HeartbeatPeriod)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RaftConfig) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&api.RaftConfig{")
+ s = append(s, "SnapshotInterval: "+fmt.Sprintf("%#v", this.SnapshotInterval)+",\n")
+ s = append(s, "KeepOldSnapshots: "+fmt.Sprintf("%#v", this.KeepOldSnapshots)+",\n")
+ s = append(s, "LogEntriesForSlowFollowers: "+fmt.Sprintf("%#v", this.LogEntriesForSlowFollowers)+",\n")
+ s = append(s, "HeartbeatTick: "+fmt.Sprintf("%#v", this.HeartbeatTick)+",\n")
+ s = append(s, "ElectionTick: "+fmt.Sprintf("%#v", this.ElectionTick)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RaftMember) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.RaftMember{")
+ s = append(s, "RaftID: "+fmt.Sprintf("%#v", this.RaftID)+",\n")
+ s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n")
+ s = append(s, "Status: "+strings.Replace(this.Status.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Placement) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.Placement{")
+ s = append(s, "Constraints: "+fmt.Sprintf("%#v", this.Constraints)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *RootCA) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&api.RootCA{")
+ s = append(s, "CAKey: "+fmt.Sprintf("%#v", this.CAKey)+",\n")
+ s = append(s, "CACert: "+fmt.Sprintf("%#v", this.CACert)+",\n")
+ s = append(s, "CACertHash: "+fmt.Sprintf("%#v", this.CACertHash)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Certificate) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&api.Certificate{")
+ s = append(s, "Role: "+fmt.Sprintf("%#v", this.Role)+",\n")
+ s = append(s, "CSR: "+fmt.Sprintf("%#v", this.CSR)+",\n")
+ s = append(s, "Status: "+strings.Replace(this.Status.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Certificate: "+fmt.Sprintf("%#v", this.Certificate)+",\n")
+ s = append(s, "CN: "+fmt.Sprintf("%#v", this.CN)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *EncryptionKey) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&api.EncryptionKey{")
+ s = append(s, "Subsystem: "+fmt.Sprintf("%#v", this.Subsystem)+",\n")
+ s = append(s, "Algorithm: "+fmt.Sprintf("%#v", this.Algorithm)+",\n")
+ s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n")
+ s = append(s, "LamportTime: "+fmt.Sprintf("%#v", this.LamportTime)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ManagerStatus) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&api.ManagerStatus{")
+ s = append(s, "Raft: "+strings.Replace(this.Raft.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringTypes(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringTypes(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+func (m *Version) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Version) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Index != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Index))
+ }
+ return i, nil
+}
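+
+// Editor's note: data[i] = 0x8 above is the field key for field 1, wire
+// type 0 (varint): (1 << 3) | 0 == 0x08. encodeVarintTypes then appends the
+// value in base-128 varint form; Index == 300, for instance, encodes as the
+// two bytes 0xac 0x02.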
+
+func (m *Annotations) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Annotations) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ }
+ if len(m.Labels) > 0 {
+ for k := range m.Labels {
+ data[i] = 0x12
+ i++
+ v := m.Labels[k]
+ mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
+ i = encodeVarintTypes(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ return i, nil
+}
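+
+// Editor's note: each Labels entry above is written as a nested message under
+// the outer field-2 tag 0x12: the key as field 1 (tag 0xa) and the value as
+// field 2 (tag 0x12), both length-delimited strings. mapSize is therefore one
+// tag byte plus one length varint plus the payload, for each of the key and
+// the value, exactly as computed inline.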
+
+func (m *Resources) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Resources) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.NanoCPUs != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.NanoCPUs))
+ }
+ if m.MemoryBytes != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.MemoryBytes))
+ }
+ return i, nil
+}
+
+func (m *ResourceRequirements) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ResourceRequirements) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Limits != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Limits.Size()))
+ n1, err := m.Limits.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ if m.Reservations != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Reservations.Size()))
+ n2, err := m.Reservations.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ return i, nil
+}
+
+func (m *Platform) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Platform) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Architecture) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Architecture)))
+ i += copy(data[i:], m.Architecture)
+ }
+ if len(m.OS) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.OS)))
+ i += copy(data[i:], m.OS)
+ }
+ return i, nil
+}
+
+func (m *PluginDescription) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PluginDescription) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Type) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ }
+ if len(m.Name) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ }
+ return i, nil
+}
+
+func (m *EngineDescription) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EngineDescription) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.EngineVersion) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.EngineVersion)))
+ i += copy(data[i:], m.EngineVersion)
+ }
+ if len(m.Labels) > 0 {
+ for k := range m.Labels {
+ data[i] = 0x12
+ i++
+ v := m.Labels[k]
+ mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
+ i = encodeVarintTypes(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ if len(m.Plugins) > 0 {
+ for _, msg := range m.Plugins {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *NodeDescription) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeDescription) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Hostname) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Hostname)))
+ i += copy(data[i:], m.Hostname)
+ }
+ if m.Platform != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Platform.Size()))
+ n3, err := m.Platform.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
+ if m.Resources != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Resources.Size()))
+ n4, err := m.Resources.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
+ if m.Engine != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Engine.Size()))
+ n5, err := m.Engine.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ }
+ return i, nil
+}
+
+func (m *RaftMemberStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RaftMemberStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Leader {
+ data[i] = 0x8
+ i++
+ if m.Leader {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ if m.Reachability != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Reachability))
+ }
+ if len(m.Message) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ }
+ return i, nil
+}
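+
+// Editor's note: proto3 bools are written as a one-byte varint (1 for true),
+// and, as with every scalar field in this file, the field is skipped entirely
+// when it holds its zero value; that is why the Leader write above is guarded
+// by if m.Leader.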
+
+func (m *NodeStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.State != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.State))
+ }
+ if len(m.Message) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ }
+ return i, nil
+}
+
+func (m *Image) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Image) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Reference) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Reference)))
+ i += copy(data[i:], m.Reference)
+ }
+ return i, nil
+}
+
+func (m *Mount) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Mount) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Type != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Type))
+ }
+ if len(m.Source) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Source)))
+ i += copy(data[i:], m.Source)
+ }
+ if len(m.Target) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Target)))
+ i += copy(data[i:], m.Target)
+ }
+ if m.Writable {
+ data[i] = 0x20
+ i++
+ if m.Writable {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ if m.BindOptions != nil {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.BindOptions.Size()))
+ n6, err := m.BindOptions.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ if m.VolumeOptions != nil {
+ data[i] = 0x32
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.VolumeOptions.Size()))
+ n7, err := m.VolumeOptions.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ return i, nil
+}
+
+func (m *Mount_BindOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Mount_BindOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Propagation != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Propagation))
+ }
+ return i, nil
+}
+
+func (m *Mount_VolumeOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Mount_VolumeOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Populate {
+ data[i] = 0x8
+ i++
+ if m.Populate {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ if len(m.Labels) > 0 {
+ for k := range m.Labels {
+ data[i] = 0x12
+ i++
+ v := m.Labels[k]
+ mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
+ i = encodeVarintTypes(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ if m.DriverConfig != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.DriverConfig.Size()))
+ n8, err := m.DriverConfig.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ }
+ return i, nil
+}
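+
+// Map fields are encoded as repeated nested entry messages: the outer
+// key byte (0x12 here, field 2) and the entry length come first, then
+// the entry body holds the map key as field 1 (0xa) and the value as
+// field 2 (0x12). mapSize precomputes that body: one key byte plus a
+// varint length plus the payload for each of the key and value strings.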
+
+func (m *RestartPolicy) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RestartPolicy) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Condition != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Condition))
+ }
+ if m.Delay != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Delay.Size()))
+ n9, err := m.Delay.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ }
+ if m.MaxAttempts != 0 {
+ data[i] = 0x18
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.MaxAttempts))
+ }
+ if m.Window != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Window.Size()))
+ n10, err := m.Window.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ }
+ return i, nil
+}
+
+func (m *UpdateConfig) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *UpdateConfig) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Parallelism != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Parallelism))
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Delay.Size()))
+ n11, err := m.Delay.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ return i, nil
+}
+
+func (m *ContainerStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ContainerStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ContainerID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.ContainerID)))
+ i += copy(data[i:], m.ContainerID)
+ }
+ if m.PID != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.PID))
+ }
+ if m.ExitCode != 0 {
+ data[i] = 0x18
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.ExitCode))
+ }
+ return i, nil
+}
+
+func (m *TaskStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *TaskStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Timestamp != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Timestamp.Size()))
+ n12, err := m.Timestamp.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ }
+ if m.State != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.State))
+ }
+ if len(m.Message) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ }
+ if len(m.Err) > 0 {
+ data[i] = 0x22
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Err)))
+ i += copy(data[i:], m.Err)
+ }
+ if m.RuntimeStatus != nil {
+ nn13, err := m.RuntimeStatus.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn13
+ }
+ return i, nil
+}
+
+func (m *TaskStatus_Container) MarshalTo(data []byte) (int, error) {
+ i := 0
+ if m.Container != nil {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Container.Size()))
+ n14, err := m.Container.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ }
+ return i, nil
+}
+func (m *IPAMConfig) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IPAMConfig) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Family != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Family))
+ }
+ if len(m.Subnet) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Subnet)))
+ i += copy(data[i:], m.Subnet)
+ }
+ if len(m.Range) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Range)))
+ i += copy(data[i:], m.Range)
+ }
+ if len(m.Gateway) > 0 {
+ data[i] = 0x22
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Gateway)))
+ i += copy(data[i:], m.Gateway)
+ }
+ if len(m.Reserved) > 0 {
+ for k := range m.Reserved {
+ data[i] = 0x2a
+ i++
+ v := m.Reserved[k]
+ mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
+ i = encodeVarintTypes(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ return i, nil
+}
+
+func (m *PortConfig) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PortConfig) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ }
+ if m.Protocol != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Protocol))
+ }
+ if m.TargetPort != 0 {
+ data[i] = 0x18
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.TargetPort))
+ }
+ if m.PublishedPort != 0 {
+ data[i] = 0x20
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.PublishedPort))
+ }
+ return i, nil
+}
+
+func (m *Driver) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Driver) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ }
+ if len(m.Options) > 0 {
+ for k := range m.Options {
+ data[i] = 0x12
+ i++
+ v := m.Options[k]
+ mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
+ i = encodeVarintTypes(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ return i, nil
+}
+
+func (m *IPAMOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IPAMOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Driver != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Driver.Size()))
+ n15, err := m.Driver.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n15
+ }
+ if len(m.Configs) > 0 {
+ for _, msg := range m.Configs {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *Peer) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Peer) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.NodeID) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.NodeID)))
+ i += copy(data[i:], m.NodeID)
+ }
+ if len(m.Addr) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Addr)))
+ i += copy(data[i:], m.Addr)
+ }
+ return i, nil
+}
+
+func (m *WeightedPeer) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *WeightedPeer) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Peer != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Peer.Size()))
+ n16, err := m.Peer.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ }
+ if m.Weight != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Weight))
+ }
+ return i, nil
+}
+
+func (m *IssuanceStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IssuanceStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.State != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.State))
+ }
+ if len(m.Err) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Err)))
+ i += copy(data[i:], m.Err)
+ }
+ return i, nil
+}
+
+func (m *AcceptancePolicy) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *AcceptancePolicy) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Policies) > 0 {
+ for _, msg := range m.Policies {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *AcceptancePolicy_RoleAdmissionPolicy) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *AcceptancePolicy_RoleAdmissionPolicy) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Role != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Role))
+ }
+ if m.Autoaccept {
+ data[i] = 0x10
+ i++
+ if m.Autoaccept {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ if m.Secret != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Secret.Size()))
+ n17, err := m.Secret.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n17
+ }
+ return i, nil
+}
+
+func (m *AcceptancePolicy_RoleAdmissionPolicy_HashedSecret) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *AcceptancePolicy_RoleAdmissionPolicy_HashedSecret) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Data) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Data)))
+ i += copy(data[i:], m.Data)
+ }
+ if len(m.Alg) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Alg)))
+ i += copy(data[i:], m.Alg)
+ }
+ return i, nil
+}
+
+func (m *CAConfig) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CAConfig) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.NodeCertExpiry != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.NodeCertExpiry.Size()))
+ n18, err := m.NodeCertExpiry.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n18
+ }
+ return i, nil
+}
+
+func (m *OrchestrationConfig) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *OrchestrationConfig) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.TaskHistoryRetentionLimit != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.TaskHistoryRetentionLimit))
+ }
+ return i, nil
+}
+
+func (m *DispatcherConfig) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DispatcherConfig) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.HeartbeatPeriod != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.HeartbeatPeriod))
+ }
+ return i, nil
+}
+
+func (m *RaftConfig) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RaftConfig) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.SnapshotInterval != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.SnapshotInterval))
+ }
+ if m.KeepOldSnapshots != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.KeepOldSnapshots))
+ }
+ if m.LogEntriesForSlowFollowers != 0 {
+ data[i] = 0x18
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.LogEntriesForSlowFollowers))
+ }
+ if m.HeartbeatTick != 0 {
+ data[i] = 0x20
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.HeartbeatTick))
+ }
+ if m.ElectionTick != 0 {
+ data[i] = 0x28
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.ElectionTick))
+ }
+ return i, nil
+}
+
+func (m *RaftMember) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RaftMember) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.RaftID != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.RaftID))
+ }
+ if len(m.Addr) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Addr)))
+ i += copy(data[i:], m.Addr)
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Status.Size()))
+ n19, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n19
+ return i, nil
+}
+
+func (m *Placement) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Placement) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Constraints) > 0 {
+ for _, s := range m.Constraints {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
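+
+// The length loop above is encodeVarintTypes inlined for a string slice:
+// emit the low seven bits with the continuation bit (0x80) set while the
+// remainder is >= 128, then the final byte without it. Repeated fields
+// simply repeat the key byte (0xa) before every element.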
+
+func (m *RootCA) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RootCA) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.CAKey) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.CAKey)))
+ i += copy(data[i:], m.CAKey)
+ }
+ if len(m.CACert) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.CACert)))
+ i += copy(data[i:], m.CACert)
+ }
+ if len(m.CACertHash) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.CACertHash)))
+ i += copy(data[i:], m.CACertHash)
+ }
+ return i, nil
+}
+
+func (m *Certificate) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Certificate) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Role != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Role))
+ }
+ if len(m.CSR) > 0 {
+ data[i] = 0x12
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.CSR)))
+ i += copy(data[i:], m.CSR)
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Status.Size()))
+ n20, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n20
+ if len(m.Certificate) > 0 {
+ data[i] = 0x22
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Certificate)))
+ i += copy(data[i:], m.Certificate)
+ }
+ if len(m.CN) > 0 {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.CN)))
+ i += copy(data[i:], m.CN)
+ }
+ return i, nil
+}
+
+func (m *EncryptionKey) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EncryptionKey) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Subsystem) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Subsystem)))
+ i += copy(data[i:], m.Subsystem)
+ }
+ if m.Algorithm != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Algorithm))
+ }
+ if len(m.Key) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintTypes(data, i, uint64(len(m.Key)))
+ i += copy(data[i:], m.Key)
+ }
+ if m.LamportTime != 0 {
+ data[i] = 0x20
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.LamportTime))
+ }
+ return i, nil
+}
+
+func (m *ManagerStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ManagerStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintTypes(data, i, uint64(m.Raft.Size()))
+ n21, err := m.Raft.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n21
+ return i, nil
+}
+
+func encodeFixed64Types(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Types(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintTypes(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
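+
+// encodeFixed64Types and encodeFixed32Types write little-endian
+// fixed-width values; encodeVarintTypes writes base-128 varints, least
+// significant seven bits first, with the high bit of each byte marking
+// continuation. For example, 300 (0b1_0010_1100) encodes as 0xac 0x02:
+//	0xac = 0x2c (low seven bits) | 0x80 (more bytes follow)
+//	0x02 = the remaining bits, continuation clear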
+
+func (m *Version) Size() (n int) {
+ var l int
+ _ = l
+ if m.Index != 0 {
+ n += 1 + sovTypes(uint64(m.Index))
+ }
+ return n
+}
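+
+// The Size methods mirror MarshalTo byte for byte: a present varint
+// field costs one key byte plus sovTypes(value) payload bytes, a
+// length-delimited field costs 1 + l + sovTypes(uint64(l)), and zero
+// values cost nothing because MarshalTo skips them.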
+
+func (m *Annotations) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
+ n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *Resources) Size() (n int) {
+ var l int
+ _ = l
+ if m.NanoCPUs != 0 {
+ n += 1 + sovTypes(uint64(m.NanoCPUs))
+ }
+ if m.MemoryBytes != 0 {
+ n += 1 + sovTypes(uint64(m.MemoryBytes))
+ }
+ return n
+}
+
+func (m *ResourceRequirements) Size() (n int) {
+ var l int
+ _ = l
+ if m.Limits != nil {
+ l = m.Limits.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.Reservations != nil {
+ l = m.Reservations.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *Platform) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Architecture)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = len(m.OS)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *PluginDescription) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *EngineDescription) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.EngineVersion)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
+ n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Plugins) > 0 {
+ for _, e := range m.Plugins {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NodeDescription) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Hostname)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.Platform != nil {
+ l = m.Platform.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.Resources != nil {
+ l = m.Resources.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.Engine != nil {
+ l = m.Engine.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *RaftMemberStatus) Size() (n int) {
+ var l int
+ _ = l
+ if m.Leader {
+ n += 2
+ }
+ if m.Reachability != 0 {
+ n += 1 + sovTypes(uint64(m.Reachability))
+ }
+ l = len(m.Message)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *NodeStatus) Size() (n int) {
+ var l int
+ _ = l
+ if m.State != 0 {
+ n += 1 + sovTypes(uint64(m.State))
+ }
+ l = len(m.Message)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *Image) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Reference)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *Mount) Size() (n int) {
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + sovTypes(uint64(m.Type))
+ }
+ l = len(m.Source)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = len(m.Target)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.Writable {
+ n += 2
+ }
+ if m.BindOptions != nil {
+ l = m.BindOptions.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.VolumeOptions != nil {
+ l = m.VolumeOptions.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *Mount_BindOptions) Size() (n int) {
+ var l int
+ _ = l
+ if m.Propagation != 0 {
+ n += 1 + sovTypes(uint64(m.Propagation))
+ }
+ return n
+}
+
+func (m *Mount_VolumeOptions) Size() (n int) {
+ var l int
+ _ = l
+ if m.Populate {
+ n += 2
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
+ n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize))
+ }
+ }
+ if m.DriverConfig != nil {
+ l = m.DriverConfig.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *RestartPolicy) Size() (n int) {
+ var l int
+ _ = l
+ if m.Condition != 0 {
+ n += 1 + sovTypes(uint64(m.Condition))
+ }
+ if m.Delay != nil {
+ l = m.Delay.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.MaxAttempts != 0 {
+ n += 1 + sovTypes(uint64(m.MaxAttempts))
+ }
+ if m.Window != nil {
+ l = m.Window.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *UpdateConfig) Size() (n int) {
+ var l int
+ _ = l
+ if m.Parallelism != 0 {
+ n += 1 + sovTypes(uint64(m.Parallelism))
+ }
+ l = m.Delay.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ return n
+}
+
+func (m *ContainerStatus) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ContainerID)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.PID != 0 {
+ n += 1 + sovTypes(uint64(m.PID))
+ }
+ if m.ExitCode != 0 {
+ n += 1 + sovTypes(uint64(m.ExitCode))
+ }
+ return n
+}
+
+func (m *TaskStatus) Size() (n int) {
+ var l int
+ _ = l
+ if m.Timestamp != nil {
+ l = m.Timestamp.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.State != 0 {
+ n += 1 + sovTypes(uint64(m.State))
+ }
+ l = len(m.Message)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = len(m.Err)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.RuntimeStatus != nil {
+ n += m.RuntimeStatus.Size()
+ }
+ return n
+}
+
+func (m *TaskStatus_Container) Size() (n int) {
+ var l int
+ _ = l
+ if m.Container != nil {
+ l = m.Container.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+func (m *IPAMConfig) Size() (n int) {
+ var l int
+ _ = l
+ if m.Family != 0 {
+ n += 1 + sovTypes(uint64(m.Family))
+ }
+ l = len(m.Subnet)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = len(m.Range)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = len(m.Gateway)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if len(m.Reserved) > 0 {
+ for k, v := range m.Reserved {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
+ n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *PortConfig) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.Protocol != 0 {
+ n += 1 + sovTypes(uint64(m.Protocol))
+ }
+ if m.TargetPort != 0 {
+ n += 1 + sovTypes(uint64(m.TargetPort))
+ }
+ if m.PublishedPort != 0 {
+ n += 1 + sovTypes(uint64(m.PublishedPort))
+ }
+ return n
+}
+
+func (m *Driver) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if len(m.Options) > 0 {
+ for k, v := range m.Options {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
+ n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *IPAMOptions) Size() (n int) {
+ var l int
+ _ = l
+ if m.Driver != nil {
+ l = m.Driver.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if len(m.Configs) > 0 {
+ for _, e := range m.Configs {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Peer) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.NodeID)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = len(m.Addr)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *WeightedPeer) Size() (n int) {
+ var l int
+ _ = l
+ if m.Peer != nil {
+ l = m.Peer.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.Weight != 0 {
+ n += 1 + sovTypes(uint64(m.Weight))
+ }
+ return n
+}
+
+func (m *IssuanceStatus) Size() (n int) {
+ var l int
+ _ = l
+ if m.State != 0 {
+ n += 1 + sovTypes(uint64(m.State))
+ }
+ l = len(m.Err)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *AcceptancePolicy) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Policies) > 0 {
+ for _, e := range m.Policies {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *AcceptancePolicy_RoleAdmissionPolicy) Size() (n int) {
+ var l int
+ _ = l
+ if m.Role != 0 {
+ n += 1 + sovTypes(uint64(m.Role))
+ }
+ if m.Autoaccept {
+ n += 2
+ }
+ if m.Secret != nil {
+ l = m.Secret.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *AcceptancePolicy_RoleAdmissionPolicy_HashedSecret) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = len(m.Alg)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *CAConfig) Size() (n int) {
+ var l int
+ _ = l
+ if m.NodeCertExpiry != nil {
+ l = m.NodeCertExpiry.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *OrchestrationConfig) Size() (n int) {
+ var l int
+ _ = l
+ if m.TaskHistoryRetentionLimit != 0 {
+ n += 1 + sovTypes(uint64(m.TaskHistoryRetentionLimit))
+ }
+ return n
+}
+
+func (m *DispatcherConfig) Size() (n int) {
+ var l int
+ _ = l
+ if m.HeartbeatPeriod != 0 {
+ n += 1 + sovTypes(uint64(m.HeartbeatPeriod))
+ }
+ return n
+}
+
+func (m *RaftConfig) Size() (n int) {
+ var l int
+ _ = l
+ if m.SnapshotInterval != 0 {
+ n += 1 + sovTypes(uint64(m.SnapshotInterval))
+ }
+ if m.KeepOldSnapshots != 0 {
+ n += 1 + sovTypes(uint64(m.KeepOldSnapshots))
+ }
+ if m.LogEntriesForSlowFollowers != 0 {
+ n += 1 + sovTypes(uint64(m.LogEntriesForSlowFollowers))
+ }
+ if m.HeartbeatTick != 0 {
+ n += 1 + sovTypes(uint64(m.HeartbeatTick))
+ }
+ if m.ElectionTick != 0 {
+ n += 1 + sovTypes(uint64(m.ElectionTick))
+ }
+ return n
+}
+
+func (m *RaftMember) Size() (n int) {
+ var l int
+ _ = l
+ if m.RaftID != 0 {
+ n += 1 + sovTypes(uint64(m.RaftID))
+ }
+ l = len(m.Addr)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = m.Status.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ return n
+}
+
+func (m *Placement) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Constraints) > 0 {
+ for _, s := range m.Constraints {
+ l = len(s)
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RootCA) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.CAKey)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = len(m.CACert)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = len(m.CACertHash)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *Certificate) Size() (n int) {
+ var l int
+ _ = l
+ if m.Role != 0 {
+ n += 1 + sovTypes(uint64(m.Role))
+ }
+ l = len(m.CSR)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = m.Status.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ l = len(m.Certificate)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ l = len(m.CN)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *EncryptionKey) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Subsystem)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.Algorithm != 0 {
+ n += 1 + sovTypes(uint64(m.Algorithm))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.LamportTime != 0 {
+ n += 1 + sovTypes(uint64(m.LamportTime))
+ }
+ return n
+}
+
+func (m *ManagerStatus) Size() (n int) {
+ var l int
+ _ = l
+ l = m.Raft.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ return n
+}
+
+func sovTypes(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozTypes(x uint64) (n int) {
+ return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
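+
+// sovTypes is the size counterpart of encodeVarintTypes: one byte per
+// started group of seven bits, minimum one. sozTypes is the zigzag
+// variant for sint fields: (x << 1) ^ (x >> 63) maps 0 -> 0, -1 -> 1,
+// 1 -> 2, -2 -> 3, ..., so small negative values stay small instead of
+// always occupying ten bytes.
+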
+func (this *Version) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Version{`,
+ `Index:` + fmt.Sprintf("%v", this.Index) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Annotations) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&Annotations{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `}`,
+ }, "")
+ return s
+}
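+
+// The generated String methods build a stable debug representation: map
+// keys are sorted with the gogo/protobuf sortkeys helper before printing,
+// keeping the output deterministic despite Go's randomized map iteration.
+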
+func (this *Resources) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Resources{`,
+ `NanoCPUs:` + fmt.Sprintf("%v", this.NanoCPUs) + `,`,
+ `MemoryBytes:` + fmt.Sprintf("%v", this.MemoryBytes) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceRequirements) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceRequirements{`,
+ `Limits:` + strings.Replace(fmt.Sprintf("%v", this.Limits), "Resources", "Resources", 1) + `,`,
+ `Reservations:` + strings.Replace(fmt.Sprintf("%v", this.Reservations), "Resources", "Resources", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Platform) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Platform{`,
+ `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`,
+ `OS:` + fmt.Sprintf("%v", this.OS) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PluginDescription) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PluginDescription{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EngineDescription) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&EngineDescription{`,
+ `EngineVersion:` + fmt.Sprintf("%v", this.EngineVersion) + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `Plugins:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Plugins), "PluginDescription", "PluginDescription", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NodeDescription) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NodeDescription{`,
+ `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`,
+ `Platform:` + strings.Replace(fmt.Sprintf("%v", this.Platform), "Platform", "Platform", 1) + `,`,
+ `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resources", "Resources", 1) + `,`,
+ `Engine:` + strings.Replace(fmt.Sprintf("%v", this.Engine), "EngineDescription", "EngineDescription", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RaftMemberStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RaftMemberStatus{`,
+ `Leader:` + fmt.Sprintf("%v", this.Leader) + `,`,
+ `Reachability:` + fmt.Sprintf("%v", this.Reachability) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NodeStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NodeStatus{`,
+ `State:` + fmt.Sprintf("%v", this.State) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Image) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Image{`,
+ `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Mount) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Mount{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Source:` + fmt.Sprintf("%v", this.Source) + `,`,
+ `Target:` + fmt.Sprintf("%v", this.Target) + `,`,
+ `Writable:` + fmt.Sprintf("%v", this.Writable) + `,`,
+ `BindOptions:` + strings.Replace(fmt.Sprintf("%v", this.BindOptions), "Mount_BindOptions", "Mount_BindOptions", 1) + `,`,
+ `VolumeOptions:` + strings.Replace(fmt.Sprintf("%v", this.VolumeOptions), "Mount_VolumeOptions", "Mount_VolumeOptions", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Mount_BindOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Mount_BindOptions{`,
+ `Propagation:` + fmt.Sprintf("%v", this.Propagation) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Mount_VolumeOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&Mount_VolumeOptions{`,
+ `Populate:` + fmt.Sprintf("%v", this.Populate) + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `DriverConfig:` + strings.Replace(fmt.Sprintf("%v", this.DriverConfig), "Driver", "Driver", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RestartPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RestartPolicy{`,
+ `Condition:` + fmt.Sprintf("%v", this.Condition) + `,`,
+ `Delay:` + strings.Replace(fmt.Sprintf("%v", this.Delay), "Duration", "docker_swarmkit_v11.Duration", 1) + `,`,
+ `MaxAttempts:` + fmt.Sprintf("%v", this.MaxAttempts) + `,`,
+ `Window:` + strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "docker_swarmkit_v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UpdateConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UpdateConfig{`,
+ `Parallelism:` + fmt.Sprintf("%v", this.Parallelism) + `,`,
+ `Delay:` + strings.Replace(strings.Replace(this.Delay.String(), "Duration", "docker_swarmkit_v11.Duration", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ContainerStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ContainerStatus{`,
+ `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+ `PID:` + fmt.Sprintf("%v", this.PID) + `,`,
+ `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TaskStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TaskStatus{`,
+ `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "docker_swarmkit_v1.Timestamp", 1) + `,`,
+ `State:` + fmt.Sprintf("%v", this.State) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `Err:` + fmt.Sprintf("%v", this.Err) + `,`,
+ `RuntimeStatus:` + fmt.Sprintf("%v", this.RuntimeStatus) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TaskStatus_Container) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TaskStatus_Container{`,
+ `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerStatus", "ContainerStatus", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IPAMConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForReserved := make([]string, 0, len(this.Reserved))
+ for k := range this.Reserved {
+ keysForReserved = append(keysForReserved, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForReserved)
+ mapStringForReserved := "map[string]string{"
+ for _, k := range keysForReserved {
+ mapStringForReserved += fmt.Sprintf("%v: %v,", k, this.Reserved[k])
+ }
+ mapStringForReserved += "}"
+ s := strings.Join([]string{`&IPAMConfig{`,
+ `Family:` + fmt.Sprintf("%v", this.Family) + `,`,
+ `Subnet:` + fmt.Sprintf("%v", this.Subnet) + `,`,
+ `Range:` + fmt.Sprintf("%v", this.Range) + `,`,
+ `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`,
+ `Reserved:` + mapStringForReserved + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PortConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PortConfig{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
+ `TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`,
+ `PublishedPort:` + fmt.Sprintf("%v", this.PublishedPort) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Driver) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForOptions := make([]string, 0, len(this.Options))
+ for k := range this.Options {
+ keysForOptions = append(keysForOptions, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
+ mapStringForOptions := "map[string]string{"
+ for _, k := range keysForOptions {
+ mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k])
+ }
+ mapStringForOptions += "}"
+ s := strings.Join([]string{`&Driver{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Options:` + mapStringForOptions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IPAMOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IPAMOptions{`,
+ `Driver:` + strings.Replace(fmt.Sprintf("%v", this.Driver), "Driver", "Driver", 1) + `,`,
+ `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "IPAMConfig", "IPAMConfig", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Peer) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Peer{`,
+ `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`,
+ `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WeightedPeer) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&WeightedPeer{`,
+ `Peer:` + strings.Replace(fmt.Sprintf("%v", this.Peer), "Peer", "Peer", 1) + `,`,
+ `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IssuanceStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IssuanceStatus{`,
+ `State:` + fmt.Sprintf("%v", this.State) + `,`,
+ `Err:` + fmt.Sprintf("%v", this.Err) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AcceptancePolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AcceptancePolicy{`,
+ `Policies:` + strings.Replace(fmt.Sprintf("%v", this.Policies), "AcceptancePolicy_RoleAdmissionPolicy", "AcceptancePolicy_RoleAdmissionPolicy", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AcceptancePolicy_RoleAdmissionPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AcceptancePolicy_RoleAdmissionPolicy{`,
+ `Role:` + fmt.Sprintf("%v", this.Role) + `,`,
+ `Autoaccept:` + fmt.Sprintf("%v", this.Autoaccept) + `,`,
+ `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "AcceptancePolicy_RoleAdmissionPolicy_HashedSecret", "AcceptancePolicy_RoleAdmissionPolicy_HashedSecret", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AcceptancePolicy_RoleAdmissionPolicy_HashedSecret) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AcceptancePolicy_RoleAdmissionPolicy_HashedSecret{`,
+ `Data:` + fmt.Sprintf("%v", this.Data) + `,`,
+ `Alg:` + fmt.Sprintf("%v", this.Alg) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CAConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CAConfig{`,
+ `NodeCertExpiry:` + strings.Replace(fmt.Sprintf("%v", this.NodeCertExpiry), "Duration", "docker_swarmkit_v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OrchestrationConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&OrchestrationConfig{`,
+ `TaskHistoryRetentionLimit:` + fmt.Sprintf("%v", this.TaskHistoryRetentionLimit) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DispatcherConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DispatcherConfig{`,
+ `HeartbeatPeriod:` + fmt.Sprintf("%v", this.HeartbeatPeriod) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RaftConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RaftConfig{`,
+ `SnapshotInterval:` + fmt.Sprintf("%v", this.SnapshotInterval) + `,`,
+ `KeepOldSnapshots:` + fmt.Sprintf("%v", this.KeepOldSnapshots) + `,`,
+ `LogEntriesForSlowFollowers:` + fmt.Sprintf("%v", this.LogEntriesForSlowFollowers) + `,`,
+ `HeartbeatTick:` + fmt.Sprintf("%v", this.HeartbeatTick) + `,`,
+ `ElectionTick:` + fmt.Sprintf("%v", this.ElectionTick) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RaftMember) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RaftMember{`,
+ `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`,
+ `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "RaftMemberStatus", "RaftMemberStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Placement) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Placement{`,
+ `Constraints:` + fmt.Sprintf("%v", this.Constraints) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RootCA) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RootCA{`,
+ `CAKey:` + fmt.Sprintf("%v", this.CAKey) + `,`,
+ `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`,
+ `CACertHash:` + fmt.Sprintf("%v", this.CACertHash) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Certificate) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Certificate{`,
+ `Role:` + fmt.Sprintf("%v", this.Role) + `,`,
+ `CSR:` + fmt.Sprintf("%v", this.CSR) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IssuanceStatus", "IssuanceStatus", 1), `&`, ``, 1) + `,`,
+ `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`,
+ `CN:` + fmt.Sprintf("%v", this.CN) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EncryptionKey) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EncryptionKey{`,
+ `Subsystem:` + fmt.Sprintf("%v", this.Subsystem) + `,`,
+ `Algorithm:` + fmt.Sprintf("%v", this.Algorithm) + `,`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `LamportTime:` + fmt.Sprintf("%v", this.LamportTime) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ManagerStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ManagerStatus{`,
+ `Raft:` + strings.Replace(strings.Replace(this.Raft.String(), "RaftMember", "RaftMember", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringTypes(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *Version) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Version: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Index |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
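+
+// Unmarshal reads one key varint at a time, splitting it into fieldNum
+// (wire >> 3) and wireType (wire & 0x7); known fields are decoded in the
+// switch and unknown ones are skipped via skipTypes, so newer writers
+// remain readable by older readers. Wire type 4 (the deprecated
+// end-group marker) is rejected, and reads past the buffer report
+// io.ErrUnexpectedEOF. A minimal round trip, as a sketch:
+//
+//	v := Version{Index: 42}
+//	buf, _ := v.Marshal() // buf == []byte{0x8, 0x2a}
+//	var out Version
+//	_ = out.Unmarshal(buf) // out.Index == 42
+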
+func (m *Annotations) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Annotations: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Annotations: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
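+
+// Map entries are decoded inside the bounds set by the entry length:
+// the key (field 1) and value (field 2) strings are read from that
+// sub-range, and the map itself is allocated lazily on the first entry.
+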
+func (m *Resources) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Resources: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Resources: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NanoCPUs", wireType)
+ }
+ m.NanoCPUs = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.NanoCPUs |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemoryBytes", wireType)
+ }
+ m.MemoryBytes = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.MemoryBytes |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceRequirements) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Limits == nil {
+ m.Limits = &Resources{}
+ }
+ if err := m.Limits.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reservations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Reservations == nil {
+ m.Reservations = &Resources{}
+ }
+ if err := m.Reservations.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
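+
+// Nested message fields are decoded by bounding the payload with its
+// varint length, allocating the pointer if it is still nil, and
+// recursing into the field's own Unmarshal on data[iNdEx:postIndex].
+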
+func (m *Platform) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Platform: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType)
+ }
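+ // Wire type 2 (length-delimited): a varint byte length precedes the payload.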
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Architecture = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.OS = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PluginDescription) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PluginDescription: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PluginDescription: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EngineDescription) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EngineDescription: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EngineDescription: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EngineVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EngineVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
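+ // A map entry is a nested message: a varint tag and length-prefixed
+ // string for the key, then a varint tag and length-prefixed string
+ // for the value.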
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Plugins", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Plugins = append(m.Plugins, PluginDescription{})
+ if err := m.Plugins[len(m.Plugins)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeDescription) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeDescription: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeDescription: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hostname = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Platform == nil {
+ m.Platform = &Platform{}
+ }
+ if err := m.Platform.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Resources == nil {
+ m.Resources = &Resources{}
+ }
+ if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Engine", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Engine == nil {
+ m.Engine = &EngineDescription{}
+ }
+ if err := m.Engine.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RaftMemberStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RaftMemberStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RaftMemberStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
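+ // Bools are varints on the wire; any nonzero value decodes to true.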
+ m.Leader = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reachability", wireType)
+ }
+ m.Reachability = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Reachability |= (RaftMemberStatus_Reachability(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+ }
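+ // Enum fields are plain varints, decoded directly into the typed constant.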
+ m.State = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.State |= (NodeStatus_State(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Image) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Image: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reference = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Mount) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Mount: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Type |= (Mount_MountType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Source = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Target = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Writable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Writable = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BindOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
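+ // Embedded message: allocate the field if nil, then recurse into its
+ // own Unmarshal over the length-delimited sub-slice.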
+ if m.BindOptions == nil {
+ m.BindOptions = &Mount_BindOptions{}
+ }
+ if err := m.BindOptions.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.VolumeOptions == nil {
+ m.VolumeOptions = &Mount_VolumeOptions{}
+ }
+ if err := m.VolumeOptions.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Mount_BindOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BindOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BindOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Propagation", wireType)
+ }
+ m.Propagation = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Propagation |= (Mount_BindOptions_MountPropagation(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Mount_VolumeOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: VolumeOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: VolumeOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Populate", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Populate = bool(v != 0)
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DriverConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DriverConfig == nil {
+ m.DriverConfig = &Driver{}
+ }
+ if err := m.DriverConfig.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RestartPolicy) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RestartPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RestartPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Condition", wireType)
+ }
+ m.Condition = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Condition |= (RestartPolicy_RestartCondition(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Delay == nil {
+ m.Delay = &docker_swarmkit_v11.Duration{}
+ }
+ if err := m.Delay.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxAttempts", wireType)
+ }
+ m.MaxAttempts = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.MaxAttempts |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Window == nil {
+ m.Window = &docker_swarmkit_v11.Duration{}
+ }
+ if err := m.Window.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UpdateConfig) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType)
+ }
+ m.Parallelism = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Parallelism |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
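+ // No nil check here: the generator allocates only nullable (pointer)
+ // fields, so Delay is embedded by value in UpdateConfig and decoded in place.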
+ if err := m.Delay.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ContainerStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContainerID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PID", wireType)
+ }
+ m.PID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.PID |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType)
+ }
+ m.ExitCode = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.ExitCode |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TaskStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TaskStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TaskStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Timestamp == nil {
+ m.Timestamp = &docker_swarmkit_v1.Timestamp{}
+ }
+ if err := m.Timestamp.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+ }
+ m.State = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.State |= (TaskState(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Err = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
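+ // Oneof field: decode into a ContainerStatus, then wrap it so that
+ // RuntimeStatus records which variant was set.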
+ v := &ContainerStatus{}
+ if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.RuntimeStatus = &TaskStatus_Container{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IPAMConfig) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IPAMConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IPAMConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Family", wireType)
+ }
+ m.Family = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Family |= (IPAMConfig_AddressFamily(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subnet", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Subnet = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Range = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Gateway = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reserved", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Reserved == nil {
+ m.Reserved = make(map[string]string)
+ }
+ m.Reserved[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PortConfig) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PortConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PortConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ m.Protocol = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Protocol |= (PortConfig_Protocol(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType)
+ }
+ m.TargetPort = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.TargetPort |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PublishedPort", wireType)
+ }
+ m.PublishedPort = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.PublishedPort |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Driver) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Driver: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Driver: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Options == nil {
+ m.Options = make(map[string]string)
+ }
+ m.Options[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IPAMOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IPAMOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IPAMOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Driver == nil {
+ m.Driver = &Driver{}
+ }
+ if err := m.Driver.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
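+ // Repeated message field: each occurrence on the wire appends one
+ // element. (There is no case 2 in this switch; a tag with field
+ // number 2 would be skipped as unknown below.)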
+ m.Configs = append(m.Configs, &IPAMConfig{})
+ if err := m.Configs[len(m.Configs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Peer) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Peer: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Addr = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WeightedPeer) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WeightedPeer: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WeightedPeer: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Peer == nil {
+ m.Peer = &Peer{}
+ }
+ if err := m.Peer.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType)
+ }
+ m.Weight = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Weight |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IssuanceStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IssuanceStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IssuanceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+ }
+ m.State = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.State |= (IssuanceStatus_State(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Err = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AcceptancePolicy) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AcceptancePolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AcceptancePolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Policies", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Policies = append(m.Policies, &AcceptancePolicy_RoleAdmissionPolicy{})
+ if err := m.Policies[len(m.Policies)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AcceptancePolicy_RoleAdmissionPolicy) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RoleAdmissionPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RoleAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ m.Role = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Role |= (NodeRole(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Autoaccept", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Autoaccept = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Secret == nil {
+ m.Secret = &AcceptancePolicy_RoleAdmissionPolicy_HashedSecret{}
+ }
+ if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AcceptancePolicy_RoleAdmissionPolicy_HashedSecret) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashedSecret: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashedSecret: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alg", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Alg = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CAConfig) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CAConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CAConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeCertExpiry", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeCertExpiry == nil {
+ m.NodeCertExpiry = &docker_swarmkit_v11.Duration{}
+ }
+ if err := m.NodeCertExpiry.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OrchestrationConfig) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OrchestrationConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OrchestrationConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TaskHistoryRetentionLimit", wireType)
+ }
+ m.TaskHistoryRetentionLimit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.TaskHistoryRetentionLimit |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DispatcherConfig) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DispatcherConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DispatcherConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatPeriod", wireType)
+ }
+ m.HeartbeatPeriod = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.HeartbeatPeriod |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RaftConfig) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RaftConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RaftConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SnapshotInterval", wireType)
+ }
+ m.SnapshotInterval = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.SnapshotInterval |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeepOldSnapshots", wireType)
+ }
+ m.KeepOldSnapshots = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.KeepOldSnapshots |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LogEntriesForSlowFollowers", wireType)
+ }
+ m.LogEntriesForSlowFollowers = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.LogEntriesForSlowFollowers |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatTick", wireType)
+ }
+ m.HeartbeatTick = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.HeartbeatTick |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ElectionTick", wireType)
+ }
+ m.ElectionTick = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.ElectionTick |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RaftMember) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RaftMember: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RaftMember: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType)
+ }
+ m.RaftID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.RaftID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Addr = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Placement) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Placement: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Placement: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Constraints = append(m.Constraints, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RootCA) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RootCA: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RootCA: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CAKey", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CAKey = append(m.CAKey[:0], data[iNdEx:postIndex]...)
+ if m.CAKey == nil {
+ m.CAKey = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CACert", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CACert = append(m.CACert[:0], data[iNdEx:postIndex]...)
+ if m.CACert == nil {
+ m.CACert = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CACertHash", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CACertHash = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Certificate) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Certificate: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Certificate: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ m.Role = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Role |= (NodeRole(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CSR", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CSR = append(m.CSR[:0], data[iNdEx:postIndex]...)
+ if m.CSR == nil {
+ m.CSR = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Certificate = append(m.Certificate[:0], data[iNdEx:postIndex]...)
+ if m.Certificate == nil {
+ m.Certificate = []byte{}
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CN", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CN = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EncryptionKey) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EncryptionKey: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EncryptionKey: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subsystem", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Subsystem = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Algorithm", wireType)
+ }
+ m.Algorithm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Algorithm |= (EncryptionKey_Algorithm(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LamportTime", wireType)
+ }
+ m.LamportTime = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.LamportTime |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ManagerStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ManagerStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ManagerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Raft", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Raft.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTypes(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthTypes
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipTypes(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorTypes = []byte{
+ // 2940 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x58, 0xcb, 0x73, 0x1b, 0xc7,
+ 0xd1, 0x27, 0x9e, 0x04, 0x06, 0x20, 0x09, 0xad, 0x64, 0x99, 0x82, 0xf9, 0x51, 0xfa, 0xd6, 0x52,
+ 0x2c, 0x3f, 0x02, 0x5b, 0xb4, 0x93, 0x52, 0xec, 0x72, 0xec, 0xc5, 0x83, 0x22, 0x22, 0x12, 0x44,
+ 0x0d, 0x48, 0xaa, 0x7c, 0x48, 0x50, 0xcb, 0xc5, 0x90, 0x58, 0x73, 0xb1, 0x8b, 0xec, 0x2e, 0x48,
+ 0xa1, 0x52, 0xa9, 0x92, 0x73, 0x49, 0xe2, 0x53, 0xee, 0x29, 0x57, 0x2a, 0x95, 0x5c, 0xf3, 0x0f,
+ 0xe4, 0xa4, 0xa3, 0x8e, 0x49, 0xa5, 0x2a, 0xe5, 0x93, 0x2b, 0x76, 0x0e, 0xb9, 0xa6, 0x2a, 0xa9,
+ 0xf8, 0x90, 0x1c, 0xd2, 0x3d, 0x8f, 0xc5, 0x43, 0x2b, 0x5a, 0x8a, 0x0f, 0x28, 0xec, 0xf4, 0xfc,
+ 0xba, 0xa7, 0xbb, 0xa7, 0xa7, 0xbb, 0x67, 0x48, 0x21, 0x1c, 0x0f, 0x59, 0x50, 0x19, 0xfa, 0x5e,
+ 0xe8, 0x69, 0x5a, 0xcf, 0xb3, 0x4e, 0x98, 0x5f, 0x09, 0xce, 0x4c, 0x7f, 0x70, 0x62, 0x87, 0x95,
+ 0xd3, 0x5b, 0xe5, 0x2b, 0xa1, 0x3d, 0x60, 0x41, 0x68, 0x0e, 0x86, 0xaf, 0x47, 0x5f, 0x02, 0x5e,
+ 0x7e, 0xbe, 0x37, 0xf2, 0xcd, 0xd0, 0xf6, 0xdc, 0xd7, 0xd5, 0x87, 0x9c, 0xb8, 0x74, 0xec, 0x1d,
+ 0x7b, 0xfc, 0xf3, 0x75, 0xfc, 0x12, 0x54, 0xfd, 0x2a, 0x59, 0x3c, 0x60, 0x7e, 0x00, 0x30, 0xed,
+ 0x12, 0xc9, 0xd8, 0x6e, 0x8f, 0xdd, 0x5f, 0x4d, 0x5c, 0x4b, 0xdc, 0x4c, 0x53, 0x31, 0xd0, 0x7f,
+ 0x9d, 0x20, 0x05, 0xc3, 0x75, 0xbd, 0x90, 0xcb, 0x0a, 0x34, 0x8d, 0xa4, 0x5d, 0x73, 0xc0, 0x38,
+ 0x28, 0x4f, 0xf9, 0xb7, 0x56, 0x23, 0x59, 0xc7, 0x3c, 0x64, 0x4e, 0xb0, 0x9a, 0xbc, 0x96, 0xba,
+ 0x59, 0xd8, 0x78, 0xb5, 0xf2, 0xb8, 0xce, 0x95, 0x29, 0x21, 0x95, 0x6d, 0x8e, 0x6e, 0xb8, 0xa1,
+ 0x3f, 0xa6, 0x92, 0xb5, 0xfc, 0x1d, 0x52, 0x98, 0x22, 0x6b, 0x25, 0x92, 0x3a, 0x61, 0x63, 0xb9,
+ 0x0c, 0x7e, 0xa2, 0x7e, 0xa7, 0xa6, 0x33, 0x62, 0xb0, 0x08, 0xd2, 0xc4, 0xe0, 0xed, 0xe4, 0xed,
+ 0x84, 0xfe, 0x01, 0xc9, 0x53, 0x16, 0x78, 0x23, 0xdf, 0x62, 0x81, 0xf6, 0x32, 0xc9, 0xbb, 0xa6,
+ 0xeb, 0x75, 0xad, 0xe1, 0x28, 0xe0, 0xec, 0xa9, 0x6a, 0xf1, 0x8b, 0xcf, 0xae, 0xe6, 0x5a, 0x40,
+ 0xac, 0xb5, 0xf7, 0x03, 0x9a, 0xc3, 0xe9, 0x1a, 0xcc, 0x6a, 0xff, 0x4f, 0x8a, 0x03, 0x36, 0xf0,
+ 0xfc, 0x71, 0xf7, 0x70, 0x1c, 0xb2, 0x80, 0x0b, 0x4e, 0xd1, 0x82, 0xa0, 0x55, 0x91, 0xa4, 0xff,
+ 0x22, 0x41, 0x2e, 0x29, 0xd9, 0x94, 0xfd, 0x70, 0x64, 0xfb, 0x6c, 0xc0, 0xdc, 0x30, 0xd0, 0xbe,
+ 0x05, 0x36, 0xdb, 0x03, 0x3b, 0x14, 0x6b, 0x14, 0x36, 0xfe, 0x2f, 0xce, 0xe6, 0x48, 0x2b, 0x2a,
+ 0xc1, 0x9a, 0x41, 0x8a, 0x3e, 0x0b, 0x98, 0x7f, 0x2a, 0x3c, 0xc1, 0x97, 0xfc, 0x4a, 0xe6, 0x19,
+ 0x16, 0x7d, 0x93, 0xe4, 0xda, 0x8e, 0x19, 0x1e, 0x79, 0xfe, 0x40, 0xd3, 0x49, 0xd1, 0xf4, 0xad,
+ 0xbe, 0x1d, 0x32, 0x2b, 0x1c, 0xf9, 0x6a, 0x57, 0x66, 0x68, 0xda, 0x65, 0x92, 0xf4, 0xc4, 0x42,
+ 0xf9, 0x6a, 0x16, 0x3c, 0x91, 0xdc, 0xed, 0x50, 0xa0, 0xe8, 0xef, 0x90, 0x0b, 0x6d, 0x67, 0x74,
+ 0x6c, 0xbb, 0x75, 0x16, 0x58, 0xbe, 0x3d, 0x44, 0xe9, 0xb8, 0xbd, 0x18, 0x7c, 0x6a, 0x7b, 0xf1,
+ 0x3b, 0xda, 0xf2, 0xe4, 0x64, 0xcb, 0xf5, 0x9f, 0x26, 0xc9, 0x85, 0x86, 0x0b, 0xcc, 0x6c, 0x9a,
+ 0xfb, 0x06, 0x59, 0x66, 0x9c, 0xd8, 0x3d, 0x15, 0x41, 0x25, 0xe5, 0x2c, 0x09, 0xaa, 0x8a, 0xb4,
+ 0xe6, 0x5c, 0xbc, 0xdc, 0x8a, 0x33, 0xff, 0x31, 0xe9, 0x71, 0x51, 0xa3, 0x35, 0xc8, 0xe2, 0x90,
+ 0x1b, 0x11, 0xac, 0xa6, 0xb8, 0xac, 0x1b, 0x71, 0xb2, 0x1e, 0xb3, 0xb3, 0x9a, 0x7e, 0xf4, 0xd9,
+ 0xd5, 0x05, 0xaa, 0x78, 0xbf, 0x4e, 0xf0, 0xfd, 0x35, 0x41, 0x56, 0x5a, 0x5e, 0x6f, 0xc6, 0x0f,
+ 0x65, 0x92, 0xeb, 0x7b, 0x41, 0x38, 0x75, 0x50, 0xa2, 0xb1, 0x76, 0x9b, 0xe4, 0x86, 0x72, 0xfb,
+ 0xe4, 0xee, 0xaf, 0xc5, 0xab, 0x2c, 0x30, 0x34, 0x42, 0x6b, 0xef, 0x90, 0xbc, 0xaf, 0x62, 0x02,
+ 0xac, 0x7d, 0x8a, 0xc0, 0x99, 0xe0, 0xb5, 0x77, 0x49, 0x56, 0x6c, 0xc2, 0x6a, 0x9a, 0x73, 0xde,
+ 0x78, 0x2a, 0x9f, 0x53, 0xc9, 0xa4, 0x7f, 0x9a, 0x20, 0x25, 0x6a, 0x1e, 0x85, 0x3b, 0x6c, 0x70,
+ 0xc8, 0xfc, 0x0e, 0x1c, 0x64, 0x38, 0x3f, 0x97, 0x61, 0x1f, 0x99, 0xd9, 0x63, 0x3e, 0x37, 0x32,
+ 0x47, 0xe5, 0x48, 0xdb, 0xc7, 0x20, 0x37, 0xad, 0xbe, 0x79, 0x68, 0x3b, 0x76, 0x38, 0xe6, 0x66,
+ 0x2e, 0xc7, 0xef, 0xf2, 0xbc, 0x4c, 0x50, 0x7e, 0xc2, 0x48, 0x67, 0xc4, 0x68, 0xab, 0x64, 0x11,
+ 0x72, 0x5d, 0x60, 0x1e, 0x33, 0x6e, 0x7d, 0x9e, 0xaa, 0x21, 0x84, 0x72, 0x71, 0x9a, 0x4f, 0x2b,
+ 0x90, 0xc5, 0xfd, 0xd6, 0xdd, 0xd6, 0xee, 0xbd, 0x56, 0x69, 0x41, 0x5b, 0x21, 0x85, 0xfd, 0x16,
+ 0x6d, 0x18, 0xb5, 0x2d, 0xa3, 0xba, 0xdd, 0x28, 0x25, 0xb4, 0x25, 0x48, 0x17, 0xd1, 0x30, 0xa9,
+ 0xff, 0x2a, 0x41, 0x08, 0x6e, 0xa0, 0x34, 0xea, 0x6d, 0x92, 0x81, 0x7c, 0x1a, 0x8a, 0x8d, 0x5b,
+ 0xde, 0xb8, 0x1e, 0xa7, 0xf5, 0x04, 0x5e, 0xc1, 0x3f, 0x46, 0x05, 0xcb, 0xb4, 0x86, 0xc9, 0x79,
+ 0x0d, 0x33, 0x1c, 0x39, 0xab, 0x5a, 0x8e, 0xa4, 0xeb, 0xf8, 0x95, 0xd0, 0xf2, 0x24, 0x03, 0x3a,
+ 0xd5, 0x3f, 0x28, 0x25, 0x21, 0xf8, 0x8a, 0xf5, 0x66, 0xa7, 0xb6, 0xdb, 0x6a, 0x35, 0x6a, 0x7b,
+ 0x8d, 0x7a, 0x29, 0xa5, 0xdf, 0x20, 0x99, 0xe6, 0x00, 0xa4, 0x68, 0x6b, 0x18, 0x01, 0x47, 0xcc,
+ 0x67, 0xae, 0xa5, 0x02, 0x6b, 0x42, 0xd0, 0xff, 0xb5, 0x48, 0x32, 0x3b, 0xde, 0xc8, 0x0d, 0xb5,
+ 0x8d, 0xa9, 0x53, 0xbc, 0xbc, 0xb1, 0x1e, 0x67, 0x02, 0x07, 0x56, 0xf6, 0x00, 0x25, 0x4f, 0x39,
+ 0x6c, 0xa6, 0x88, 0x15, 0xa9, 0xba, 0x1c, 0x21, 0x3d, 0x34, 0xfd, 0x63, 0x16, 0x4a, 0xa7, 0xcb,
+ 0x11, 0xc6, 0xf8, 0x99, 0x6f, 0x87, 0xe6, 0xa1, 0x23, 0x42, 0x2a, 0x47, 0xa3, 0xb1, 0xb6, 0x45,
+ 0x8a, 0x87, 0x50, 0x3e, 0xba, 0xde, 0x50, 0x64, 0xb9, 0xcc, 0x93, 0x43, 0x4e, 0xe8, 0x51, 0x05,
+ 0xf4, 0xae, 0x00, 0xd3, 0xc2, 0xe1, 0x64, 0xa0, 0xb5, 0xc8, 0xf2, 0xa9, 0xe7, 0x8c, 0x06, 0x2c,
+ 0x92, 0x95, 0xe5, 0xb2, 0x5e, 0x7a, 0xb2, 0xac, 0x03, 0x8e, 0x57, 0xd2, 0x96, 0x4e, 0xa7, 0x87,
+ 0xe5, 0x9f, 0xa4, 0x48, 0x61, 0x6a, 0x31, 0xad, 0x43, 0x0a, 0x50, 0x08, 0x87, 0xe6, 0x31, 0x4f,
+ 0xae, 0xd2, 0x61, 0xb7, 0x9e, 0x4a, 0xd1, 0x4a, 0x7b, 0xc2, 0x48, 0xa7, 0xa5, 0xe8, 0x9f, 0x24,
+ 0x49, 0x61, 0x6a, 0x52, 0x7b, 0x85, 0xe4, 0x68, 0x9b, 0x36, 0x0f, 0x8c, 0xbd, 0x46, 0x69, 0xa1,
+ 0xbc, 0xf6, 0xf1, 0x27, 0xd7, 0x56, 0xb9, 0xb4, 0x69, 0x01, 0x6d, 0xdf, 0x3e, 0xc5, 0xf8, 0xb8,
+ 0x49, 0x16, 0x15, 0x34, 0x51, 0x7e, 0x01, 0xa0, 0xcf, 0xcf, 0x43, 0xa7, 0x90, 0xb4, 0xb3, 0x65,
+ 0x50, 0x08, 0x91, 0x64, 0x3c, 0x92, 0x76, 0xfa, 0xa6, 0xcf, 0x7a, 0xda, 0x37, 0x48, 0x56, 0x02,
+ 0x53, 0xe5, 0x32, 0x00, 0x2f, 0xcf, 0x03, 0x27, 0x38, 0xda, 0xd9, 0x36, 0x0e, 0x1a, 0xa5, 0x74,
+ 0x3c, 0x8e, 0x76, 0x1c, 0xf3, 0x94, 0x69, 0xd7, 0x21, 0x98, 0x39, 0x2c, 0x53, 0xbe, 0x02, 0xb0,
+ 0xe7, 0x1e, 0x13, 0x87, 0xa8, 0xf2, 0xea, 0xcf, 0x7e, 0xb3, 0xbe, 0xf0, 0xfb, 0xdf, 0xae, 0x97,
+ 0xe6, 0xa7, 0xcb, 0xff, 0x4c, 0x90, 0xa5, 0x99, 0x5d, 0xc2, 0x60, 0x1a, 0x7a, 0xc3, 0x91, 0xa3,
+ 0xce, 0x1d, 0x04, 0x93, 0x1a, 0x6b, 0x77, 0xe7, 0xaa, 0xc5, 0x9b, 0x4f, 0xb9, 0xf5, 0xb1, 0xf5,
+ 0xe2, 0x3d, 0xb2, 0xd4, 0x03, 0xff, 0x31, 0xbf, 0x6b, 0x79, 0xee, 0x91, 0x7d, 0x2c, 0xf3, 0x68,
+ 0x39, 0x4e, 0x66, 0x9d, 0x03, 0x69, 0x51, 0x30, 0xd4, 0x38, 0xfe, 0xeb, 0x54, 0x8a, 0x7b, 0x24,
+ 0x8d, 0xe7, 0x4d, 0x7b, 0x81, 0xa4, 0xab, 0xcd, 0x56, 0x1d, 0x42, 0xe1, 0x02, 0x78, 0x6f, 0x89,
+ 0xab, 0x8e, 0x13, 0x18, 0x5b, 0xda, 0x55, 0x92, 0x3d, 0xd8, 0xdd, 0xde, 0xdf, 0xc1, 0xed, 0xbf,
+ 0x08, 0xd3, 0x2b, 0xd1, 0xb4, 0x30, 0xae, 0x7c, 0x41, 0xba, 0x35, 0x1f, 0x4d, 0xe8, 0xff, 0x4e,
+ 0x92, 0x25, 0x8a, 0x5d, 0xa0, 0x1f, 0xb6, 0x3d, 0xc7, 0xb6, 0xc6, 0x5a, 0x9b, 0xe4, 0xc1, 0xbe,
+ 0x9e, 0x3d, 0x15, 0xd4, 0x1b, 0x4f, 0x28, 0x15, 0x13, 0x2e, 0x35, 0xaa, 0x29, 0x4e, 0x3a, 0x11,
+ 0x02, 0x29, 0x25, 0xd3, 0x63, 0x8e, 0x39, 0x3e, 0xaf, 0x66, 0xd5, 0x65, 0xc7, 0x49, 0x05, 0x94,
+ 0xf7, 0x57, 0xe6, 0xfd, 0xae, 0x19, 0x86, 0x6c, 0x30, 0x0c, 0x45, 0xcd, 0x4a, 0x43, 0x7f, 0x65,
+ 0xde, 0x37, 0x24, 0x49, 0x7b, 0x8b, 0x64, 0xcf, 0xc0, 0x6c, 0xef, 0x4c, 0x96, 0xa5, 0xf3, 0xe5,
+ 0x4a, 0xac, 0xfe, 0x31, 0x56, 0xa3, 0x39, 0x65, 0xd1, 0xad, 0xad, 0xdd, 0x56, 0x43, 0xb9, 0x55,
+ 0xce, 0xef, 0xba, 0x2d, 0xcf, 0xc5, 0x90, 0x25, 0xbb, 0xad, 0xee, 0xa6, 0xd1, 0xdc, 0xde, 0xa7,
+ 0xe8, 0xda, 0x4b, 0x00, 0x29, 0x45, 0x90, 0x4d, 0xd3, 0x76, 0xb0, 0x55, 0xba, 0x42, 0x52, 0x46,
+ 0x0b, 0x72, 0x70, 0xb9, 0x04, 0xd3, 0xc5, 0x68, 0xda, 0x70, 0xc7, 0x93, 0x68, 0x9e, 0x5f, 0x57,
+ 0xff, 0x90, 0x14, 0xf7, 0x87, 0x3d, 0x88, 0x54, 0x11, 0x21, 0xda, 0x35, 0x48, 0x29, 0xa6, 0x6f,
+ 0x3a, 0x0e, 0x73, 0xec, 0x60, 0x20, 0xbb, 0xe9, 0x69, 0x12, 0xb4, 0x00, 0x4f, 0xef, 0x4b, 0xd9,
+ 0xa9, 0x08, 0x06, 0xfd, 0xc7, 0x64, 0x05, 0x56, 0x09, 0x4d, 0x28, 0xc9, 0xaa, 0x08, 0x6f, 0x90,
+ 0xa2, 0xa5, 0x48, 0x5d, 0xbb, 0x27, 0x42, 0xb1, 0xba, 0x02, 0x8d, 0x5e, 0x21, 0x82, 0x36, 0xeb,
+ 0xb4, 0x10, 0x81, 0x9a, 0x3d, 0xb4, 0x73, 0x08, 0x50, 0x5c, 0x3e, 0x53, 0x5d, 0x04, 0x68, 0xaa,
+ 0x0d, 0x10, 0xa4, 0x81, 0x17, 0xf3, 0xec, 0xbe, 0x1d, 0xc2, 0xf1, 0xe8, 0x89, 0x32, 0x9b, 0xa1,
+ 0x39, 0x24, 0xd4, 0x60, 0xac, 0x7f, 0x94, 0x24, 0x64, 0xcf, 0x0c, 0x4e, 0xe4, 0xd2, 0xd0, 0x90,
+ 0x44, 0xd7, 0x8f, 0xf3, 0xda, 0xe0, 0x3d, 0x05, 0xa2, 0x13, 0xbc, 0xf6, 0xa6, 0xaa, 0xb3, 0xa2,
+ 0x3b, 0x88, 0x67, 0x94, 0x6b, 0xc5, 0x15, 0xd8, 0xd9, 0x16, 0x00, 0x0f, 0x22, 0xf3, 0x7d, 0x1e,
+ 0x45, 0x70, 0x10, 0xe1, 0x13, 0x6e, 0x25, 0xf9, 0xc8, 0x66, 0x59, 0x81, 0x5e, 0x8c, 0x5b, 0x64,
+ 0xce, 0xa1, 0x5b, 0x0b, 0x74, 0xc2, 0x57, 0x2d, 0x91, 0x65, 0x1f, 0x8e, 0x19, 0x68, 0xdd, 0x0d,
+ 0xf8, 0xb4, 0xfe, 0x27, 0xf0, 0x41, 0xb3, 0x6d, 0xec, 0xc8, 0xdd, 0xae, 0x93, 0xec, 0x91, 0x39,
+ 0xb0, 0x9d, 0xb1, 0x3c, 0x66, 0xaf, 0xc5, 0x2d, 0x31, 0xc1, 0x57, 0x8c, 0x5e, 0x0f, 0x9a, 0xb2,
+ 0x60, 0x93, 0xf3, 0x50, 0xc9, 0xcb, 0x8b, 0xef, 0xe8, 0xd0, 0x85, 0x22, 0xab, 0x8a, 0x2f, 0x1f,
+ 0x61, 0x32, 0xf1, 0x4d, 0x37, 0xb2, 0x56, 0x0c, 0xd0, 0x0b, 0x90, 0x49, 0xd9, 0x19, 0x44, 0x90,
+ 0xb0, 0x57, 0x0d, 0xa1, 0xf0, 0xe6, 0xc4, 0x5d, 0x81, 0xf5, 0xc0, 0x64, 0xcc, 0x96, 0x5f, 0xa5,
+ 0x0f, 0x95, 0x70, 0x91, 0x26, 0x23, 0xee, 0xf2, 0x3b, 0x3c, 0xa5, 0x4c, 0xa6, 0x9e, 0x29, 0xd3,
+ 0xbd, 0x41, 0x96, 0x66, 0xec, 0x7c, 0xac, 0xeb, 0x69, 0xb6, 0x0f, 0xde, 0x2a, 0xa5, 0xe5, 0xd7,
+ 0xb7, 0x4b, 0x59, 0xfd, 0x1f, 0xd0, 0x84, 0xb5, 0x3d, 0x7e, 0xac, 0xd0, 0xab, 0xf1, 0xb7, 0xcc,
+ 0x1c, 0xbf, 0xb3, 0x5a, 0x9e, 0x23, 0x63, 0x26, 0xb6, 0x09, 0x98, 0x48, 0xc1, 0x02, 0xcd, 0xe1,
+ 0x34, 0x62, 0x84, 0xf4, 0x5a, 0x10, 0xfd, 0x4b, 0x77, 0x08, 0x38, 0xee, 0xd6, 0x25, 0x4a, 0x04,
+ 0x09, 0x39, 0xf1, 0x0a, 0x33, 0x1c, 0x1d, 0xc2, 0x31, 0xed, 0xb3, 0x9e, 0xc0, 0xa4, 0x39, 0x66,
+ 0x29, 0xa2, 0x22, 0x4c, 0xaf, 0xc3, 0x25, 0x4c, 0xc9, 0x5c, 0x25, 0xa9, 0xbd, 0x5a, 0x1b, 0xf2,
+ 0xce, 0x0a, 0x64, 0x8d, 0x82, 0x22, 0x03, 0x09, 0x67, 0xf6, 0xeb, 0x6d, 0x48, 0x37, 0x33, 0x33,
+ 0x40, 0x2a, 0xa7, 0x31, 0x9d, 0xe8, 0xbf, 0x4c, 0x90, 0xac, 0xa8, 0x32, 0xb1, 0x16, 0x1b, 0x64,
+ 0x51, 0x75, 0x3d, 0xa2, 0xf4, 0xbd, 0xf4, 0xe4, 0x32, 0x55, 0x91, 0x55, 0x4f, 0xec, 0xa3, 0xe2,
+ 0x2b, 0xbf, 0x4d, 0x8a, 0xd3, 0x13, 0xcf, 0xb4, 0x8b, 0x3f, 0x22, 0x05, 0x0c, 0x14, 0x55, 0xa3,
+ 0x37, 0x48, 0x56, 0x54, 0x42, 0x79, 0xd4, 0xcf, 0xab, 0x99, 0x12, 0x09, 0x99, 0x6e, 0x51, 0xd4,
+ 0x59, 0x75, 0x3d, 0x5b, 0x3f, 0x3f, 0x1c, 0xa9, 0x82, 0xeb, 0xef, 0x91, 0x74, 0x9b, 0x81, 0x84,
+ 0x17, 0xc9, 0xa2, 0x0b, 0xa9, 0x67, 0x92, 0xd9, 0x08, 0xa4, 0xab, 0x2c, 0x36, 0xe0, 0x90, 0xb1,
+ 0xb2, 0x38, 0x05, 0xf9, 0x0c, 0x9c, 0x67, 0x42, 0xbc, 0xa9, 0x1b, 0x2a, 0x7e, 0xeb, 0x7b, 0xa4,
+ 0x78, 0x8f, 0xd9, 0xc7, 0xfd, 0x10, 0x76, 0x0c, 0x05, 0xbd, 0x46, 0xd2, 0x43, 0x16, 0x29, 0xbf,
+ 0x1a, 0x1b, 0x3a, 0x30, 0x4f, 0x39, 0x0a, 0x0f, 0xe4, 0x19, 0xe7, 0x96, 0x8f, 0x02, 0x72, 0xa4,
+ 0xff, 0x2e, 0x49, 0x96, 0x9b, 0x41, 0x30, 0x32, 0xa1, 0xe1, 0x96, 0x59, 0xf0, 0xbb, 0xb3, 0x17,
+ 0x86, 0x9b, 0xb1, 0x16, 0xce, 0xb0, 0xcc, 0x5e, 0x1a, 0x64, 0xe6, 0x4a, 0x46, 0x99, 0x4b, 0x7f,
+ 0x94, 0x50, 0xb7, 0x85, 0x1b, 0x53, 0xe7, 0xa6, 0xbc, 0x0a, 0x41, 0x74, 0x69, 0x5a, 0x12, 0xdb,
+ 0x77, 0x4f, 0x5c, 0xef, 0xcc, 0x85, 0x42, 0x0b, 0xb7, 0x87, 0x56, 0xe3, 0x1e, 0x44, 0xda, 0x65,
+ 0x00, 0x69, 0x33, 0x20, 0xca, 0x5c, 0x76, 0x86, 0x92, 0xda, 0x8d, 0x56, 0xbd, 0xd9, 0xba, 0x03,
+ 0xe5, 0xed, 0x71, 0x49, 0x6d, 0x06, 0xe5, 0xcc, 0x3d, 0x06, 0x77, 0x67, 0x9b, 0x9d, 0xce, 0x3e,
+ 0x6f, 0x15, 0x9f, 0x07, 0xd4, 0xc5, 0x19, 0x14, 0x0e, 0xa0, 0x4f, 0x04, 0x10, 0x56, 0x52, 0x00,
+ 0xa5, 0x63, 0x40, 0x58, 0x4c, 0x21, 0x81, 0x88, 0x08, 0xff, 0x5b, 0x92, 0x94, 0x0c, 0xcb, 0x62,
+ 0xc3, 0x10, 0xe7, 0x65, 0x77, 0xb2, 0x87, 0xdd, 0x1e, 0x7c, 0xd9, 0x0c, 0x5f, 0x4f, 0x30, 0x2c,
+ 0x6e, 0xc7, 0xbe, 0x18, 0xcd, 0xf1, 0x55, 0xa8, 0xe7, 0x30, 0xa3, 0x37, 0xb0, 0x03, 0x7c, 0x45,
+ 0x10, 0x34, 0x1a, 0x49, 0x2a, 0xff, 0x27, 0x41, 0x2e, 0xc6, 0x20, 0xb4, 0x37, 0x48, 0xda, 0x07,
+ 0xb2, 0xdc, 0x9e, 0xb5, 0x27, 0xdd, 0xe7, 0x90, 0x95, 0x72, 0xa4, 0xb6, 0x4e, 0x88, 0x39, 0x0a,
+ 0x3d, 0x93, 0xaf, 0xcf, 0x37, 0x26, 0x47, 0xa7, 0x28, 0xda, 0xf7, 0x21, 0x5b, 0x33, 0xcb, 0x97,
+ 0x57, 0xa2, 0xc2, 0x46, 0xe3, 0x7f, 0xd5, 0xbe, 0xb2, 0x65, 0x62, 0x46, 0xe9, 0x70, 0x61, 0x54,
+ 0x0a, 0x2d, 0xbf, 0x45, 0x8a, 0xd3, 0x74, 0x8c, 0x6e, 0x68, 0x2f, 0x4c, 0x6e, 0x40, 0x91, 0xf2,
+ 0x6f, 0x0c, 0x1a, 0xd3, 0x39, 0x56, 0x41, 0x03, 0x9f, 0x3a, 0x25, 0xb9, 0x9a, 0x21, 0xd3, 0xe7,
+ 0x26, 0x29, 0xf1, 0x43, 0x63, 0x31, 0x3f, 0xec, 0xb2, 0xfb, 0x43, 0xdb, 0x1f, 0xcb, 0xb8, 0x3f,
+ 0xbf, 0xbf, 0x5a, 0x46, 0xae, 0x1a, 0x30, 0x35, 0x38, 0x8f, 0x7e, 0x40, 0x2e, 0xee, 0xfa, 0x56,
+ 0x1f, 0x2a, 0xb6, 0x00, 0x48, 0xf1, 0xef, 0x91, 0xb5, 0x10, 0x2a, 0x73, 0xb7, 0x6f, 0x07, 0x21,
+ 0xbe, 0x9e, 0x81, 0x92, 0xcc, 0xc5, 0xf9, 0x2e, 0x7f, 0xe5, 0x12, 0xaf, 0x6e, 0xf4, 0x0a, 0x62,
+ 0xb6, 0x04, 0x84, 0x2a, 0xc4, 0x36, 0x02, 0xf4, 0x77, 0x49, 0xa9, 0x6e, 0x07, 0x43, 0x33, 0x04,
+ 0xd9, 0xb2, 0xb1, 0xd6, 0x5e, 0x26, 0xa5, 0x3e, 0x83, 0xc6, 0xea, 0x90, 0x99, 0x90, 0x9c, 0x99,
+ 0x6f, 0x7b, 0x3d, 0xd9, 0x3b, 0xad, 0x44, 0xf4, 0x36, 0x27, 0xeb, 0x5f, 0x42, 0xb1, 0xc0, 0x87,
+ 0x03, 0xc9, 0xf9, 0x2a, 0xb9, 0x10, 0xb8, 0xe6, 0x30, 0xe8, 0x7b, 0x61, 0xd7, 0x76, 0x43, 0x7c,
+ 0x29, 0x73, 0x24, 0x6b, 0x49, 0x4d, 0x34, 0x25, 0x1d, 0xd2, 0x80, 0x76, 0xc2, 0xd8, 0xb0, 0xeb,
+ 0x39, 0xbd, 0xae, 0x9a, 0x14, 0xaf, 0x63, 0x80, 0xc6, 0x99, 0x5d, 0xa7, 0xd7, 0x51, 0x74, 0xad,
+ 0x4a, 0xd6, 0x1d, 0xef, 0xb8, 0x0b, 0xba, 0xfb, 0x10, 0x62, 0xdd, 0x23, 0xcf, 0xef, 0x06, 0x8e,
+ 0x77, 0x06, 0x1f, 0x0e, 0xfc, 0x31, 0x5f, 0xf5, 0xb4, 0x65, 0x40, 0x35, 0x04, 0x68, 0xd3, 0xf3,
+ 0x3b, 0x30, 0xb7, 0xa9, 0x10, 0x58, 0x51, 0x26, 0x86, 0x85, 0xb6, 0x75, 0xa2, 0x2a, 0x4a, 0x44,
+ 0xdd, 0x03, 0x22, 0x1c, 0xaa, 0x25, 0xe6, 0x30, 0x8b, 0xbb, 0x91, 0xa3, 0x32, 0x1c, 0x55, 0x54,
+ 0x44, 0x04, 0xe9, 0x3f, 0x97, 0x96, 0x8b, 0x27, 0x13, 0x4c, 0x8e, 0x3e, 0x8c, 0x54, 0x72, 0x4c,
+ 0x8b, 0xe4, 0x88, 0x00, 0x4c, 0x8e, 0x38, 0x15, 0x9f, 0x1c, 0xc1, 0xae, 0xac, 0x68, 0x67, 0x64,
+ 0x04, 0x5f, 0x7f, 0x9a, 0xb7, 0x19, 0xd9, 0x8a, 0x4a, 0x4e, 0xfd, 0x9b, 0x24, 0xdf, 0x76, 0x4c,
+ 0x8b, 0xbf, 0x87, 0x62, 0xd3, 0x0b, 0x99, 0x1b, 0xe3, 0x04, 0x76, 0x40, 0x9c, 0xea, 0x3c, 0x9d,
+ 0x26, 0xe9, 0x1f, 0x41, 0xad, 0xa3, 0x9e, 0x17, 0xd6, 0x0c, 0x00, 0x67, 0x2d, 0xb3, 0xab, 0xea,
+ 0x50, 0xb1, 0x9a, 0x07, 0xad, 0x33, 0x35, 0xe3, 0x2e, 0x1b, 0xd3, 0x8c, 0x65, 0xc2, 0x1f, 0x1a,
+ 0x06, 0x08, 0x0c, 0x5f, 0xae, 0x76, 0x51, 0x18, 0x06, 0xf1, 0x0d, 0x14, 0x0a, 0xcc, 0xf8, 0x0f,
+ 0x07, 0xbb, 0x28, 0x41, 0xdd, 0x3e, 0x9c, 0x17, 0xd1, 0x23, 0x55, 0x97, 0x01, 0x49, 0x04, 0x12,
+ 0x4f, 0x11, 0x25, 0x02, 0x8d, 0xdf, 0xfa, 0x9f, 0x13, 0xa4, 0x80, 0x03, 0xfb, 0xc8, 0xb6, 0x30,
+ 0xbd, 0x3e, 0x7b, 0x6a, 0x80, 0xce, 0xd9, 0x0a, 0x7c, 0xa9, 0x14, 0xef, 0x9c, 0x6b, 0x1d, 0x4a,
+ 0x91, 0xa6, 0xbd, 0x3f, 0xe7, 0x53, 0xfd, 0xab, 0x0b, 0xc1, 0xac, 0x47, 0xb9, 0x13, 0x27, 0xda,
+ 0xf1, 0x30, 0x29, 0xd2, 0x69, 0x12, 0xbe, 0xe5, 0x5a, 0x2e, 0x8f, 0x0c, 0xf9, 0x96, 0x5b, 0x6b,
+ 0x51, 0xa0, 0xe8, 0x7f, 0x84, 0x1b, 0x75, 0xc3, 0xb5, 0xfc, 0x31, 0xaf, 0xd5, 0xe8, 0xc1, 0x35,
+ 0x92, 0x87, 0x1e, 0x32, 0x18, 0x07, 0x70, 0xd1, 0x52, 0x4f, 0x45, 0x11, 0x41, 0x6b, 0x92, 0x3c,
+ 0xe4, 0x0c, 0xcf, 0xb7, 0xc3, 0xfe, 0x40, 0x36, 0x53, 0xaf, 0xc6, 0x3f, 0x08, 0x4e, 0xc9, 0xac,
+ 0x18, 0x8a, 0x85, 0x4e, 0xb8, 0x55, 0x47, 0x91, 0xe2, 0xca, 0xf2, 0x8e, 0x02, 0xae, 0x7d, 0x0e,
+ 0x74, 0xf8, 0xd0, 0x26, 0x75, 0xb1, 0x71, 0xe6, 0x76, 0xc0, 0x0d, 0x48, 0xd2, 0xf0, 0x32, 0xa0,
+ 0xeb, 0x24, 0x1f, 0x09, 0xc3, 0x07, 0x3a, 0xa3, 0xd1, 0xe9, 0xde, 0xda, 0xb8, 0xdd, 0xbd, 0x53,
+ 0xdb, 0x81, 0x42, 0x27, 0x4a, 0x47, 0x93, 0x2c, 0xed, 0x98, 0x2e, 0xf4, 0xf6, 0xea, 0xa6, 0x73,
+ 0x1b, 0x76, 0x0b, 0x42, 0x52, 0x66, 0xb2, 0xf5, 0xf3, 0x43, 0x56, 0xba, 0x96, 0x73, 0xbc, 0xf2,
+ 0x65, 0x8a, 0xe4, 0xa3, 0xbb, 0x04, 0xee, 0x21, 0xd6, 0xca, 0x05, 0x71, 0xcb, 0x8b, 0xe8, 0x2d,
+ 0x5e, 0x25, 0xf3, 0xc6, 0xf6, 0xf6, 0x6e, 0xcd, 0xc0, 0x87, 0xb7, 0xf7, 0x45, 0x31, 0x8d, 0x00,
+ 0x06, 0x9c, 0x68, 0xdc, 0x85, 0x9e, 0xa6, 0x4f, 0x8a, 0xe9, 0x03, 0x79, 0x97, 0x8c, 0x50, 0xaa,
+ 0x92, 0x5e, 0x27, 0x39, 0xa3, 0xd3, 0x69, 0xde, 0x69, 0x81, 0xa4, 0x87, 0x89, 0xf2, 0x73, 0x00,
+ 0xba, 0x30, 0x11, 0x05, 0x45, 0xe0, 0xd8, 0x05, 0x49, 0x88, 0xaa, 0xd5, 0x1a, 0x6d, 0x5c, 0xef,
+ 0x41, 0x72, 0x1e, 0xc5, 0x4b, 0x08, 0x7f, 0x98, 0xc9, 0xb7, 0x69, 0xa3, 0x6d, 0x50, 0x5c, 0xf1,
+ 0x61, 0x72, 0x4e, 0xaf, 0xb6, 0xcf, 0xe0, 0x76, 0x89, 0x6b, 0xae, 0xab, 0x57, 0xc4, 0x07, 0xa9,
+ 0xb2, 0x06, 0x98, 0xe5, 0xc9, 0x05, 0x8a, 0x99, 0xbd, 0x31, 0xae, 0xd6, 0xd9, 0x33, 0xe8, 0x1e,
+ 0x17, 0x93, 0x9a, 0x5b, 0xad, 0x83, 0xd7, 0x5a, 0x94, 0x02, 0xd6, 0xd1, 0xfd, 0x56, 0x8b, 0x5b,
+ 0x97, 0x9e, 0xb3, 0x8e, 0x8e, 0x5c, 0x17, 0x31, 0x37, 0xa0, 0xda, 0xec, 0xee, 0xb4, 0xb7, 0x1b,
+ 0x7b, 0x8d, 0xd2, 0xc3, 0xf4, 0x9c, 0x42, 0x35, 0x6f, 0x30, 0x74, 0x58, 0x28, 0xcc, 0xeb, 0x6c,
+ 0xed, 0xef, 0xf1, 0x47, 0xce, 0x07, 0x99, 0xf9, 0x05, 0xfb, 0xa3, 0xb0, 0x87, 0xed, 0xcb, 0xb5,
+ 0xa8, 0x9f, 0x78, 0x98, 0x11, 0x8f, 0x1e, 0x11, 0x46, 0x34, 0x13, 0x28, 0x87, 0x36, 0xbe, 0x27,
+ 0xde, 0x43, 0x1f, 0x64, 0xe7, 0xe4, 0x50, 0xf6, 0x21, 0xa4, 0x48, 0x68, 0x39, 0xa2, 0xa7, 0x91,
+ 0x68, 0xea, 0x95, 0x1f, 0x90, 0x9c, 0x3a, 0xc1, 0xe0, 0x9d, 0xec, 0xbd, 0x5d, 0x7a, 0xb7, 0x41,
+ 0x61, 0xeb, 0xb9, 0x77, 0xd4, 0xcc, 0x3d, 0xcf, 0x87, 0x40, 0x02, 0x35, 0x16, 0x77, 0x8c, 0x96,
+ 0x71, 0x07, 0x00, 0xf2, 0xed, 0x45, 0x01, 0x64, 0x1c, 0x96, 0x4b, 0x72, 0x81, 0x48, 0x66, 0x75,
+ 0xed, 0xd1, 0xe7, 0xeb, 0x0b, 0x9f, 0xc2, 0xef, 0xef, 0x9f, 0xaf, 0x27, 0x1e, 0x7c, 0xb1, 0x9e,
+ 0x78, 0x04, 0xbf, 0x3f, 0xc0, 0xef, 0x2f, 0xf0, 0x3b, 0xcc, 0xf2, 0x7b, 0xc7, 0x9b, 0xff, 0x0d,
+ 0x00, 0x00, 0xff, 0xff, 0x6c, 0xaf, 0xb7, 0x57, 0xd1, 0x1b, 0x00, 0x00,
+}
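
The generated Unmarshal functions above decode every field tag and integer
value with the same base-128 varint loop: each byte contributes its low seven
bits, and a set high bit means another byte follows; skipTypes reuses the same
loop to step over unknown fields. Below is a minimal standalone Go sketch of
that loop, for illustration only; it is not part of the vendored file, and
decodeVarint is a hypothetical helper name.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one base-128 varint from data, mirroring the loop the
// generated code inlines: low seven bits per byte, high bit set = continue.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 { // high bit clear: this was the last byte
			return v, n, nil
		}
	}
	// Ten bytes and still no terminator: the generated code returns
	// ErrIntOverflowTypes at this point.
	return 0, 0, errors.New("varint overflows a 64-bit integer")
}

func main() {
	// 300 encodes as 0xAC 0x02: 44 from the first byte, 2<<7 = 256 from the second.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
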
diff --git a/vendor/src/github.com/docker/swarmkit/api/types.proto b/vendor/src/github.com/docker/swarmkit/api/types.proto
new file mode 100644
index 0000000000..27ec77e98d
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/api/types.proto
@@ -0,0 +1,564 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "timestamp/timestamp.proto"; // TODO(stevvooe): use our own until we fix gogoproto/deepcopy
+import "duration/duration.proto"; // TODO(stevvooe): use our own until we fix gogoproto/deepcopy
+import "gogoproto/gogo.proto";
+
+// This file contains types that are common to objects and specs, or that are
+// not considered first-class within the cluster object model.
+
+// Version tracks the last time an object in the store was updated.
+message Version {
+ uint64 index = 1;
+}
+
+// Annotations provide useful information to identify API objects. They are
+// common to all API specs.
+message Annotations {
+ string name = 1;
+ map<string, string> labels = 2;
+}
+
+message Resources {
+ // Amount of CPU resources in nano-CPUs (e.g. 2000000000 = 2 CPU cores).
+ int64 nano_cpus = 1 [(gogoproto.customname) = "NanoCPUs"];
+
+ // Amount of memory in bytes.
+ int64 memory_bytes = 2;
+}
+
+message ResourceRequirements {
+ Resources limits = 1;
+ Resources reservations = 2;
+}
+
+message Platform {
+ // Architecture (e.g. x86_64)
+ string architecture = 1;
+
+ // Operating System (e.g. linux)
+ string os = 2 [(gogoproto.customname) = "OS"];
+}
+
+// PluginDescription describes an engine plugin.
+message PluginDescription {
+ // Type of plugin. Canonical values for existing types are
+ // Volume, Network, and Authorization. More types could be
+ // supported in the future.
+ string type = 1;
+
+ // Name of the plugin
+ string name = 2;
+}
+
+message EngineDescription {
+ // Docker daemon version running on the node.
+ string engine_version = 1;
+
+ // Labels attached to the engine.
+ map<string, string> labels = 2;
+
+ // Volume, Network, and Auth plugins
+ repeated PluginDescription plugins = 3 [(gogoproto.nullable) = false];
+}
+
+message NodeDescription {
+ // Hostname of the node as reported by the agent.
+ // This is different from spec.meta.name, which is user-defined.
+ string hostname = 1;
+
+ // Platform of the node.
+ Platform platform = 2;
+
+ // Total resources on the node.
+ Resources resources = 3;
+
+ // Information about the Docker Engine on the node.
+ EngineDescription engine = 4;
+}
+
+message RaftMemberStatus {
+ bool leader = 1;
+
+ enum Reachability {
+ // Unknown indicates that the manager state cannot be resolved.
+ UNKNOWN = 0;
+
+ // Unreachable indicates that the node cannot be contacted by other
+ // raft cluster members.
+ UNREACHABLE = 1;
+
+ // Reachable indicates that the node is healthy and reachable
+ // by other members.
+ REACHABLE = 2;
+ }
+
+ Reachability reachability = 2;
+ string message = 3;
+}
+
+message NodeStatus {
+ // TODO(aluzzardi) These should be using `gogoproto.enumvalue_customname`.
+ enum State {
+ // Unknown indicates the node state cannot be resolved.
+ UNKNOWN = 0;
+
+ // Down indicates the node is down.
+ DOWN = 1;
+
+ // Ready indicates the node is ready to accept tasks.
+ READY = 2;
+
+ // Disconnected indicates the node is currently trying to find a new manager.
+ DISCONNECTED = 3;
+ }
+
+ State state = 1;
+ string message = 2;
+}
+
+message Image {
+ // reference is a docker image reference. This can include a repository, tag
+ // or be fully qualified with a digest. The format is specified in the
+ // distribution/reference package.
+ string reference = 1;
+}
+
+// Mount describes volume mounts for a container.
+//
+// The Mount type follows the structure of the mount syscall, including a type,
+// source, and target. Top-level flags, such as writable, are common to all
+// kinds of mounts, while we also provide options specific to each type of
+// mount. These correspond to flags and data, respectively, in the syscall.
+message Mount {
+ enum Type {
+ option (gogoproto.goproto_enum_prefix) = false;
+ option (gogoproto.enum_customname) = "MountType";
+
+ BIND = 0 [(gogoproto.enumvalue_customname) = "MountTypeBind"]; // Bind mount host dir
+ VOLUME = 1 [(gogoproto.enumvalue_customname) = "MountTypeVolume"]; // Remote storage volumes
+ }
+
+ // Type defines the nature of the mount.
+ Type type = 1;
+
+ // Source path to mount
+ string source = 2;
+
+ // Target path in container
+ string target = 3;
+
+ // Writable should be set to true if the mount should be writable from the
+ // container.
+ bool writable = 4;
+
+ // BindOptions specifies options that are specific to a bind mount.
+ message BindOptions {
+ enum Propagation {
+ option (gogoproto.goproto_enum_prefix) = false;
+ option (gogoproto.enum_customname) = "MountPropagation";
+
+ RPRIVATE = 0 [(gogoproto.enumvalue_customname) = "MountPropagationRPrivate"];
+ PRIVATE = 1 [(gogoproto.enumvalue_customname) = "MountPropagationPrivate"];
+ RSHARED = 2 [(gogoproto.enumvalue_customname) = "MountPropagationRShared"];
+ SHARED = 3 [(gogoproto.enumvalue_customname) = "MountPropagationShared"];
+ RSLAVE = 4 [(gogoproto.enumvalue_customname) = "MountPropagationRSlave"];
+ SLAVE = 5 [(gogoproto.enumvalue_customname) = "MountPropagationSlave"];
+ }
+
+ // Propagation mode of mount.
+ Propagation propagation = 1;
+ }
+
+ // VolumeOptions contains parameters for mounting the volume.
+ message VolumeOptions {
+ // populate volume with data from target
+ bool populate = 1;
+
+ // labels to apply to the volume if creating
+ map<string, string> labels = 2;
+
+ // DriverConfig specifies the options that may be passed to the driver
+ // if the volume is created.
+ //
+ // If this is empty, no volume will be created if the volume is missing.
+ Driver driver_config = 3;
+ }
+
+ // Depending on type, one of bind_options or volume_options will be set.
+
+ // BindOptions configures properties of a bind mount type.
+ BindOptions bind_options = 5;
+
+ // VolumeOptions configures the properties specific to a volume mount type.
+ VolumeOptions volume_options = 6;
+
+ // TODO(stevvooe): It would be better to use a oneof field above, although
+ // the type field is enough to make the decision, while remaining primary to
+ // the data structure.
+}
+
+message RestartPolicy {
+ enum RestartCondition {
+ option (gogoproto.goproto_enum_prefix) = false;
+ option (gogoproto.enum_customname) = "RestartCondition";
+ NONE = 0 [(gogoproto.enumvalue_customname) = "RestartOnNone"];
+ ON_FAILURE = 1 [(gogoproto.enumvalue_customname) = "RestartOnFailure"];
+ ANY = 2 [(gogoproto.enumvalue_customname) = "RestartOnAny"];
+ }
+
+ RestartCondition condition = 1;
+
+ // Delay between restart attempts
+ Duration delay = 2;
+
+ // MaxAttempts is the maximum number of restarts to attempt on an
+ // instance before giving up. Ignored if 0.
+ uint64 max_attempts = 3;
+
+ // Window is the time window used to evaluate the restart policy.
+ // The time window is unbounded if this is 0.
+ Duration window = 4;
+}
+
+// UpdateConfig specifies the rate and policy of updates.
+// TODO(aluzzardi): Consider making this a oneof with RollingStrategy and LockstepStrategy.
+message UpdateConfig {
+ // Maximum number of tasks to be updated in one iteration.
+ // 0 means unlimited parallelism.
+ uint64 parallelism = 1;
+
+ // Amount of time between updates.
+ Duration delay = 2 [(gogoproto.nullable) = false];
+}
+
+// TaskState enumerates the states that a task progresses through within an
+// agent. States are designed to be monotonically increasing, such that if two
+// states are seen by a task, the greater of the new represents the true state.
+enum TaskState {
+ // TODO(aluzzardi): Move it back into `TaskStatus` because of the naming
+ // collisions of enums.
+
+ option (gogoproto.goproto_enum_prefix) = false;
+ option (gogoproto.enum_customname) = "TaskState";
+ NEW = 0 [(gogoproto.enumvalue_customname)="TaskStateNew"];
+ ALLOCATED = 64 [(gogoproto.enumvalue_customname)="TaskStateAllocated"]; // successful allocation of resources that the task needs
+ PENDING = 128 [(gogoproto.enumvalue_customname) = "TaskStatePending"]; // observed by scheduler but unassigned.
+ ASSIGNED = 192 [(gogoproto.enumvalue_customname)="TaskStateAssigned"];
+ ACCEPTED = 256 [(gogoproto.enumvalue_customname)="TaskStateAccepted"]; // task has been accepted by an agent.
+ PREPARING = 320 [(gogoproto.enumvalue_customname)="TaskStatePreparing"];
+ READY = 384 [(gogoproto.enumvalue_customname)="TaskStateReady"];
+ STARTING = 448 [(gogoproto.enumvalue_customname)="TaskStateStarting"];
+ RUNNING = 512 [(gogoproto.enumvalue_customname)="TaskStateRunning"];
+ COMPLETE = 576 [(gogoproto.enumvalue_customname)="TaskStateCompleted"]; // successful completion of task (not error code, just ran)
+ SHUTDOWN = 640 [(gogoproto.enumvalue_customname)="TaskStateShutdown"]; // orchestrator requested shutdown
+ FAILED = 704 [(gogoproto.enumvalue_customname)="TaskStateFailed"]; // task execution failed with error
+ REJECTED = 768 [(gogoproto.enumvalue_customname)="TaskStateRejected"]; // task could not be executed here.
+
+ // NOTE(stevvooe): The state of a task is actually a lamport clock, in that
+ // given two observations, the greater of the two can be considered
+ // correct. To enforce this, we only allow tasks to proceed to a greater
+ // state.
+ //
+ // A byproduct of this design decision is that we must also maintain this
+ // invariant in the protobuf enum values, such that when comparing two
+ // values, the one with the greater value is also the greater state.
+ //
+ // Because we may want to add intervening states at a later date, we've left
+ // 64 spaces between each one. This should allow us to make 5 or 6
+ // insertions between each state if we find that we made a mistake and need
+ // another state.
+ //
+ // Remove this message when the states are deemed perfect.
+}
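+
+// Since states are ordered, merging two observations of the same task reduces
+// to taking the larger value. A minimal Go sketch over the generated TaskState
+// type (hypothetical helper, not part of the generated code):
+//
+//   // mergeState returns the greater (more advanced) of two observed states.
+//   func mergeState(current, observed TaskState) TaskState {
+//       if observed > current {
+//           return observed
+//       }
+//       return current
+//   }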
+
+// Container specific status.
+message ContainerStatus {
+ string container_id = 1 [(gogoproto.customname) = "ContainerID"];
+
+ int32 pid = 2 [(gogoproto.customname) = "PID"];
+ int32 exit_code = 3;
+}
+
+message TaskStatus {
+ Timestamp timestamp = 1;
+
+ // State expresses the current state of the task.
+ TaskState state = 2;
+
+ // Message reports a message for the task status. This should provide a
+ // human readable message that can point to how the task actually arrived
+ // at a current state.
+ //
+ // As a convention, we place a small message here describing what led to the
+ // current state. For example, if the task is in ready because it was
+ // prepared, we'd place "prepared" in this field. If we skipped preparation
+ // because the task was already prepared, we would put "already prepared" in
+ // this field.
+ string message = 3;
+
+ // Err is set if the task is in an error state.
+ //
+ // The following states should report a companion error:
+ //
+ // FAILED, REJECTED
+ //
+ // TODO(stevvooe) Integrate this field with the error interface.
+ string err = 4;
+
+ // Container status contains container specific status information.
+ oneof runtime_status {
+ ContainerStatus container = 5;
+ }
+}
+
+// IPAMConfig specifies parameters for IP Address Management.
+message IPAMConfig {
+ // TODO(stevvooe): It may make more sense to manage IPAM and network
+ // definitions separately. This will allow multiple networks to share IPAM
+ // instances. For now, we will follow the conventions of libnetwork and
+ // specify this as part of the network specification.
+
+ // AddressFamily specifies the network address family that
+ // this IPAMConfig belongs to.
+ enum AddressFamily {
+ UNKNOWN = 0; // satisfy proto3
+ IPV4 = 4;
+ IPV6 = 6;
+ }
+
+ AddressFamily family = 1;
+
+ // Subnet defines a network as a CIDR address (i.e. network and mask,
+ // e.g. 192.168.0.0/24).
+ string subnet = 2;
+
+ // Range defines the portion of the subnet to allocate to tasks. This is
+ // defined as a subnet within the primary subnet.
+ string range = 3;
+
+ // Gateway address within the subnet.
+ string gateway = 4;
+
+ // Reserved is a list of addresses from the master pool that should *not* be
+ // allocated. These addresses may have already been allocated or may be
+ // reserved for another allocation manager.
+ map<string, string> reserved = 5;
+}
+
+// PortConfig specifies an exposed port which can be addressed using the given
+// name. This can later be queried using a service discovery API or a DNS SRV
+// query. The node port specifies a port that can be used to address this
+// service externally to the cluster by sending a connection request to this
+// port on any node in the cluster.
+message PortConfig {
+ enum Protocol {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ TCP = 0 [(gogoproto.enumvalue_customname) = "ProtocolTCP"];
+ UDP = 1 [(gogoproto.enumvalue_customname) = "ProtocolUDP"];
+ }
+
+ // Name for the port. If provided the port information can
+ // be queried using the name as in a DNS SRV query.
+ string name = 1;
+
+ // Protocol for the port which is exposed.
+ Protocol protocol = 2;
+
+ // The port which the application is exposing and is bound to.
+ uint32 target_port = 3;
+
+ // PublishedPort specifies the port on which the service is
+ // exposed. If specified, the port must be
+ // within the available range. If not specified, an available
+ // port is automatically assigned.
+ uint32 published_port = 4;
+}
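+
+// For illustration (hypothetical values): a PortConfig with name "http",
+// protocol TCP, target_port 80 and published_port 30080 would expose container
+// port 80 on port 30080 of every node in the cluster.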
+
+// Driver is a generic driver type to be used throughout the API. For now, a
+// driver is simply a name and set of options. The field contents depend on the
+// target use case and driver application. For example, a network driver may
+// have different rules than a volume driver.
+message Driver {
+ string name = 1;
+ map <string, string> options = 2;
+}
+
+message IPAMOptions {
+ Driver driver = 1;
+ repeated IPAMConfig configs = 3;
+}
+
+// Peer should be used anywhere where we are describing a remote peer.
+message Peer {
+ string node_id = 1 [(gogoproto.customname) = "NodeID"];
+ string addr = 2;
+}
+
+// WeightedPeer should be used anywhere where we are describing a remote peer
+// with a weight.
+message WeightedPeer {
+ Peer peer = 1;
+ int64 weight = 2;
+}
+
+
+message IssuanceStatus {
+ enum State {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "IssuanceStateUnknown"];
+ RENEW = 1 [(gogoproto.enumvalue_customname)="IssuanceStateRenew"]; // Certificate should be issued
+ PENDING = 2 [(gogoproto.enumvalue_customname)="IssuanceStatePending"]; // Certificate is pending acceptance
+ ISSUED = 3 [(gogoproto.enumvalue_customname)="IssuanceStateIssued"]; // successful completion of certificate issuance
+ FAILED = 4 [(gogoproto.enumvalue_customname)="IssuanceStateFailed"]; // Certificate issuance failed
+ }
+ State state = 1;
+
+ // Err is set if the Certificate Issuance is in an error state.
+ // The following states should report a companion error:
+ // FAILED
+ string err = 2;
+}
+
+message AcceptancePolicy {
+ message RoleAdmissionPolicy {
+ message HashedSecret {
+ // The actual hashed content
+ bytes data = 1;
+ // The type of hash we are using
+ string alg = 2;
+ }
+
+ NodeRole role = 1;
+ // Autoaccept controls which roles' certificates are automatically
+ // issued without administrator intervention.
+ bool autoaccept = 2;
+ // Secret represents a user-provided string that is necessary for new
+ // nodes to join the cluster
+ HashedSecret secret = 3;
+ }
+
+ repeated RoleAdmissionPolicy policies = 1;
+}
+
+
+message CAConfig {
+ // NodeCertExpiry is the duration certificates should be issued for
+ Duration node_cert_expiry = 1;
+}
+
+// OrchestrationConfig defines cluster-level orchestration settings.
+message OrchestrationConfig {
+ // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+ // node. If negative, never remove completed or failed tasks.
+ int64 task_history_retention_limit = 1;
+}
+
+// DispatcherConfig defines cluster-level dispatcher settings.
+message DispatcherConfig {
+ // HeartbeatPeriod defines how often an agent should send heartbeats to the
+ // dispatcher.
+ uint64 heartbeat_period = 1;
+}
+
+// RaftConfig defines raft settings for the cluster.
+message RaftConfig {
+ // SnapshotInterval is the number of log entries between snapshots.
+ uint64 snapshot_interval = 1;
+ // KeepOldSnapshots is the number of snapshots to keep beyond the
+ // current snapshot.
+ uint64 keep_old_snapshots = 2;
+ // LogEntriesForSlowFollowers is the number of log entries to keep
+ // around to sync up slow followers after a snapshot is created.
+ uint64 log_entries_for_slow_followers = 3;
+ // HeartbeatTick defines the number of ticks (in seconds) between
+ // each heartbeat message sent to other members for health-check.
+ uint32 heartbeat_tick = 4;
+ // ElectionTick defines the number of ticks (in seconds) needed
+ // without a leader to trigger a new election.
+ uint32 election_tick = 5;
+}
+
+message RaftMember {
+ // RaftID specifies the internal ID used by the manager in a raft context; it
+ // can never be modified and is used only for informational purposes.
+ uint64 raft_id = 1 [(gogoproto.customname) = "RaftID"];
+
+ // Addr specifies the address of the member
+ string addr = 2;
+
+ // Status provides the current status of the manager from the perspective of another manager.
+ RaftMemberStatus status = 3 [(gogoproto.nullable) = false];
+}
+
+// Placement specifies task distribution constraints.
+message Placement {
+ // constraints specifies a set of requirements a node should meet for a task.
+ repeated string constraints = 1;
+}
+
+message RootCA {
+ // CAKey is the root CA private key.
+ bytes ca_key = 1 [(gogoproto.customname) = "CAKey"];
+
+ // CACert is the root CA certificate.
+ bytes ca_cert = 2 [(gogoproto.customname) = "CACert"];
+
+ // CACertHash is the digest of the CA Certificate.
+ string ca_cert_hash = 3 [(gogoproto.customname) = "CACertHash"];
+}
+
+
+enum NodeRole {
+ option (gogoproto.enum_customname) = "NodeRole";
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ WORKER = 0 [(gogoproto.enumvalue_customname) = "NodeRoleWorker"];
+ MANAGER = 1 [(gogoproto.enumvalue_customname) = "NodeRoleManager"];
+}
+
+message Certificate {
+ NodeRole role = 1;
+
+ bytes csr = 2 [(gogoproto.customname) = "CSR"];
+
+ IssuanceStatus status = 3 [(gogoproto.nullable) = false];
+
+ bytes certificate = 4;
+
+ // CN represents the node ID.
+ string cn = 5 [(gogoproto.customname) = "CN"];
+}
+
+
+// Symmetric keys to encrypt inter-agent communication.
+message EncryptionKey {
+ // Agent subsystem the key is intended for. Example:
+ // networking:gossip
+ string subsystem = 1;
+
+ // Encryption algorithm that can be implemented using this key
+ enum Algorithm {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ AES_128_GCM = 0;
+ }
+
+ Algorithm algorithm = 2;
+
+ bytes key = 3;
+
+ // Time stamp from the lamport clock of the key allocator to
+ // identify the relative age of the key.
+ uint64 lamport_time = 4;
+}
+
+// ManagerStatus provides information about the status of a manager in the cluster.
+message ManagerStatus {
+ RaftMember raft = 1 [(gogoproto.nullable) = false];
+}
\ No newline at end of file
diff --git a/vendor/src/github.com/docker/swarmkit/ca/auth.go b/vendor/src/github.com/docker/swarmkit/ca/auth.go
new file mode 100644
index 0000000000..08e06e8831
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/ca/auth.go
@@ -0,0 +1,213 @@
+package ca
+
+import (
+ "crypto/tls"
+ "crypto/x509/pkix"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+
+ "github.com/docker/swarmkit/log"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/peer"
+)
+
+// LogTLSState logs information about the TLS connection and remote peers
+func LogTLSState(ctx context.Context, tlsState *tls.ConnectionState) {
+ if tlsState == nil {
+ log.G(ctx).Debugf("no TLS Chains found")
+ return
+ }
+
+ peerCerts := []string{}
+ verifiedChain := []string{}
+ for _, cert := range tlsState.PeerCertificates {
+ peerCerts = append(peerCerts, cert.Subject.CommonName)
+ }
+ for _, chain := range tlsState.VerifiedChains {
+ subjects := []string{}
+ for _, cert := range chain {
+ subjects = append(subjects, cert.Subject.CommonName)
+ }
+ verifiedChain = append(verifiedChain, strings.Join(subjects, ","))
+ }
+
+ log.G(ctx).WithFields(logrus.Fields{
+ "peer.peerCert": peerCerts,
+ // "peer.verifiedChain": verifiedChain},
+ }).Debugf("")
+}
+
+// getCertificateSubject extracts the subject from a verified client certificate
+func getCertificateSubject(tlsState *tls.ConnectionState) (pkix.Name, error) {
+ if tlsState == nil {
+ return pkix.Name{}, grpc.Errorf(codes.PermissionDenied, "request is not using TLS")
+ }
+ if len(tlsState.PeerCertificates) == 0 {
+ return pkix.Name{}, grpc.Errorf(codes.PermissionDenied, "no client certificates in request")
+ }
+ if len(tlsState.VerifiedChains) == 0 {
+ return pkix.Name{}, grpc.Errorf(codes.PermissionDenied, "no verified chains for remote certificate")
+ }
+
+ return tlsState.VerifiedChains[0][0].Subject, nil
+}
+
+func tlsConnStateFromContext(ctx context.Context) (*tls.ConnectionState, error) {
+ peer, ok := peer.FromContext(ctx)
+ if !ok {
+ return nil, grpc.Errorf(codes.PermissionDenied, "Permission denied: no peer info")
+ }
+ tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo)
+ if !ok {
+ return nil, grpc.Errorf(codes.PermissionDenied, "Permission denied: peer didn't not present valid peer certificate")
+ }
+ return &tlsInfo.State, nil
+}
+
+// certSubjectFromContext extracts pkix.Name from context.
+func certSubjectFromContext(ctx context.Context) (pkix.Name, error) {
+ connState, err := tlsConnStateFromContext(ctx)
+ if err != nil {
+ return pkix.Name{}, err
+ }
+ return getCertificateSubject(connState)
+}
+
+// AuthorizeOrgAndRole takes in a context, an organization, and a list of
+// roles (OUs), and returns the Node ID of the node if it is authorized.
+func AuthorizeOrgAndRole(ctx context.Context, org string, ou ...string) (string, error) {
+ certSubj, err := certSubjectFromContext(ctx)
+ if err != nil {
+ return "", err
+ }
+ // Check if the current certificate has an OU that authorizes
+ // access to this method
+ if intersectArrays(certSubj.OrganizationalUnit, ou) {
+ return authorizeOrg(ctx, org)
+ }
+
+ return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: remote certificate not part of OUs: %v", ou)
+}
+
+// authorizeOrg takes in a context and an organization, and returns
+// the Node ID of the node.
+func authorizeOrg(ctx context.Context, org string) (string, error) {
+ certSubj, err := certSubjectFromContext(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ if len(certSubj.Organization) > 0 && certSubj.Organization[0] == org {
+ return certSubj.CommonName, nil
+ }
+
+ return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: remote certificate not part of organization: %s", org)
+}
+
+// AuthorizeForwardedRoleAndOrg checks for proper roles and organization of the caller. The RPC may have
+// been proxied by a manager, in which case the manager is authenticated and
+// so is the certificate information that it forwarded. It returns the node ID
+// of the original client.
+func AuthorizeForwardedRoleAndOrg(ctx context.Context, authorizedRoles, forwarderRoles []string, org string) (string, error) {
+ if isForwardedRequest(ctx) {
+ _, err := AuthorizeOrgAndRole(ctx, org, forwarderRoles...)
+ if err != nil {
+ return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: unauthorized forwarder role: %v", err)
+ }
+
+ // This was a forwarded request and the forwarder was authorized above;
+ // now check if the forwarded role matches one of the authorized roles.
+ forwardedID, forwardedOrg, forwardedOUs := forwardedTLSInfoFromContext(ctx)
+
+ if len(forwardedOUs) == 0 || forwardedID == "" || forwardedOrg == "" {
+ return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request")
+ }
+
+ if !intersectArrays(forwardedOUs, authorizedRoles) {
+ return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: unauthorized forwarded role, expecting: %v", authorizedRoles)
+ }
+
+ if forwardedOrg != org {
+ return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: organization mismatch, expecting: %s", org)
+ }
+
+ return forwardedID, nil
+ }
+
+ // There wasn't any node being forwarded, check if this is a direct call by the expected role
+ nodeID, err := AuthorizeOrgAndRole(ctx, org, authorizedRoles...)
+ if err == nil {
+ return nodeID, nil
+ }
+
+ return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: unauthorized peer role: %v", err)
+}
+
+// intersectArrays returns true when there is at least one element in common
+// between the two arrays
+func intersectArrays(orig, tgt []string) bool {
+ for _, i := range orig {
+ for _, x := range tgt {
+ if i == x {
+ return true
+ }
+ }
+ }
+ return false
+}
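+
+// For example, intersectArrays([]string{"swarm-manager"}, []string{"swarm-manager",
+// "swarm-worker"}) returns true, while disjoint inputs return false.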
+
+// RemoteNodeInfo describes a node sending an RPC request.
+type RemoteNodeInfo struct {
+ // Roles is a list of roles contained in the node's certificate
+ // (or forwarded by a trusted node).
+ Roles []string
+
+ // Organization is the organization contained in the node's certificate
+ // (or forwarded by a trusted node).
+ Organization string
+
+ // NodeID is the node's ID, from the CN field in its certificate
+ // (or forwarded by a trusted node).
+ NodeID string
+
+ // ForwardedBy contains information for the node that forwarded this
+ // request. It is set to nil if the request was received directly.
+ ForwardedBy *RemoteNodeInfo
+}
+
+// RemoteNode returns the node ID and role from the client's TLS certificate.
+// If the RPC was forwarded, the original client's ID and role is returned, as
+// well as the forwarder's ID. This function does not do authorization checks -
+// it only looks up the node ID.
+func RemoteNode(ctx context.Context) (RemoteNodeInfo, error) {
+ certSubj, err := certSubjectFromContext(ctx)
+ if err != nil {
+ return RemoteNodeInfo{}, err
+ }
+
+ org := ""
+ if len(certSubj.Organization) > 0 {
+ org = certSubj.Organization[0]
+ }
+
+ directInfo := RemoteNodeInfo{
+ Roles: certSubj.OrganizationalUnit,
+ NodeID: certSubj.CommonName,
+ Organization: org,
+ }
+
+ if isForwardedRequest(ctx) {
+ cn, org, ous := forwardedTLSInfoFromContext(ctx)
+ if len(ous) == 0 || cn == "" || org == "" {
+ return RemoteNodeInfo{}, grpc.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request")
+ }
+ return RemoteNodeInfo{Roles: ous, NodeID: cn, Organization: org, ForwardedBy: &directInfo}, nil
+ }
+
+ return directInfo, nil
+}
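+
+// A minimal sketch of calling RemoteNode from a gRPC handler (hypothetical
+// handler, for illustration only):
+//
+//   func (s *server) SomeRPC(ctx context.Context, req *Request) (*Response, error) {
+//       info, err := ca.RemoteNode(ctx)
+//       if err != nil {
+//           return nil, err
+//       }
+//       log.G(ctx).Debugf("request from node %s (roles: %v)", info.NodeID, info.Roles)
+//       // ...
+//   }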
diff --git a/vendor/src/github.com/docker/swarmkit/ca/certificates.go b/vendor/src/github.com/docker/swarmkit/ca/certificates.go
new file mode 100644
index 0000000000..2b9d70dbd4
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/ca/certificates.go
@@ -0,0 +1,711 @@
+package ca
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "time"
+
+ log "github.com/Sirupsen/logrus"
+ cfcsr "github.com/cloudflare/cfssl/csr"
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/initca"
+ cflog "github.com/cloudflare/cfssl/log"
+ cfsigner "github.com/cloudflare/cfssl/signer"
+ "github.com/cloudflare/cfssl/signer/local"
+ "github.com/docker/distribution/digest"
+ "github.com/docker/go-events"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/identity"
+ "github.com/docker/swarmkit/ioutils"
+ "github.com/docker/swarmkit/picker"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+)
+
+const (
+ // Security Strength Equivalence
+ //-----------------------------------
+ //| Key-type | ECC | DH/DSA/RSA |
+ //| Node | 256 | 3072 |
+ //| Root | 384 | 7680 |
+ //-----------------------------------
+
+ // RootKeySize is the default size of the root CA key
+ RootKeySize = 384
+ // RootKeyAlgo defines the default algorithm for the root CA Key
+ RootKeyAlgo = "ecdsa"
+ // PassphraseENVVar defines the environment variable to look for the
+ // root CA private key material encryption key
+ PassphraseENVVar = "SWARM_ROOT_CA_PASSPHRASE"
+ // PassphraseENVVarPrev defines the alternate environment variable to look for the
+ // root CA private key material encryption key. It can be used for seamless
+ // KEK rotations.
+ PassphraseENVVarPrev = "SWARM_ROOT_CA_PASSPHRASE_PREV"
+ // RootCAExpiration represents the expiration for the root CA in seconds (20 years)
+ RootCAExpiration = "630720000s"
+ // DefaultNodeCertExpiration represents the default expiration for node certificates (3 months)
+ DefaultNodeCertExpiration = 2160 * time.Hour
+ // CertLowerRotationRange represents the minimum fraction of time that we will wait when randomly
+ // choosing our next certificate rotation
+ CertLowerRotationRange = 0.5
+ // CertUpperRotationRange represents the maximum fraction of time that we will wait when randomly
+ // choosing our next certificate rotation
+ CertUpperRotationRange = 0.8
+ // MinNodeCertExpiration represents the minimum expiration for node certificates (25 + 5 minutes)
+ // X - 5 > CertUpperRotationRange * X <=> X > 5/(1 - CertUpperRotationRange)
+ // Since we're issuing certificates 5 minutes in the past to get around clock drifts, and
+ // we're selecting a random rotation distribution range from CertLowerRotationRange to
+ // CertUpperRotationRange, we need to ensure that we don't accept an expiration time that will
+ // make a node able to randomly choose the next rotation after the expiration of the certificate.
+ MinNodeCertExpiration = 30 * time.Minute
+)
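+
+// To illustrate the bound above with the chosen constants: for the minimum
+// expiration X = 30 minutes, the latest possible rotation happens at
+// CertUpperRotationRange * X = 24 minutes, safely before the X - 5 = 25
+// minutes of validity that remain after backdating issuance by 5 minutes.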
+
+// ErrNoLocalRootCA is an error type used to indicate that the local root CA
+// certificate file does not exist.
+var ErrNoLocalRootCA = errors.New("local root CA certificate does not exist")
+
+// ErrNoValidSigner is an error type used to indicate that our RootCA doesn't have the ability to
+// sign certificates.
+var ErrNoValidSigner = errors.New("no valid signer found")
+
+func init() {
+ cflog.Level = 5
+}
+
+// CertPaths is a helper struct that keeps track of the paths of a
+// Cert and corresponding Key
+type CertPaths struct {
+ Cert, Key string
+}
+
+// RootCA is the representation of everything we need to sign certificates
+type RootCA struct {
+ // Key will only be used by the original manager to put the private
+ // key-material in raft, no signing operations depend on it.
+ Key []byte
+ // Cert includes the PEM encoded Certificate for the Root CA
+ Cert []byte
+ Pool *x509.CertPool
+ // Digest of the serialized bytes of the certificate
+ Digest digest.Digest
+ // This signer will be nil if the node doesn't have the appropriate key material
+ Signer cfsigner.Signer
+}
+
+// CanSign ensures that the signer has all three necessary elements needed to operate
+func (rca *RootCA) CanSign() bool {
+ if rca.Cert == nil || rca.Pool == nil || rca.Signer == nil {
+ return false
+ }
+
+ return true
+}
+
+// IssueAndSaveNewCertificates generates a new key-pair, signs its CSR with the
+// local root CA, and returns a TLS certificate.
+func (rca *RootCA) IssueAndSaveNewCertificates(paths CertPaths, cn, ou, org string) (*tls.Certificate, error) {
+ csr, key, err := GenerateAndWriteNewKey(paths)
+ if err != nil {
+ log.Debugf("error when generating new node certs: %v", err)
+ return nil, err
+ }
+
+ var signedCert []byte
+ if !rca.CanSign() {
+ return nil, ErrNoValidSigner
+ }
+
+ // Obtain a signed Certificate
+ signedCert, err = rca.ParseValidateAndSignCSR(csr, cn, ou, org)
+ if err != nil {
+ log.Debugf("failed to sign node certificate: %v", err)
+ return nil, err
+ }
+
+ // Ensure directory exists
+ err = os.MkdirAll(filepath.Dir(paths.Cert), 0755)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write the chain to disk
+ if err := ioutils.AtomicWriteFile(paths.Cert, signedCert, 0644); err != nil {
+ return nil, err
+ }
+
+ // Create a valid TLSKeyPair out of the PEM encoded private key and certificate
+ tlsKeyPair, err := tls.X509KeyPair(signedCert, key)
+ if err != nil {
+ return nil, err
+ }
+
+ log.Debugf("locally issued new TLS certificate for node ID: %s and role: %s", cn, ou)
+ return &tlsKeyPair, nil
+}
+
+// RequestAndSaveNewCertificates gets new certificates issued, either by signing them locally if a signer is
+// available, or by requesting them from the remote server at remoteAddr.
+func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths CertPaths, role, secret string, picker *picker.Picker, transport credentials.TransportAuthenticator, nodeInfo chan<- string) (*tls.Certificate, error) {
+ // Create a new key pair and CSR for this node.
+ // Write the new CSR and the new key to a temporary location so we can survive crashes on rotation
+ tempPaths := genTempPaths(paths)
+ csr, key, err := GenerateAndWriteNewKey(tempPaths)
+ if err != nil {
+ log.Debugf("error when generating new node certs: %v", err)
+ return nil, err
+ }
+
+ // Get the remote manager to issue a CA signed certificate for this node
+ signedCert, err := GetRemoteSignedCertificate(ctx, csr, role, secret, rca.Pool, picker, transport, nodeInfo)
+ if err != nil {
+ return nil, err
+ }
+
+ // Trust, but verify.
+ // Before we overwrite our local certificate, let's make sure the server gave us one that is valid
+ // Create an X509Cert so we can .Verify()
+ certBlock, _ := pem.Decode(signedCert)
+ if certBlock == nil {
+ return nil, fmt.Errorf("failed to parse certificate PEM")
+ }
+ X509Cert, err := x509.ParseCertificate(certBlock.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ // Include our current root pool
+ opts := x509.VerifyOptions{
+ Roots: rca.Pool,
+ }
+ // Check to see if this certificate was signed by our CA, and isn't expired
+ if _, err := X509Cert.Verify(opts); err != nil {
+ return nil, err
+ }
+
+ log.Infof("Downloaded new TLS credentials with role: %s.", role)
+
+ // Ensure directory exists
+ err = os.MkdirAll(filepath.Dir(paths.Cert), 0755)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write the chain to disk
+ if err := ioutils.AtomicWriteFile(paths.Cert, signedCert, 0644); err != nil {
+ return nil, err
+ }
+
+ // Move the new key to the final location
+ if err := os.Rename(tempPaths.Key, paths.Key); err != nil {
+ return nil, err
+ }
+
+ // Create a valid TLSKeyPair out of the PEM encoded private key and certificate
+ tlsKeyPair, err := tls.X509KeyPair(signedCert, key)
+ if err != nil {
+ return nil, err
+ }
+
+ return &tlsKeyPair, nil
+}
+
+// ParseValidateAndSignCSR returns a signed certificate from a particular rootCA and a CSR.
+func (rca *RootCA) ParseValidateAndSignCSR(csrBytes []byte, cn, ou, org string) ([]byte, error) {
+ if !rca.CanSign() {
+ return nil, ErrNoValidSigner
+ }
+
+ // All managers also get the CA subject-alt-name, so they can be used for cert issuance.
+ hosts := []string{ou}
+ if ou == ManagerRole {
+ hosts = append(hosts, CARole)
+ }
+
+ cert, err := rca.Signer.Sign(cfsigner.SignRequest{
+ Request: string(csrBytes),
+ // OU is used for Authentication of the node type. The CN has the random
+ // node ID.
+ Subject: &cfsigner.Subject{CN: cn, Names: []cfcsr.Name{{OU: ou, O: org}}},
+ // Adding ou as DNS alt name, so clients can connect to ManagerRole and CARole
+ Hosts: hosts,
+ })
+ if err != nil {
+ log.Debugf("failed to sign node certificate: %v", err)
+ return nil, err
+ }
+
+ return cert, nil
+}
+
+// NewRootCA creates a new RootCA object from unparsed cert and key byte
+// slices. key may be nil, and in this case NewRootCA will return a RootCA
+// without a signer.
+func NewRootCA(cert, key []byte, certExpiry time.Duration) (RootCA, error) {
+ // Check to see if the Certificate file is a valid, self-signed Cert
+ parsedCA, err := helpers.ParseSelfSignedCertificatePEM(cert)
+ if err != nil {
+ return RootCA{}, err
+ }
+
+ // Calculate the digest for our RootCACertificate
+ digest := digest.FromBytes(cert)
+
+ // Create a Pool with our RootCACertificate
+ pool := x509.NewCertPool()
+ if !pool.AppendCertsFromPEM(cert) {
+ return RootCA{}, fmt.Errorf("error while adding root CA cert to Cert Pool")
+ }
+
+ if len(key) == 0 {
+ // This RootCA does not have a valid signer.
+ return RootCA{Cert: cert, Digest: digest, Pool: pool}, nil
+ }
+
+ var (
+ passphraseStr string
+ passphrase, passphrasePrev []byte
+ priv crypto.Signer
+ )
+
+ // Attempt two distinct passphrases, so we can do a hitless passphrase rotation
+ if passphraseStr = os.Getenv(PassphraseENVVar); passphraseStr != "" {
+ passphrase = []byte(passphraseStr)
+ }
+
+ if p := os.Getenv(PassphraseENVVarPrev); p != "" {
+ passphrasePrev = []byte(p)
+ }
+
+ // Attempt to decrypt the current private-key with the passphrases provided
+ priv, err = helpers.ParsePrivateKeyPEMWithPassword(key, passphrase)
+ if err != nil {
+ priv, err = helpers.ParsePrivateKeyPEMWithPassword(key, passphrasePrev)
+ if err != nil {
+ log.Debugf("malformed private key: %v", err)
+ return RootCA{}, err
+ }
+ }
+
+ if err := ensureCertKeyMatch(parsedCA, priv.Public()); err != nil {
+ return RootCA{}, err
+ }
+
+ signer, err := local.NewSigner(priv, parsedCA, cfsigner.DefaultSigAlgo(priv), SigningPolicy(certExpiry))
+ if err != nil {
+ return RootCA{}, err
+ }
+
+ // If the key was loaded from disk unencrypted, but there is a passphrase set,
+ // ensure it is encrypted, so it doesn't hit raft in plain-text
+ keyBlock, _ := pem.Decode(key)
+ if keyBlock == nil {
+ // This RootCA does not have a valid signer.
+ return RootCA{Cert: cert, Digest: digest, Pool: pool}, nil
+ }
+ if passphraseStr != "" && !x509.IsEncryptedPEMBlock(keyBlock) {
+ key, err = EncryptECPrivateKey(key, passphraseStr)
+ if err != nil {
+ return RootCA{}, err
+ }
+ }
+
+ return RootCA{Signer: signer, Key: key, Digest: digest, Cert: cert, Pool: pool}, nil
+}
+
+func ensureCertKeyMatch(cert *x509.Certificate, key crypto.PublicKey) error {
+ switch certPub := cert.PublicKey.(type) {
+ // TODO: Handle RSA keys.
+ case *ecdsa.PublicKey:
+ ecKey, ok := key.(*ecdsa.PublicKey)
+ if ok && certPub.X.Cmp(ecKey.X) == 0 && certPub.Y.Cmp(ecKey.Y) == 0 {
+ return nil
+ }
+ default:
+ return fmt.Errorf("unknown or unsupported certificate public key algorithm")
+ }
+
+ return fmt.Errorf("certificate key mismatch")
+}
+
+// GetLocalRootCA checks whether there is a valid self-signed CA certificate on
+// disk, and returns a RootCA built from its PEM-encoded contents if so.
+func GetLocalRootCA(baseDir string) (RootCA, error) {
+ paths := NewConfigPaths(baseDir)
+
+ // Check if we have a Certificate file
+ cert, err := ioutil.ReadFile(paths.RootCA.Cert)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = ErrNoLocalRootCA
+ }
+
+ return RootCA{}, err
+ }
+
+ key, err := ioutil.ReadFile(paths.RootCA.Key)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return RootCA{}, err
+ }
+ // There may not be a local key. It's okay to pass in a nil
+ // key. We'll get a root CA without a signer.
+ key = nil
+ }
+
+ rootCA, err := NewRootCA(cert, key, DefaultNodeCertExpiration)
+ if err == nil {
+ log.Debugf("successfully loaded the signer for the Root CA: %s", paths.RootCA.Cert)
+ }
+
+ return rootCA, err
+}
+
+// GetRemoteCA returns the remote endpoint's CA certificate
+func GetRemoteCA(ctx context.Context, d digest.Digest, picker *picker.Picker) (RootCA, error) {
+ // We need a valid picker to be able to Dial to a remote CA
+ if picker == nil {
+ return RootCA{}, fmt.Errorf("valid remote address picker required")
+ }
+
+ // This TLS Config is intentionally using InsecureSkipVerify. Either we're
+ // doing TOFU, in which case we don't validate the remote CA, or we're using
+ // a user supplied hash to check the integrity of the CA certificate.
+ insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
+ opts := []grpc.DialOption{
+ grpc.WithTransportCredentials(insecureCreds),
+ grpc.WithBackoffMaxDelay(10 * time.Second),
+ grpc.WithPicker(picker)}
+
+ firstAddr, err := picker.PickAddr()
+ if err != nil {
+ return RootCA{}, err
+ }
+
+ conn, err := grpc.Dial(firstAddr, opts...)
+ if err != nil {
+ return RootCA{}, err
+ }
+ defer conn.Close()
+
+ client := api.NewCAClient(conn)
+ response, err := client.GetRootCACertificate(ctx, &api.GetRootCACertificateRequest{})
+ if err != nil {
+ return RootCA{}, err
+ }
+
+ if d != "" {
+ verifier, err := digest.NewDigestVerifier(d)
+ if err != nil {
+ return RootCA{}, fmt.Errorf("unexpected error getting digest verifier: %v", err)
+ }
+
+ if _, err := io.Copy(verifier, bytes.NewReader(response.Certificate)); err != nil {
+ return RootCA{}, err
+ }
+
+ if !verifier.Verified() {
+ return RootCA{}, fmt.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex())
+ }
+ }
+
+ // Check the validity of the remote Cert
+ _, err = helpers.ParseCertificatePEM(response.Certificate)
+ if err != nil {
+ return RootCA{}, err
+ }
+
+ // Create a Pool with our RootCACertificate
+ pool := x509.NewCertPool()
+ if !pool.AppendCertsFromPEM(response.Certificate) {
+ return RootCA{}, fmt.Errorf("failed to append certificate to cert pool")
+ }
+
+ return RootCA{Cert: response.Certificate, Pool: pool}, nil
+}
+
+// CreateAndWriteRootCA creates a Certificate authority for a new Swarm Cluster, potentially
+// overwriting any existing CAs.
+func CreateAndWriteRootCA(rootCN string, paths CertPaths) (RootCA, error) {
+ // Create a simple CSR for the CA using the default CA validator and policy
+ req := cfcsr.CertificateRequest{
+ CN: rootCN,
+ KeyRequest: &cfcsr.BasicKeyRequest{A: RootKeyAlgo, S: RootKeySize},
+ CA: &cfcsr.CAConfig{Expiry: RootCAExpiration},
+ }
+
+ // Generate the CA and get the certificate and private key
+ cert, _, key, err := initca.New(&req)
+ if err != nil {
+ return RootCA{}, err
+ }
+
+ // Ensure directory exists
+ err = os.MkdirAll(filepath.Dir(paths.Cert), 0755)
+ if err != nil {
+ return RootCA{}, err
+ }
+
+ // Write the Private Key and Certificate to disk, using decent permissions
+ if err := ioutils.AtomicWriteFile(paths.Cert, cert, 0644); err != nil {
+ return RootCA{}, err
+ }
+ if err := ioutils.AtomicWriteFile(paths.Key, key, 0600); err != nil {
+ return RootCA{}, err
+ }
+
+ return NewRootCA(cert, key, DefaultNodeCertExpiration)
+}
+
+// BootstrapCluster receives a directory and creates both new Root CA key material
+// and a ManagerRole key/certificate pair to be used by the initial cluster manager
+func BootstrapCluster(baseCertDir string) error {
+ paths := NewConfigPaths(baseCertDir)
+
+ rootCA, err := CreateAndWriteRootCA(rootCN, paths.RootCA)
+ if err != nil {
+ return err
+ }
+
+ nodeID := identity.NewNodeID()
+ newOrg := identity.NewID()
+ _, err = GenerateAndSignNewTLSCert(rootCA, nodeID, ManagerRole, newOrg, paths.Node)
+
+ return err
+}
+
+// GenerateAndSignNewTLSCert creates a new keypair, signs the certificate using signer,
+// and saves the certificate and key to disk. This method is used to bootstrap the first
+// manager TLS certificates.
+func GenerateAndSignNewTLSCert(rootCA RootCA, cn, ou, org string, paths CertPaths) (*tls.Certificate, error) {
+ // Generate a new keypair and CSR
+ csr, key, err := generateNewCSR()
+ if err != nil {
+ return nil, err
+ }
+
+ // Obtain a signed Certificate
+ cert, err := rootCA.ParseValidateAndSignCSR(csr, cn, ou, org)
+ if err != nil {
+ log.Debugf("failed to sign node certificate: %v", err)
+ return nil, err
+ }
+
+ // Append the root CA cert to the certificate, to create a valid chain
+ certChain := append(cert, rootCA.Cert...)
+
+ // Ensure directory exists
+ err = os.MkdirAll(filepath.Dir(paths.Cert), 0755)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write both the chain and key to disk
+ if err := ioutils.AtomicWriteFile(paths.Cert, certChain, 0644); err != nil {
+ return nil, err
+ }
+ if err := ioutils.AtomicWriteFile(paths.Key, key, 0600); err != nil {
+ return nil, err
+ }
+
+ // Load a valid tls.Certificate from the chain and the key
+ serverCert, err := tls.X509KeyPair(certChain, key)
+ if err != nil {
+ return nil, err
+ }
+
+ return &serverCert, nil
+}
+
+// GenerateAndWriteNewKey generates a new pub/priv key pair, writes it to disk
+// and returns the CSR and the private key material
+func GenerateAndWriteNewKey(paths CertPaths) (csr, key []byte, err error) {
+ // Generate a new key pair
+ csr, key, err = generateNewCSR()
+ if err != nil {
+ return
+ }
+
+ // Ensure directory exists
+ err = os.MkdirAll(filepath.Dir(paths.Key), 0755)
+ if err != nil {
+ return
+ }
+
+ if err = ioutils.AtomicWriteFile(paths.Key, key, 0600); err != nil {
+ return
+ }
+
+ return
+}
+
+// GetRemoteSignedCertificate submits a CSR together with the intended role to a remote CA server address
+// available through a picker, which is part of a CA identified by a specific certificate pool.
+func GetRemoteSignedCertificate(ctx context.Context, csr []byte, role, secret string, rootCAPool *x509.CertPool, picker *picker.Picker, creds credentials.TransportAuthenticator, nodeInfo chan<- string) ([]byte, error) {
+ if rootCAPool == nil {
+ return nil, fmt.Errorf("valid root CA pool required")
+ }
+ if picker == nil {
+ return nil, fmt.Errorf("valid remote address picker required")
+ }
+
+ if creds == nil {
+ // This is our only non-MTLS request, and it happens when we are bootstrapping our TLS certs
+ // We're using CARole as server name, so an external CA doesn't also have to have ManagerRole in the cert SANs
+ creds = credentials.NewTLS(&tls.Config{ServerName: CARole, RootCAs: rootCAPool})
+ }
+
+ opts := []grpc.DialOption{
+ grpc.WithTransportCredentials(creds),
+ grpc.WithBackoffMaxDelay(10 * time.Second),
+ grpc.WithPicker(picker)}
+
+ firstAddr, err := picker.PickAddr()
+ if err != nil {
+ return nil, err
+ }
+
+ conn, err := grpc.Dial(firstAddr, opts...)
+ if err != nil {
+ return nil, err
+ }
+ defer conn.Close()
+
+ // Create a CAClient to retrieve a new Certificate
+ caClient := api.NewNodeCAClient(conn)
+
+ // Convert our internal string roles into an API role
+ apiRole, err := FormatRole(role)
+ if err != nil {
+ return nil, err
+ }
+
+ // Send the Request and retrieve the request token
+ issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Role: apiRole, Secret: secret}
+ issueResponse, err := caClient.IssueNodeCertificate(ctx, issueRequest)
+ if err != nil {
+ return nil, err
+ }
+
+ nodeID := issueResponse.NodeID
+ // Send back the NodeID on the nodeInfo channel, so the caller can know what ID was assigned by the CA
+ if nodeInfo != nil {
+ nodeInfo <- nodeID
+ }
+
+ statusRequest := &api.NodeCertificateStatusRequest{NodeID: nodeID}
+ expBackoff := events.NewExponentialBackoff(events.ExponentialBackoffConfig{
+ Base: time.Second,
+ Factor: time.Second,
+ Max: 30 * time.Second,
+ })
+
+ log.Infof("Waiting for TLS certificate to be issued...")
+ // Exponential backoff with Max of 30 seconds to wait for a new retry
+ for {
+ // Send the Request and retrieve the certificate
+ statusResponse, err := caClient.NodeCertificateStatus(ctx, statusRequest)
+ if err != nil {
+ return nil, err
+ }
+
+ // If the certificate was issued, return
+ if statusResponse.Status.State == api.IssuanceStateIssued {
+ if statusResponse.Certificate == nil {
+ return nil, fmt.Errorf("no certificate in CertificateStatus response")
+ }
+ return statusResponse.Certificate.Certificate, nil
+ }
+
+ // If we're still pending, the issuance failed, or the state is unknown,
+ // let's continue trying.
+ expBackoff.Failure(nil, nil)
+ time.Sleep(expBackoff.Proceed(nil))
+ }
+}
+
+// readCertExpiration returns the time left until the certificate expires
+func readCertExpiration(paths CertPaths) (time.Duration, error) {
+ // Read the Cert
+ cert, err := ioutil.ReadFile(paths.Cert)
+ if err != nil {
+ log.Debugf("failed to read certificate file: %s", paths.Cert)
+ return time.Hour, err
+ }
+
+ // Create an x509 certificate out of the contents on disk
+ certBlock, _ := pem.Decode([]byte(cert))
+ if certBlock == nil {
+ return time.Hour, fmt.Errorf("failed to decode certificate block")
+ }
+ X509Cert, err := x509.ParseCertificate(certBlock.Bytes)
+ if err != nil {
+ return time.Hour, err
+ }
+
+ return X509Cert.NotAfter.Sub(time.Now()), nil
+}
+
+func saveRootCA(rootCA RootCA, paths CertPaths) error {
+ // Make sure the necessary dirs exist and they are writable
+ err := os.MkdirAll(filepath.Dir(paths.Cert), 0755)
+ if err != nil {
+ return err
+ }
+
+ // If the root certificate was retrieved successfully, save it to disk.
+ return ioutils.AtomicWriteFile(paths.Cert, rootCA.Cert, 0644)
+}
+
+func generateNewCSR() (csr, key []byte, err error) {
+ req := &cfcsr.CertificateRequest{
+ KeyRequest: cfcsr.NewBasicKeyRequest(),
+ }
+
+ csr, key, err = cfcsr.ParseRequest(req)
+ if err != nil {
+ log.Debugf(`failed to generate CSR`)
+ return
+ }
+
+ return
+}
+
+// EncryptECPrivateKey receives a PEM encoded private key and returns an encrypted
+// AES256 version using a passphrase
+// TODO: Make this method generic to handle RSA keys
+func EncryptECPrivateKey(key []byte, passphraseStr string) ([]byte, error) {
+ passphrase := []byte(passphraseStr)
+ cipherType := x509.PEMCipherAES256
+
+ keyBlock, _ := pem.Decode(key)
+ if keyBlock == nil {
+ // The key material could not be decoded as a PEM block.
+ return nil, fmt.Errorf("error while decoding PEM key")
+ }
+
+ encryptedPEMBlock, err := x509.EncryptPEMBlock(rand.Reader,
+ "EC PRIVATE KEY",
+ keyBlock.Bytes,
+ passphrase,
+ cipherType)
+ if err != nil {
+ return nil, err
+ }
+
+ if encryptedPEMBlock.Headers == nil {
+ return nil, fmt.Errorf("unable to encrypt key - invalid PEM file produced")
+ }
+
+ return pem.EncodeToMemory(encryptedPEMBlock), nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/ca/config.go b/vendor/src/github.com/docker/swarmkit/ca/config.go
new file mode 100644
index 0000000000..61489eee95
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/ca/config.go
@@ -0,0 +1,513 @@
+package ca
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ log "github.com/Sirupsen/logrus"
+ cfconfig "github.com/cloudflare/cfssl/config"
+ "github.com/docker/distribution/digest"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/identity"
+ "github.com/docker/swarmkit/picker"
+
+ "golang.org/x/net/context"
+)
+
+const (
+ rootCACertFilename = "swarm-root-ca.crt"
+ rootCAKeyFilename = "swarm-root-ca.key"
+ nodeTLSCertFilename = "swarm-node.crt"
+ nodeTLSKeyFilename = "swarm-node.key"
+ nodeCSRFilename = "swarm-node.csr"
+)
+
+const (
+ rootCN = "swarm-ca"
+ // ManagerRole represents the Manager node type, and is used for authorization to endpoints
+ ManagerRole = "swarm-manager"
+ // AgentRole represents the Agent node type, and is used for authorization to endpoints
+ AgentRole = "swarm-worker"
+ // CARole represents the CA node type, and is used for clients attempting to get new certificates issued
+ CARole = "swarm-ca"
+)
+
+// SecurityConfig is used to represent a node's security configuration. It includes information about
+// the RootCA and ServerTLSCreds/ClientTLSCreds transport authenticators to be used for MTLS
+type SecurityConfig struct {
+ mu sync.Mutex
+
+ rootCA *RootCA
+
+ ServerTLSCreds *MutableTLSCreds
+ ClientTLSCreds *MutableTLSCreds
+}
+
+// CertificateUpdate represents a change in the underlying TLS configuration being returned by
+// a certificate renewal event.
+type CertificateUpdate struct {
+ Role string
+ Err error
+}
+
+// NewSecurityConfig initializes and returns a new SecurityConfig.
+func NewSecurityConfig(rootCA *RootCA, clientTLSCreds, serverTLSCreds *MutableTLSCreds) *SecurityConfig {
+ return &SecurityConfig{
+ rootCA: rootCA,
+ ClientTLSCreds: clientTLSCreds,
+ ServerTLSCreds: serverTLSCreds,
+ }
+}
+
+// RootCA returns the root CA.
+func (s *SecurityConfig) RootCA() *RootCA {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ return s.rootCA
+}
+
+// UpdateRootCA replaces the root CA with a new root CA based on the specified
+// certificate, key, and the duration for which issued certificates should be valid.
+func (s *SecurityConfig) UpdateRootCA(cert, key []byte, certExpiry time.Duration) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ rootCA, err := NewRootCA(cert, key, certExpiry)
+ if err == nil {
+ s.rootCA = &rootCA
+ }
+
+ return err
+}
+
+// DefaultPolicy is the default policy used by the signers to ensure that the only fields
+// from the remote CSRs we trust are: PublicKey, PublicKeyAlgorithm and SignatureAlgorithm.
+func DefaultPolicy() *cfconfig.Signing {
+ return SigningPolicy(DefaultNodeCertExpiration)
+}
+
+// SigningPolicy creates a policy used by the signer to ensure that the only fields
+// from the remote CSRs we trust are: PublicKey, PublicKeyAlgorithm and SignatureAlgorithm.
+// It receives the duration a certificate will be valid for
+func SigningPolicy(certExpiry time.Duration) *cfconfig.Signing {
+ // If the expiration is below the minimum, fall back to the default certificate expiration
+ if certExpiry < MinNodeCertExpiration {
+ certExpiry = DefaultNodeCertExpiration
+ }
+
+ return &cfconfig.Signing{
+ Default: &cfconfig.SigningProfile{
+ Usage: []string{"signing", "key encipherment", "server auth", "client auth"},
+ Expiry: certExpiry,
+ // Only trust the key components from the CSR. Everything else should
+ // come directly from API call params.
+ CSRWhitelist: &cfconfig.CSRWhitelist{
+ PublicKey: true,
+ PublicKeyAlgorithm: true,
+ SignatureAlgorithm: true,
+ },
+ },
+ }
+}
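+
+// For example, SigningPolicy(DefaultNodeCertExpiration) yields a profile that
+// issues certificates valid for 3 months while trusting only the key material
+// from the CSR; expirations below MinNodeCertExpiration are replaced by the
+// default.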
+
+// SecurityConfigPaths is used as a helper to hold all the paths of security relevant files
+type SecurityConfigPaths struct {
+ Node, RootCA CertPaths
+}
+
+// NewConfigPaths returns the absolute paths to all of the different types of files
+func NewConfigPaths(baseCertDir string) *SecurityConfigPaths {
+ return &SecurityConfigPaths{
+ Node: CertPaths{
+ Cert: filepath.Join(baseCertDir, nodeTLSCertFilename),
+ Key: filepath.Join(baseCertDir, nodeTLSKeyFilename)},
+ RootCA: CertPaths{
+ Cert: filepath.Join(baseCertDir, rootCACertFilename),
+ Key: filepath.Join(baseCertDir, rootCAKeyFilename)},
+ }
+}
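+
+// For example (hypothetical base directory), NewConfigPaths("/var/lib/swarm/certificates")
+// places the node credentials at swarm-node.crt/swarm-node.key and the root CA
+// material at swarm-root-ca.crt/swarm-root-ca.key under that directory.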
+
+// LoadOrCreateSecurityConfig encapsulates the security logic behind joining a cluster.
+// Every node requires at least a set of TLS certificates with which to join the cluster.
+// In the case of a manager, these certificates will be used both for client and server credentials.
+func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret, proposedRole string, picker *picker.Picker, nodeInfo chan<- string) (*SecurityConfig, error) {
+ paths := NewConfigPaths(baseCertDir)
+
+ var (
+ rootCA RootCA
+ serverTLSCreds, clientTLSCreds *MutableTLSCreds
+ err error
+ )
+
+ // Check if we already have a CA certificate on disk. We need a CA to have a valid SecurityConfig
+ rootCA, err = GetLocalRootCA(baseCertDir)
+ switch err {
+ case nil:
+ log.Debugf("loaded local CA certificate: %s.", paths.RootCA.Cert)
+ case ErrNoLocalRootCA:
+ log.Debugf("no valid local CA certificate found: %v", err)
+
+ // Get a digest for the optional CA hash string that we've been provided
+ // If we were provided a non-empty string and it is an invalid hash, return
+ // an error; otherwise, allow the (possibly empty) digest through.
+ d, err := digest.ParseDigest(caHash)
+ if err != nil && caHash != "" {
+ return nil, err
+ }
+
+ // Get the remote CA certificate, verify integrity with the hash provided
+ rootCA, err = GetRemoteCA(ctx, d, picker)
+ if err != nil {
+ return nil, err
+ }
+
+ // Save root CA certificate to disk
+ if err = saveRootCA(rootCA, paths.RootCA); err != nil {
+ return nil, err
+ }
+
+ log.Debugf("downloaded remote CA certificate.")
+ default:
+ return nil, err
+ }
+
+ // At this point we've successfully loaded the CA details from disk, or successfully
+ // downloaded them remotely.
+ // The next step is to try to load our certificates.
+ clientTLSCreds, serverTLSCreds, err = LoadTLSCreds(rootCA, paths.Node)
+ if err != nil {
+ log.Debugf("no valid local TLS credentials found: %v", err)
+
+ var (
+ tlsKeyPair *tls.Certificate
+ err error
+ )
+
+ if rootCA.CanSign() {
+ // Create a new random ID for this certificate
+ cn := identity.NewNodeID()
+ org := identity.NewID()
+
+ if nodeInfo != nil {
+ nodeInfo <- cn
+ }
+ tlsKeyPair, err = rootCA.IssueAndSaveNewCertificates(paths.Node, cn, proposedRole, org)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ // There was an error loading our Credentials, let's get a new certificate issued
+ // Last argument is nil because at this point we don't have any valid TLS creds
+ tlsKeyPair, err = rootCA.RequestAndSaveNewCertificates(ctx, paths.Node, proposedRole, secret, picker, nil, nodeInfo)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ // Create the Server TLS Credentials for this node. These will not be used by agents.
+ serverTLSCreds, err = rootCA.NewServerTLSCredentials(tlsKeyPair)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create a TLSConfig to be used when this node connects as a client to another remote node.
+ // We're using ManagerRole as remote serverName for TLS host verification
+ clientTLSCreds, err = rootCA.NewClientTLSCredentials(tlsKeyPair, ManagerRole)
+ if err != nil {
+ return nil, err
+ }
+ log.Debugf("new TLS credentials generated: %s.", paths.Node.Cert)
+ } else {
+ if nodeInfo != nil {
+ nodeInfo <- clientTLSCreds.NodeID()
+ }
+ log.Debugf("loaded local TLS credentials: %s.", paths.Node.Cert)
+ }
+
+ return &SecurityConfig{
+ rootCA: &rootCA,
+
+ ServerTLSCreds: serverTLSCreds,
+ ClientTLSCreds: clientTLSCreds,
+ }, nil
+}
+
+// RenewTLSConfig will continuously monitor for the necessity of renewing the local certificates, either by
+// issuing them locally if key-material is available, or requesting them from a remote CA.
+func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string, picker *picker.Picker, renew <-chan struct{}) <-chan CertificateUpdate {
+ paths := NewConfigPaths(baseCertDir)
+ updates := make(chan CertificateUpdate)
+
+ go func() {
+ var retry time.Duration
+ defer close(updates)
+ for {
+ // Our starting default will be 5 minutes
+ retry = 5 * time.Minute
+
+ // Since the expiration of the certificate is managed remotely we should update our
+ // retry timer on every iteration of this loop.
+ // Retrieve the time until the certificate expires.
+ expiresIn, err := readCertExpiration(paths.Node)
+ if err != nil {
+ // We failed to read the expiration, let's stick with the starting default
+ log.Errorf("failed to read the expiration of the TLS certificate in: %s", paths.Node.Cert)
+ updates <- CertificateUpdate{Err: fmt.Errorf("failed to read certificate expiration")}
+ } else {
+ // If we have an expired certificate, let's stick with the starting default in
+ // the hope that this is a temporary clock skew.
+ if expiresIn.Minutes() < 0 {
+ log.Debugf("the current TLS certificate is expired")
+ updates <- CertificateUpdate{Err: fmt.Errorf("TLS Certificate is expired")}
+ } else {
+ // Random retry time between 50% and 80% of the total time to expiration
+ retry = calculateRandomExpiry(expiresIn)
+ }
+ }
+
+ select {
+ case <-time.After(retry):
+ case <-renew:
+ case <-ctx.Done():
+ return
+ }
+ log.Infof("Renewing TLS Certificate.")
+
+ // Let's request new certs. Renewals don't require a secret.
+ rootCA := s.RootCA()
+ tlsKeyPair, err := rootCA.RequestAndSaveNewCertificates(ctx,
+ paths.Node,
+ s.ClientTLSCreds.Role(),
+ "",
+ picker,
+ s.ClientTLSCreds,
+ nil)
+ if err != nil {
+ log.Debugf("failed to renew the TLS Certificate: %v", err)
+ updates <- CertificateUpdate{Err: err}
+ continue
+ }
+
+ clientTLSConfig, err := NewClientTLSConfig(tlsKeyPair, rootCA.Pool, CARole)
+ if err != nil {
+ log.Debugf("failed to create a new client TLS config: %v", err)
+ updates <- CertificateUpdate{Err: err}
+ continue
+ }
+ serverTLSConfig, err := NewServerTLSConfig(tlsKeyPair, rootCA.Pool)
+ if err != nil {
+ log.Debugf("failed to create a new server TLS config: %v", err)
+ updates <- CertificateUpdate{Err: err}
+ continue
+ }
+
+ err = s.ClientTLSCreds.LoadNewTLSConfig(clientTLSConfig)
+ if err != nil {
+ log.Debugf("failed to update the client TLS credentials: %v", err)
+ updates <- CertificateUpdate{Err: err}
+ }
+
+ err = s.ServerTLSCreds.LoadNewTLSConfig(serverTLSConfig)
+ if err != nil {
+ log.Debugf("failed to update the server TLS credentials: %v", err)
+ updates <- CertificateUpdate{Err: err}
+ }
+
+ updates <- CertificateUpdate{Role: s.ClientTLSCreds.Role()}
+ }
+ }()
+
+ return updates
+}
+
+// calculateRandomExpiry returns a random duration between 50% and 80% of the original
+// duration
+func calculateRandomExpiry(expiresIn time.Duration) time.Duration {
+ if expiresIn.Minutes() < 1 {
+ return time.Second
+ }
+
+ var randomExpiry int
+ // Our lower bound of renewal will be half of the total expiration time
+ minValidity := int(expiresIn.Minutes() * CertLowerRotationRange)
+ // Our upper bound of renewal will be 80% of the total expiration time
+ maxValidity := int(expiresIn.Minutes() * CertUpperRotationRange)
+ // Let's select a random number of minutes between min and max, and set our retry for that
+ // Using randomly selected rotation allows us to avoid certificate thundering herds.
+ if maxValidity-minValidity < 1 {
+ randomExpiry = minValidity
+ } else {
+ randomExpiry = rand.Intn(maxValidity-minValidity) + minValidity
+ }
+
+ return time.Duration(randomExpiry) * time.Minute
+}
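For a concrete sense of the window: a certificate with 10 hours left is renewed somewhere between 5 and 8 hours from now. A standalone sketch, assuming CertLowerRotationRange is 0.5 and CertUpperRotationRange is 0.8 as the comments above describe:

    expiresIn := 10 * time.Hour
    lower := time.Duration(float64(expiresIn) * 0.5) // 5h, mirrors CertLowerRotationRange
    upper := time.Duration(float64(expiresIn) * 0.8) // 8h, mirrors CertUpperRotationRange
    fmt.Printf("renewal fires between %v and %v\n", lower, upper)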
+
+// LoadTLSCreds loads TLS credentials from the specified paths and verifies that
+// they are valid for the RootCA.
+func LoadTLSCreds(rootCA RootCA, paths CertPaths) (*MutableTLSCreds, *MutableTLSCreds, error) {
+ // Read both the Cert and Key from disk
+ cert, err := ioutil.ReadFile(paths.Cert)
+ if err != nil {
+ return nil, nil, err
+ }
+ key, err := ioutil.ReadFile(paths.Key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Create an x509 certificate out of the contents on disk
+ certBlock, _ := pem.Decode([]byte(cert))
+ if certBlock == nil {
+ return nil, nil, fmt.Errorf("failed to parse certificate PEM")
+ }
+
+ // Create an X509Cert so we can .Verify()
+ X509Cert, err := x509.ParseCertificate(certBlock.Bytes)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Include our root pool
+ opts := x509.VerifyOptions{
+ Roots: rootCA.Pool,
+ }
+
+ // Check to see if this certificate was signed by our CA, and isn't expired
+ if _, err := X509Cert.Verify(opts); err != nil {
+ return nil, nil, err
+ }
+
+ // Now that we know this certificate is valid, create a TLS Certificate for our
+ // credentials
+ var (
+ keyPair tls.Certificate
+ newErr error
+ )
+ keyPair, err = tls.X509KeyPair(cert, key)
+ if err != nil {
+ // This current keypair isn't valid. It's possible we crashed before we
+ // overwrote the current key. Let's try loading it from disk.
+ tempPaths := genTempPaths(paths)
+ key, newErr = ioutil.ReadFile(tempPaths.Key)
+ if newErr != nil {
+ return nil, nil, err
+ }
+
+ keyPair, newErr = tls.X509KeyPair(cert, key)
+ if newErr != nil {
+ return nil, nil, err
+ }
+ }
+
+ // Load the Certificates as server credentials
+ serverTLSCreds, err := rootCA.NewServerTLSCredentials(&keyPair)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Load the Certificates also as client credentials.
+ // Both Agents and Managers always connect to remote Managers,
+ // so ServerName is always set to ManagerRole here.
+ clientTLSCreds, err := rootCA.NewClientTLSCredentials(&keyPair, ManagerRole)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return clientTLSCreds, serverTLSCreds, nil
+}
+
+func genTempPaths(path CertPaths) CertPaths {
+ return CertPaths{
+ Key: filepath.Join(filepath.Dir(path.Key), "."+filepath.Base(path.Key)),
+ Cert: filepath.Join(filepath.Dir(path.Cert), "."+filepath.Base(path.Cert)),
+ }
+}
+
+// NewServerTLSConfig returns a tls.Config configured for a TLS Server, given a tls.Certificate
+// and the PEM-encoded root CA Certificate
+func NewServerTLSConfig(cert *tls.Certificate, rootCAPool *x509.CertPool) (*tls.Config, error) {
+ if rootCAPool == nil {
+ return nil, fmt.Errorf("valid root CA pool required")
+ }
+
+ return &tls.Config{
+ Certificates: []tls.Certificate{*cert},
+ // Since we're using the same CA server to issue Certificates to new nodes, we can't
+ // use tls.RequireAndVerifyClientCert
+ ClientAuth: tls.VerifyClientCertIfGiven,
+ RootCAs: rootCAPool,
+ ClientCAs: rootCAPool,
+ PreferServerCipherSuites: true,
+ MinVersion: tls.VersionTLS12,
+ }, nil
+}
+
+// NewClientTLSConfig returns a tls.Config configured for a TLS Client, given a tls.Certificate
+// the PEM-encoded root CA Certificate, and the name of the remote server the client wants to connect to.
+func NewClientTLSConfig(cert *tls.Certificate, rootCAPool *x509.CertPool, serverName string) (*tls.Config, error) {
+ if rootCAPool == nil {
+ return nil, fmt.Errorf("valid root CA pool required")
+ }
+
+ return &tls.Config{
+ ServerName: serverName,
+ Certificates: []tls.Certificate{*cert},
+ RootCAs: rootCAPool,
+ MinVersion: tls.VersionTLS12,
+ }, nil
+}
+
+// NewClientTLSCredentials returns GRPC credentials for a TLS GRPC client, given a tls.Certificate,
+// a PEM-encoded root CA certificate, and the name of the remote server the client wants to connect to.
+func (rca *RootCA) NewClientTLSCredentials(cert *tls.Certificate, serverName string) (*MutableTLSCreds, error) {
+ tlsConfig, err := NewClientTLSConfig(cert, rca.Pool, serverName)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewMutableTLS(tlsConfig)
+}
+
+// NewServerTLSCredentials returns GRPC credentials for a TLS GRPC server, given a tls.Certificate
+// and the PEM-encoded root CA certificate.
+func (rca *RootCA) NewServerTLSCredentials(cert *tls.Certificate) (*MutableTLSCreds, error) {
+ tlsConfig, err := NewServerTLSConfig(cert, rca.Pool)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewMutableTLS(tlsConfig)
+}
+
+// ParseRole parses an apiRole into an internal role string
+func ParseRole(apiRole api.NodeRole) (string, error) {
+ switch apiRole {
+ case api.NodeRoleManager:
+ return ManagerRole, nil
+ case api.NodeRoleWorker:
+ return AgentRole, nil
+ default:
+ return "", fmt.Errorf("failed to parse api role: %v", apiRole)
+ }
+}
+
+// FormatRole parses an internal role string into an apiRole
+func FormatRole(role string) (api.NodeRole, error) {
+ switch strings.ToLower(role) {
+ case strings.ToLower(ManagerRole):
+ return api.NodeRoleManager, nil
+ case strings.ToLower(AgentRole):
+ return api.NodeRoleWorker, nil
+ default:
+ return 0, fmt.Errorf("failed to parse role: %s", role)
+ }
+}
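A round-trip through these two helpers, as a sketch that assumes the ca and api packages shown in this file:

    role, err := ca.ParseRole(api.NodeRoleManager) // role == ca.ManagerRole
    if err != nil {
        return err
    }
    apiRole, err := ca.FormatRole(role) // apiRole == api.NodeRoleManager
    if err != nil {
        return err
    }
    _ = apiRole // round-trip complete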
diff --git a/vendor/src/github.com/docker/swarmkit/ca/forward.go b/vendor/src/github.com/docker/swarmkit/ca/forward.go
new file mode 100644
index 0000000000..ca84af3ff0
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/ca/forward.go
@@ -0,0 +1,67 @@
+package ca
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/metadata"
+)
+
+const (
+ certForwardedKey = "forwarded_cert"
+ certCNKey = "forwarded_cert_cn"
+ certOUKey = "forwarded_cert_ou"
+ certOrgKey = "forwarded_cert_org"
+)
+
+// forwardedTLSInfoFromContext obtains the forwarded TLS CN, Org, and OUs from
+// the grpc.MD object in ctx.
+func forwardedTLSInfoFromContext(ctx context.Context) (string, string, []string) {
+ var cn, org string
+ md, _ := metadata.FromContext(ctx)
+ if len(md[certCNKey]) != 0 {
+ cn = md[certCNKey][0]
+ }
+ if len(md[certOrgKey]) != 0 {
+ org = md[certOrgKey][0]
+ }
+ return cn, org, md[certOUKey]
+}
+
+func isForwardedRequest(ctx context.Context) bool {
+ md, _ := metadata.FromContext(ctx)
+ if len(md[certForwardedKey]) != 1 {
+ return false
+ }
+ return md[certForwardedKey][0] == "true"
+}
+
+// WithMetadataForwardTLSInfo reads certificate from context and returns context where
+// ForwardCert is set based on original certificate.
+func WithMetadataForwardTLSInfo(ctx context.Context) (context.Context, error) {
+ md, ok := metadata.FromContext(ctx)
+ if !ok {
+ md = metadata.MD{}
+ }
+
+ ous := []string{}
+ org := ""
+ cn := ""
+
+ certSubj, err := certSubjectFromContext(ctx)
+ if err == nil {
+ cn = certSubj.CommonName
+ ous = certSubj.OrganizationalUnit
+ if len(certSubj.Organization) > 0 {
+ org = certSubj.Organization[0]
+ }
+ }
+ // If there's no TLS cert, forward with blank TLS metadata.
+ // Note that the presence of this blank metadata is extremely
+ // important. Without it, it would look like manager is making
+ // the request directly.
+ md[certForwardedKey] = []string{"true"}
+ md[certCNKey] = []string{cn}
+ md[certOrgKey] = []string{org}
+ md[certOUKey] = ous
+
+ return metadata.NewContext(ctx, md), nil
+}
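To illustrate the intended use: a manager that proxies a node's request decorates the outgoing context first, so the receiving manager sees the original caller's identity in the forwarded_cert_* metadata rather than the proxying manager's own certificate. nodeCAClient below is a hypothetical gRPC client for the NodeCA service:

    fwdCtx, err := ca.WithMetadataForwardTLSInfo(ctx)
    if err != nil {
        return nil, err
    }
    return nodeCAClient.IssueNodeCertificate(fwdCtx, request)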
diff --git a/vendor/src/github.com/docker/swarmkit/ca/server.go b/vendor/src/github.com/docker/swarmkit/ca/server.go
new file mode 100644
index 0000000000..1bbc77fae4
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/ca/server.go
@@ -0,0 +1,648 @@
+package ca
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/identity"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+ "golang.org/x/crypto/bcrypt"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+// Server is the CA and NodeCA API gRPC server.
+// TODO(diogo): At some point we may want to have separate implementations of
+// CA, NodeCA, and other hypothetical future CA services. At the moment,
+// breaking it apart doesn't seem worth it.
+type Server struct {
+ mu sync.Mutex
+ wg sync.WaitGroup
+ ctx context.Context
+ cancel func()
+ store *store.MemoryStore
+ securityConfig *SecurityConfig
+ acceptancePolicy *api.AcceptancePolicy
+}
+
+// DefaultAcceptancePolicy returns the default acceptance policy.
+func DefaultAcceptancePolicy() api.AcceptancePolicy {
+ return api.AcceptancePolicy{
+ Policies: []*api.AcceptancePolicy_RoleAdmissionPolicy{
+ {
+ Role: api.NodeRoleWorker,
+ Autoaccept: true,
+ },
+ {
+ Role: api.NodeRoleManager,
+ Autoaccept: false,
+ },
+ },
+ }
+}
+
+// DefaultCAConfig returns the default CA Config, with a default expiration.
+func DefaultCAConfig() api.CAConfig {
+ return api.CAConfig{
+ NodeCertExpiry: ptypes.DurationProto(DefaultNodeCertExpiration),
+ }
+}
+
+// NewServer creates a CA API server.
+func NewServer(store *store.MemoryStore, securityConfig *SecurityConfig) *Server {
+ return &Server{
+ store: store,
+ securityConfig: securityConfig,
+ }
+}
+
+// NodeCertificateStatus returns the current issuance status of the certificate request identified by the nodeID
+func (s *Server) NodeCertificateStatus(ctx context.Context, request *api.NodeCertificateStatusRequest) (*api.NodeCertificateStatusResponse, error) {
+ if request.NodeID == "" {
+ return nil, grpc.Errorf(codes.InvalidArgument, codes.InvalidArgument.String())
+ }
+
+ if err := s.addTask(); err != nil {
+ return nil, err
+ }
+ defer s.doneTask()
+
+ var node *api.Node
+
+ event := state.EventUpdateNode{
+ Node: &api.Node{ID: request.NodeID},
+ Checks: []state.NodeCheckFunc{state.NodeCheckID},
+ }
+
+ // Retrieve the current value of the certificate for this node ID, and create a watcher
+ updates, cancel, err := store.ViewAndWatch(
+ s.store,
+ func(tx store.ReadTx) error {
+ node = store.GetNode(tx, request.NodeID)
+ return nil
+ },
+ event,
+ )
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+
+ // This node ID doesn't exist
+ if node == nil {
+ return nil, grpc.Errorf(codes.NotFound, codes.NotFound.String())
+ }
+
+ log.G(ctx).WithFields(logrus.Fields{
+ "node.id": node.ID,
+ "status": node.Certificate.Status,
+ "method": "NodeCertificateStatus",
+ }).Debugf("checking certificate status")
+
+ // If this certificate has a final state, return it immediately (both pending and renew are transition states)
+ if isFinalState(node.Certificate.Status) {
+ return &api.NodeCertificateStatusResponse{
+ Status: &node.Certificate.Status,
+ Certificate: &node.Certificate,
+ }, nil
+ }
+
+ log.G(ctx).WithFields(logrus.Fields{
+ "node.id": node.ID,
+ "status": node.Certificate.Status,
+ "method": "NodeCertificateStatus",
+ }).Debugf("started watching for certificate updates")
+
+ // Certificate is Pending or in an Unknown state, let's wait for changes.
+ for {
+ select {
+ case event := <-updates:
+ switch v := event.(type) {
+ case state.EventUpdateNode:
+ // We got an update on the certificate record. If the status is a final state,
+ // return the certificate.
+ if isFinalState(v.Node.Certificate.Status) {
+ cert := v.Node.Certificate.Copy()
+ return &api.NodeCertificateStatusResponse{
+ Status: &cert.Status,
+ Certificate: cert,
+ }, nil
+ }
+ }
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-s.ctx.Done():
+ return nil, s.ctx.Err()
+ }
+ }
+}
+
+// IssueNodeCertificate is responsible for gatekeeping both certificate requests from new nodes in the swarm,
+// and authorizing certificate renewals.
+// If a node presented a valid certificate, the corresponding certificate is set to the RENEW state.
+// If a node failed to present a valid certificate, we enforce all the policies currently configured in
+// the swarm for node acceptance: we check the validity of the presented secret and determine which
+// acceptance state the certificate should be put in (PENDING or ACCEPTED).
+// After going through the configured policies, a new random node ID is generated, and the corresponding node
+// entry is created. IssueNodeCertificate is the only place where new node entries to raft should be created.
+func (s *Server) IssueNodeCertificate(ctx context.Context, request *api.IssueNodeCertificateRequest) (*api.IssueNodeCertificateResponse, error) {
+ // First, let's see if the remote node is proposing to be added as a valid node, and with a non-empty CSR
+ if len(request.CSR) == 0 || (request.Role != api.NodeRoleWorker && request.Role != api.NodeRoleManager) {
+ return nil, grpc.Errorf(codes.InvalidArgument, codes.InvalidArgument.String())
+ }
+
+ if err := s.addTask(); err != nil {
+ return nil, err
+ }
+ defer s.doneTask()
+
+ // If the remote node is an Agent (either forwarded by a manager, or calling directly),
+ // issue a renew agent certificate entry with the correct ID
+ nodeID, err := AuthorizeForwardedRoleAndOrg(ctx, []string{AgentRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization())
+ if err == nil {
+ return s.issueRenewCertificate(ctx, nodeID, request.CSR)
+ }
+
+ // If the remote node is a Manager (either forwarded by another manager, or calling directly),
+ // issue a renew certificate entry with the correct ID
+ nodeID, err = AuthorizeForwardedRoleAndOrg(ctx, []string{ManagerRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization())
+ if err == nil {
+ return s.issueRenewCertificate(ctx, nodeID, request.CSR)
+ }
+
+ // The remote node didn't successfully present a valid MTLS certificate, let's issue a PENDING
+ // certificate with a new random ID
+ nodeMembership := api.NodeMembershipPending
+
+ // If there are acceptance policies configured in the system, we should enforce them
+ policy := s.getRolePolicy(request.Role)
+ if policy != nil {
+ // If the policy has a Secret set, let's verify it
+ if policy.Secret != nil {
+ if err := checkSecretValidity(policy, request.Secret); err != nil {
+ return nil, grpc.Errorf(codes.InvalidArgument, "A valid secret token is necessary to join this cluster: %v", err)
+ }
+ }
+ // Check to see if our autoacceptance policy allows this node to be issued without manual intervention
+ if policy.Autoaccept {
+ nodeMembership = api.NodeMembershipAccepted
+ }
+ }
+
+ // Max number of collisions of ID or CN to tolerate before giving up
+ maxRetries := 3
+ // Generate a random ID for this new node
+ for i := 0; ; i++ {
+ nodeID = identity.NewNodeID()
+
+ // Create a new node
+ err := s.store.Update(func(tx store.Tx) error {
+ node := &api.Node{
+ ID: nodeID,
+ Certificate: api.Certificate{
+ CSR: request.CSR,
+ CN: nodeID,
+ Role: request.Role,
+ Status: api.IssuanceStatus{
+ State: api.IssuanceStatePending,
+ },
+ },
+ Spec: api.NodeSpec{
+ Role: request.Role,
+ Membership: nodeMembership,
+ },
+ }
+
+ return store.CreateNode(tx, node)
+ })
+ if err == nil {
+ log.G(ctx).WithFields(logrus.Fields{
+ "node.id": nodeID,
+ "node.role": request.Role,
+ "method": "IssueNodeCertificate",
+ }).Debugf("new certificate entry added")
+ break
+ }
+ if err != store.ErrExist {
+ return nil, err
+ }
+ if i == maxRetries {
+ return nil, err
+ }
+ log.G(ctx).WithFields(logrus.Fields{
+ "node.id": nodeID,
+ "node.role": request.Role,
+ "method": "IssueNodeCertificate",
+ }).Errorf("randomly generated node ID collided with an existing one - retrying")
+ }
+
+ return &api.IssueNodeCertificateResponse{
+ NodeID: nodeID,
+ }, nil
+}
+
+// checkSecretValidity verifies if a secret string matches the secret hash stored in the
+// Acceptance Policy. It currently only supports bcrypted hashes.
+func checkSecretValidity(policy *api.AcceptancePolicy_RoleAdmissionPolicy, secret string) error {
+ if policy == nil || secret == "" {
+ return fmt.Errorf("invalid policy or secret")
+ }
+
+ switch strings.ToLower(policy.Secret.Alg) {
+ case "bcrypt":
+ return bcrypt.CompareHashAndPassword(policy.Secret.Data, []byte(secret))
+ }
+
+ return fmt.Errorf("hash algorithm not supported: %s", policy.Secret.Alg)
+}
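Since only bcrypt is supported, the digest stored in the policy has to be produced with the same primitive. A sketch using the bcrypt package imported above; the secret value and the policy variable are illustrative:

    digest, err := bcrypt.GenerateFromPassword([]byte("example-join-secret"), bcrypt.DefaultCost)
    if err != nil {
        return err
    }
    // checkSecretValidity will compare candidate secrets against this digest.
    policy.Secret.Data = digest
    policy.Secret.Alg = "bcrypt"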
+
+// getRolePolicy is a helper method that returns all the admission policies that should be
+// enforced for a particular role
+func (s *Server) getRolePolicy(role api.NodeRole) *api.AcceptancePolicy_RoleAdmissionPolicy {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.acceptancePolicy != nil && len(s.acceptancePolicy.Policies) > 0 {
+ // Let's go through all the configured policies and try to find one for this role
+ for _, p := range s.acceptancePolicy.Policies {
+ if role == p.Role {
+ return p
+ }
+ }
+ }
+
+ return nil
+}
+
+// issueRenewCertificate receives a nodeID and a CSR and modifies the node's certificate entry with the new CSR
+// and changes the state to RENEW, so it can be picked up and signed by the signing reconciliation loop
+func (s *Server) issueRenewCertificate(ctx context.Context, nodeID string, csr []byte) (*api.IssueNodeCertificateResponse, error) {
+ var cert api.Certificate
+ err := s.store.Update(func(tx store.Tx) error {
+
+ // Attempt to retrieve the node with nodeID
+ node := store.GetNode(tx, nodeID)
+ if node == nil {
+ log.G(ctx).WithFields(logrus.Fields{
+ "node.id": nodeID,
+ "method": "issueRenewCertificate",
+ }).Warnf("node does not exist")
+ // If this node doesn't exist, we shouldn't be renewing a certificate for it
+ return grpc.Errorf(codes.NotFound, "node %s not found when attempting to renew certificate", nodeID)
+ }
+
+ // Create a new Certificate entry for this node with the new CSR and a RENEW state
+ cert = api.Certificate{
+ CSR: csr,
+ CN: node.ID,
+ Role: node.Spec.Role,
+ Status: api.IssuanceStatus{
+ State: api.IssuanceStateRenew,
+ },
+ }
+
+ node.Certificate = cert
+ return store.UpdateNode(tx, node)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ log.G(ctx).WithFields(logrus.Fields{
+ "cert.cn": cert.CN,
+ "cert.role": cert.Role,
+ "method": "issueRenewCertificate",
+ }).Debugf("node certificate updated")
+ return &api.IssueNodeCertificateResponse{
+ NodeID: nodeID,
+ }, nil
+}
+
+// GetRootCACertificate returns the certificate of the Root CA. It is used as a convenience for distributing
+// the root of trust for the swarm. Clients should use the CA hash to verify that they are not the target of
+// a MITM attack. If they fail to do so, node bootstrap works with TOFU semantics.
+func (s *Server) GetRootCACertificate(ctx context.Context, request *api.GetRootCACertificateRequest) (*api.GetRootCACertificateResponse, error) {
+ log.G(ctx).WithFields(logrus.Fields{
+ "method": "GetRootCACertificate",
+ }).Debugf("serving root CA certificate")
+
+ return &api.GetRootCACertificateResponse{
+ Certificate: s.securityConfig.RootCA().Cert,
+ }, nil
+}
+
+// Run runs the CA signer main loop.
+// The CA signer can be stopped with cancelling ctx or calling Stop().
+func (s *Server) Run(ctx context.Context) error {
+ s.mu.Lock()
+ if s.isRunning() {
+ s.mu.Unlock()
+ return fmt.Errorf("CA signer is stopped")
+ }
+ s.wg.Add(1)
+ defer s.wg.Done()
+ logger := log.G(ctx).WithField("module", "ca")
+ ctx = log.WithLogger(ctx, logger)
+ s.ctx, s.cancel = context.WithCancel(ctx)
+ s.mu.Unlock()
+
+ // Retrieve the channels to keep track of changes in the cluster
+ // Retrieve all the currently registered nodes
+ var nodes []*api.Node
+ updates, cancel, err := store.ViewAndWatch(
+ s.store,
+ func(readTx store.ReadTx) error {
+ clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
+ if err != nil {
+ return err
+ }
+ if len(clusters) != 1 {
+ return fmt.Errorf("could not find cluster object")
+ }
+ s.updateCluster(ctx, clusters[0])
+
+ nodes, err = store.FindNodes(readTx, store.All)
+ return err
+ },
+ state.EventCreateNode{},
+ state.EventUpdateNode{},
+ state.EventUpdateCluster{},
+ )
+ if err != nil {
+ log.G(ctx).WithFields(logrus.Fields{
+ "method": "(*Server).Run",
+ }).WithError(err).Errorf("snapshot store view failed")
+ return err
+ }
+ defer cancel()
+
+ // We might have missed some updates if there was a leader election,
+ // so let's pick up the slack.
+ if err := s.reconcileNodeCertificates(ctx, nodes); err != nil {
+ // We don't return here because that means the Run loop would
+ // never run. Log an error instead.
+ log.G(ctx).WithFields(logrus.Fields{
+ "method": "(*Server).Run",
+ }).WithError(err).Errorf("error attempting to reconcile certificates")
+ }
+
+ // Watch for new nodes being created, new nodes being updated, and changes
+ // to the cluster
+ for {
+ select {
+ case event := <-updates:
+ switch v := event.(type) {
+ case state.EventCreateNode:
+ s.evaluateAndSignNodeCert(ctx, v.Node)
+ case state.EventUpdateNode:
+ // If this certificate is already at a final state
+ // no need to evaluate and sign it.
+ if !isFinalState(v.Node.Certificate.Status) {
+ s.evaluateAndSignNodeCert(ctx, v.Node)
+ }
+ case state.EventUpdateCluster:
+ s.updateCluster(ctx, v.Cluster)
+ }
+
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-s.ctx.Done():
+ return nil
+ }
+ }
+}
+
+// Stop stops the CA and closes all grpc streams.
+func (s *Server) Stop() error {
+ s.mu.Lock()
+ if !s.isRunning() {
+ s.mu.Unlock()
+ return fmt.Errorf("CA signer is already stopped")
+ }
+ s.cancel()
+ s.mu.Unlock()
+ // Wait for all in-flight handlers to finish their work.
+ s.wg.Wait()
+ return nil
+}
+
+func (s *Server) addTask() error {
+ s.mu.Lock()
+ if !s.isRunning() {
+ s.mu.Unlock()
+ return grpc.Errorf(codes.Aborted, "CA signer is stopped")
+ }
+ s.wg.Add(1)
+ s.mu.Unlock()
+ return nil
+}
+
+func (s *Server) doneTask() {
+ s.wg.Done()
+}
+
+func (s *Server) isRunning() bool {
+ if s.ctx == nil {
+ return false
+ }
+ select {
+ case <-s.ctx.Done():
+ return false
+ default:
+ }
+ return true
+}
+
+// updateCluster is called when there are cluster changes, and it ensures that the local RootCA is
+// always aware of changes in clusterExpiry and the Root CA key material
+func (s *Server) updateCluster(ctx context.Context, cluster *api.Cluster) {
+ s.mu.Lock()
+ s.acceptancePolicy = cluster.Spec.AcceptancePolicy.Copy()
+ s.mu.Unlock()
+ var err error
+
+ // If the cluster has a RootCA, let's try to update our SecurityConfig to reflect the latest values
+ rCA := cluster.RootCA
+ if len(rCA.CACert) != 0 && len(rCA.CAKey) != 0 {
+ expiry := DefaultNodeCertExpiration
+ if cluster.Spec.CAConfig.NodeCertExpiry != nil {
+ // NodeCertExpiry exists, let's try to parse the duration out of it
+ clusterExpiry, err := ptypes.Duration(cluster.Spec.CAConfig.NodeCertExpiry)
+ if err != nil {
+ log.G(ctx).WithFields(logrus.Fields{
+ "cluster.id": cluster.ID,
+ "method": "(*Server).updateCluster",
+ }).WithError(err).Warn("failed to parse certificate expiration, using default")
+ } else {
+ // We were able to successfully parse the expiration out of the cluster.
+ expiry = clusterExpiry
+ }
+ } else {
+ // NodeCertExpiry is nil, so fall back to the default expiration
+ log.G(ctx).WithFields(logrus.Fields{
+ "cluster.id": cluster.ID,
+ "method": "(*Server).updateCluster",
+ }).Warn("no certificate expiration specified, using default")
+ }
+ // Attempt to update our local RootCA with the new parameters
+ err = s.securityConfig.UpdateRootCA(rCA.CACert, rCA.CAKey, expiry)
+ if err != nil {
+ log.G(ctx).WithFields(logrus.Fields{
+ "cluster.id": cluster.ID,
+ "method": "(*Server).updateCluster",
+ }).WithError(err).Error("updating Root CA failed")
+ } else {
+ log.G(ctx).WithFields(logrus.Fields{
+ "cluster.id": cluster.ID,
+ "method": "(*Server).updateCluster",
+ }).Debugf("Root CA updated successfully")
+ }
+ }
+}
+
+// evaluateAndSignNodeCert implements the logic of which certificates to sign
+func (s *Server) evaluateAndSignNodeCert(ctx context.Context, node *api.Node) {
+ // If the desired membership and actual state are in sync, there's
+ // nothing to do.
+ if node.Spec.Membership == api.NodeMembershipAccepted && node.Certificate.Status.State == api.IssuanceStateIssued {
+ return
+ }
+
+ // If the certificate state is renew, then it is a server-side accepted cert (a renewal)
+ if node.Certificate.Status.State == api.IssuanceStateRenew {
+ s.signNodeCert(ctx, node)
+ return
+ }
+
+ // Sign this certificate if a user explicitly changed it to Accepted, and
+ // the certificate is in pending state
+ if node.Spec.Membership == api.NodeMembershipAccepted && node.Certificate.Status.State == api.IssuanceStatePending {
+ s.signNodeCert(ctx, node)
+ }
+}
+
+// signNodeCert does the bulk of the work for signing a certificate
+func (s *Server) signNodeCert(ctx context.Context, node *api.Node) {
+ if !s.securityConfig.RootCA().CanSign() {
+ log.G(ctx).WithFields(logrus.Fields{
+ "node.id": node.ID,
+ "method": "(*Server).signNodeCert",
+ }).Errorf("no valid signer found")
+ return
+ }
+
+ node = node.Copy()
+ nodeID := node.ID
+ // Convert the role from proto format
+ role, err := ParseRole(node.Certificate.Role)
+ if err != nil {
+ log.G(ctx).WithFields(logrus.Fields{
+ "node.id": node.ID,
+ "method": "(*Server).signNodeCert",
+ }).WithError(err).Errorf("failed to parse role")
+ return
+ }
+
+ // Attempt to sign the CSR
+ cert, err := s.securityConfig.RootCA().ParseValidateAndSignCSR(node.Certificate.CSR, node.Certificate.CN, role, s.securityConfig.ClientTLSCreds.Organization())
+ if err != nil {
+ log.G(ctx).WithFields(logrus.Fields{
+ "node.id": node.ID,
+ "method": "(*Server).signNodeCert",
+ }).WithError(err).Errorf("failed to sign CSR")
+ // If this error is due to the lack of a signer, maybe some other
+ // manager will pick it up in the future. Return without
+ // changing the state of the certificate.
+ if err == ErrNoValidSigner {
+ return
+ }
+ // If the current state is already Failed, no need to change it
+ if node.Certificate.Status.State == api.IssuanceStateFailed {
+ return
+ }
+ // We failed to sign this CSR, change the state to FAILED
+ err = s.store.Update(func(tx store.Tx) error {
+ node := store.GetNode(tx, nodeID)
+ if node == nil {
+ return fmt.Errorf("node %s not found", nodeID)
+ }
+
+ node.Certificate.Status = api.IssuanceStatus{
+ State: api.IssuanceStateFailed,
+ Err: err.Error(),
+ }
+
+ return store.UpdateNode(tx, node)
+ })
+ if err != nil {
+ log.G(ctx).WithFields(logrus.Fields{
+ "node.id": nodeID,
+ "method": "(*Server).signNodeCert",
+ }).WithError(err).Errorf("transaction failed when setting state to FAILED")
+ }
+ return
+ }
+
+ // We were able to successfully sign the new CSR. Let's try to update the nodeStore
+ for {
+ err = s.store.Update(func(tx store.Tx) error {
+ // Remote nodes are expecting a full certificate chain, not just a signed certificate
+ node.Certificate.Certificate = append(cert, s.securityConfig.RootCA().Cert...)
+ node.Certificate.Status = api.IssuanceStatus{
+ State: api.IssuanceStateIssued,
+ }
+
+ err := store.UpdateNode(tx, node)
+ if err != nil {
+ node = store.GetNode(tx, nodeID)
+ if node == nil {
+ err = fmt.Errorf("node %s does not exist", nodeID)
+ }
+ }
+ return err
+ })
+ if err == nil {
+ log.G(ctx).WithFields(logrus.Fields{
+ "node.id": node.ID,
+ "node.role": node.Certificate.Role,
+ "method": "(*Server).signNodeCert",
+ }).Debugf("certificate issued")
+ break
+ }
+ if err == store.ErrSequenceConflict {
+ continue
+ }
+
+ log.G(ctx).WithFields(logrus.Fields{
+ "node.id": nodeID,
+ "method": "(*Server).signNodeCert",
+ }).WithError(err).Errorf("transaction failed")
+ return
+ }
+}
+
+// reconcileNodeCertificates is a helper method that calls evaluateAndSignNodeCert on all the
+// nodes.
+func (s *Server) reconcileNodeCertificates(ctx context.Context, nodes []*api.Node) error {
+ for _, node := range nodes {
+ s.evaluateAndSignNodeCert(ctx, node)
+ }
+
+ return nil
+}
+
+// A successfully issued certificate and a failed certificate are our current final states
+func isFinalState(status api.IssuanceStatus) bool {
+ return status.State == api.IssuanceStateIssued || status.State == api.IssuanceStateFailed
+}
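A minimal sketch of wiring the signer into a manager, assuming a populated MemoryStore and SecurityConfig are at hand:

    caServer := ca.NewServer(memoryStore, securityConfig)
    go func() {
        if err := caServer.Run(ctx); err != nil {
            log.G(ctx).WithError(err).Error("CA signer exited")
        }
    }()
    // ... register caServer as the CA/NodeCA gRPC service ...
    defer caServer.Stop()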
diff --git a/vendor/src/github.com/docker/swarmkit/ca/transport.go b/vendor/src/github.com/docker/swarmkit/ca/transport.go
new file mode 100644
index 0000000000..d39dc64af7
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/ca/transport.go
@@ -0,0 +1,194 @@
+package ca
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc/credentials"
+
+ "golang.org/x/net/context"
+)
+
+var (
+ // alpnProtoStr are the specified application level protocols for gRPC.
+ alpnProtoStr = []string{"h2"}
+)
+
+type timeoutError struct{}
+
+func (timeoutError) Error() string { return "mutablecredentials: Dial timed out" }
+func (timeoutError) Timeout() bool { return true }
+func (timeoutError) Temporary() bool { return true }
+
+// MutableTLSCreds is the credentials required for authenticating a connection using TLS.
+type MutableTLSCreds struct {
+ // Mutex for the tls config
+ sync.Mutex
+ // TLS configuration
+ config *tls.Config
+ // TLS Credentials
+ tlsCreds credentials.TransportAuthenticator
+ // store the subject for easy access
+ subject pkix.Name
+}
+
+// Info implements the credentials.TransportAuthenticator interface
+func (c *MutableTLSCreds) Info() credentials.ProtocolInfo {
+ return credentials.ProtocolInfo{
+ SecurityProtocol: "tls",
+ SecurityVersion: "1.2",
+ }
+}
+
+// GetRequestMetadata implements the credentials.TransportAuthenticator interface
+func (c *MutableTLSCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ return nil, nil
+}
+
+// RequireTransportSecurity implements the credentials.TransportAuthenticator interface
+func (c *MutableTLSCreds) RequireTransportSecurity() bool {
+ return true
+}
+
+// ClientHandshake implements the credentials.TransportAuthenticator interface
+func (c *MutableTLSCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, credentials.AuthInfo, error) {
+ // borrow all the code from the original TLS credentials
+ var errChannel chan error
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- timeoutError{}
+ })
+ }
+ c.Lock()
+ if c.config.ServerName == "" {
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ c.config.ServerName = addr[:colonPos]
+ }
+
+ conn := tls.Client(rawConn, c.config)
+ // Need to allow conn.Handshake to have access to config;
+ // holding the lock would otherwise create a deadlock
+ c.Unlock()
+ var err error
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+ err = <-errChannel
+ }
+ if err != nil {
+ rawConn.Close()
+ return nil, nil, err
+ }
+
+ return conn, nil, nil
+}
+
+// ServerHandshake implements the credentials.TransportAuthenticator interface
+func (c *MutableTLSCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+ c.Lock()
+ conn := tls.Server(rawConn, c.config)
+ c.Unlock()
+ if err := conn.Handshake(); err != nil {
+ rawConn.Close()
+ return nil, nil, err
+ }
+
+ return conn, credentials.TLSInfo{State: conn.ConnectionState()}, nil
+}
+
+// LoadNewTLSConfig replaces the currently loaded TLS config with a new one
+func (c *MutableTLSCreds) LoadNewTLSConfig(newConfig *tls.Config) error {
+ newSubject, err := GetAndValidateCertificateSubject(newConfig.Certificates)
+ if err != nil {
+ return err
+ }
+
+ c.Lock()
+ defer c.Unlock()
+ c.subject = newSubject
+ c.config = newConfig
+
+ return nil
+}
+
+// Role returns the OU for the certificate encapsulated in this TransportAuthenticator
+func (c *MutableTLSCreds) Role() string {
+ c.Lock()
+ defer c.Unlock()
+
+ return c.subject.OrganizationalUnit[0]
+}
+
+// Organization returns the O for the certificate encapsulated in this TransportAuthenticator
+func (c *MutableTLSCreds) Organization() string {
+ c.Lock()
+ defer c.Unlock()
+
+ return c.subject.Organization[0]
+}
+
+// NodeID returns the CN for the certificate encapsulated in this TransportAuthenticator
+func (c *MutableTLSCreds) NodeID() string {
+ c.Lock()
+ defer c.Unlock()
+
+ return c.subject.CommonName
+}
+
+// NewMutableTLS uses c to construct a mutable TransportAuthenticator based on TLS.
+func NewMutableTLS(c *tls.Config) (*MutableTLSCreds, error) {
+ originalTC := credentials.NewTLS(c)
+
+ if len(c.Certificates) < 1 {
+ return nil, fmt.Errorf("invalid configuration: needs at least one certificate")
+ }
+
+ subject, err := GetAndValidateCertificateSubject(c.Certificates)
+ if err != nil {
+ return nil, err
+ }
+
+ tc := &MutableTLSCreds{config: c, tlsCreds: originalTC, subject: subject}
+ tc.config.NextProtos = alpnProtoStr
+
+ return tc, nil
+}
+
+// GetAndValidateCertificateSubject is a helper method to retrieve and validate the subject
+// from the x509 certificate underlying a tls.Certificate
+func GetAndValidateCertificateSubject(certs []tls.Certificate) (pkix.Name, error) {
+ for i := range certs {
+ cert := &certs[i]
+ x509Cert, err := x509.ParseCertificate(cert.Certificate[0])
+ if err != nil {
+ continue
+ }
+ if len(x509Cert.Subject.OrganizationalUnit) < 1 {
+ return pkix.Name{}, fmt.Errorf("no OU found in certificate subject")
+ }
+
+ if len(x509Cert.Subject.Organization) < 1 {
+ return pkix.Name{}, fmt.Errorf("no organization found in certificate subject")
+ }
+ if x509Cert.Subject.CommonName == "" {
+ return pkix.Name{}, fmt.Errorf("no valid subject names found for TLS configuration")
+ }
+
+ return x509Cert.Subject, nil
+ }
+
+ return pkix.Name{}, fmt.Errorf("no valid certificates found for TLS configuration")
+}
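The point of this type is that certificates can be rotated underneath a live gRPC connection: dial once with the mutable credentials, then swap configs on renewal. A sketch, assuming tlsConfig and renewedConfig were built with NewClientTLSConfig:

    creds, err := ca.NewMutableTLS(tlsConfig)
    if err != nil {
        return err
    }
    conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(creds))
    if err != nil {
        return err
    }
    defer conn.Close()
    // On renewal, later handshakes pick up the new certificate without redialing.
    if err := creds.LoadNewTLSConfig(renewedConfig); err != nil {
        return err
    }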
diff --git a/vendor/src/github.com/docker/swarmkit/identity/doc.go b/vendor/src/github.com/docker/swarmkit/identity/doc.go
new file mode 100644
index 0000000000..7ebb8104c7
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/identity/doc.go
@@ -0,0 +1,17 @@
+// Package identity provides functionality for generating and managing
+// identifiers within swarm. This includes entity identification, such as that
+// of Service, Task and Network but also cryptographically-secure Node identity.
+//
+// Random Identifiers
+//
+// Identifiers provided by this package are cryptographically-strong, random
+// 128 bit numbers encoded in Base36. This method is preferred over UUID4 since
+// it requires less storage and leverages the full 128 bits of entropy.
+//
+// Generating an identifier is simple: call the `NewID` function and proceed.
+// Note that NewID panics if it cannot read from its entropy source:
+//
+// id := NewID()
+//
+package identity
diff --git a/vendor/src/github.com/docker/swarmkit/identity/randomid.go b/vendor/src/github.com/docker/swarmkit/identity/randomid.go
new file mode 100644
index 0000000000..9a1c2ff573
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/identity/randomid.go
@@ -0,0 +1,83 @@
+package identity
+
+import (
+ "crypto/rand"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "strconv"
+)
+
+var (
+ // idReader is used for random id generation. This declaration allows us to
+ // replace it for testing.
+ idReader = rand.Reader
+)
+
+// parameters for random identifier generation. We can tweak this when there is
+// time for further analysis.
+const (
+ randomIDEntropyBytes = 16
+ randomNodeIDEntropyBytes = 8
+ randomIDBase = 36
+
+ // To ensure that all identifiers are fixed length, we make sure they
+ // get padded out to 25 characters, which is the maximum for the base36
+ // representation of 128-bit identifiers.
+ //
+ // For academics, f5lxx1zz5pnorynqglhzmsp33 == 2^128 - 1. This value
+ // was calculated from floor(log(2^128-1, 36)) + 1.
+ //
+ // See http://mathworld.wolfram.com/NumberLength.html for more information.
+ maxRandomIDLength = 25
+ maxRandomNodeIDLength = 13
+)
+
+// NewID generates a new identifier for use where random identifiers with low
+// collision probability are required.
+//
+// With the parameters in this package, the generated identifier will provide
+// 128 bits of entropy encoded with base36. Leading padding is added if the
+// string is less than 25 bytes. We do not intend to maintain this interface, so
+// identifiers should be treated opaquely.
+func NewID() string {
+ var p [randomIDEntropyBytes]byte
+
+ if _, err := io.ReadFull(idReader, p[:]); err != nil {
+ panic(fmt.Errorf("failed to read random bytes: %v", err))
+ }
+
+ var nn big.Int
+ nn.SetBytes(p[:])
+ return fmt.Sprintf("%0[1]*s", maxRandomIDLength, nn.Text(randomIDBase))
+}
+
+// NewNodeID generates a new identifier for identifying a node. These IDs
+// are shorter than the IDs returned by NewID, so they can be used directly
+// by Raft. Because they are short, they MUST be checked for collisions.
+func NewNodeID() string {
+ var p [randomNodeIDEntropyBytes]byte
+
+ if _, err := io.ReadFull(idReader, p[:]); err != nil {
+ panic(fmt.Errorf("failed to read random bytes: %v", err))
+ }
+
+ randomInt := binary.LittleEndian.Uint64(p[:])
+ return FormatNodeID(randomInt)
+}
+
+// FormatNodeID converts a node ID from uint64 to string format.
+// A string-formatted node ID looks like 1w8ynjwhcy4zd.
+func FormatNodeID(nodeID uint64) string {
+ return fmt.Sprintf("%0[1]*s", maxRandomNodeIDLength, strconv.FormatUint(nodeID, 36))
+}
+
+// ParseNodeID converts a node ID from string format to uint64.
+func ParseNodeID(nodeID string) (uint64, error) {
+ if len(nodeID) != maxRandomNodeIDLength {
+ return 0, errors.New("node ID has invalid length")
+ }
+ return strconv.ParseUint(nodeID, 36, 64)
+}
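Node IDs round-trip between their uint64 and base36 string forms, which is what lets Raft use them directly; for example:

    id := identity.NewNodeID() // e.g. "1w8ynjwhcy4zd", always 13 characters
    n, err := identity.ParseNodeID(id)
    if err != nil {
        return err
    }
    if identity.FormatNodeID(n) != id {
        return fmt.Errorf("round-trip mismatch")
    }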
diff --git a/vendor/src/github.com/docker/swarmkit/ioutils/ioutils.go b/vendor/src/github.com/docker/swarmkit/ioutils/ioutils.go
new file mode 100644
index 0000000000..25e2a7803a
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/ioutils/ioutils.go
@@ -0,0 +1,40 @@
+package ioutils
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// todo: split docker/pkg/ioutils into a separate repo
+
+// AtomicWriteFile atomically writes data to a file specified by filename.
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+ if err != nil {
+ return err
+ }
+ err = os.Chmod(f.Name(), perm)
+ if err != nil {
+ f.Close()
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ f.Close()
+ return io.ErrShortWrite
+ }
+ if err != nil {
+ f.Close()
+ return err
+ }
+ if err := f.Sync(); err != nil {
+ f.Close()
+ return err
+ }
+ if err := f.Close(); err != nil {
+ return err
+ }
+ return os.Rename(f.Name(), filename)
+}
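Typical use is persisting state that must never be observed half-written; the final rename guarantees readers see either the old or the new content in full. The path below is illustrative:

    data := []byte(`{"generation": 7}`)
    if err := ioutils.AtomicWriteFile("/var/lib/example/state.json", data, 0600); err != nil {
        return err
    }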
diff --git a/vendor/src/github.com/docker/swarmkit/log/context.go b/vendor/src/github.com/docker/swarmkit/log/context.go
new file mode 100644
index 0000000000..b919e358bf
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/log/context.go
@@ -0,0 +1,37 @@
+package log
+
+import (
+ "github.com/Sirupsen/logrus"
+ "golang.org/x/net/context"
+)
+
+var (
+ // G is an alias for GetLogger.
+ //
+ // We may want to define this locally to a package to get package tagged log
+ // messages.
+ G = GetLogger
+
+ // L is an alias for the standard logger.
+ L = logrus.NewEntry(logrus.StandardLogger())
+)
+
+type loggerKey struct{}
+
+// WithLogger returns a new context with the provided logger. Use in
+// combination with logger.WithField(s) for great effect.
+func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
+ return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+// GetLogger retrieves the current logger from the context. If no logger is
+// available, the default logger is returned.
+func GetLogger(ctx context.Context) *logrus.Entry {
+ logger := ctx.Value(loggerKey{})
+
+ if logger == nil {
+ return L
+ }
+
+ return logger.(*logrus.Entry)
+}
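This is the pattern the CA server above relies on: attach a scoped entry to the context once, and every callee that receives the context logs with those fields attached:

    ctx = log.WithLogger(ctx, log.G(ctx).WithField("module", "ca"))
    log.G(ctx).Debugf("certificate issued") // logged with module=ca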
diff --git a/vendor/src/github.com/docker/swarmkit/log/grpc.go b/vendor/src/github.com/docker/swarmkit/log/grpc.go
new file mode 100644
index 0000000000..df6e932a8d
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/log/grpc.go
@@ -0,0 +1,8 @@
+package log
+
+import "google.golang.org/grpc/grpclog"
+
+func init() {
+ // completely replace the grpc logger with the logrus logger.
+ grpclog.SetLogger(L)
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/allocator/allocator.go b/vendor/src/github.com/docker/swarmkit/manager/allocator/allocator.go
new file mode 100644
index 0000000000..b1c0058065
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/allocator/allocator.go
@@ -0,0 +1,221 @@
+package allocator
+
+import (
+ "sync"
+
+ "github.com/docker/go-events"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+ "golang.org/x/net/context"
+)
+
+// Allocator controls how the allocation stage in the manager is handled.
+type Allocator struct {
+ // The manager store.
+ store *store.MemoryStore
+
+ // the ballot used to synchronize across all allocators to ensure
+ // all of them have completed their respective allocations so that the
+ // task can be moved to ALLOCATED state.
+ taskBallot *taskBallot
+
+ // context for the network allocator that will be needed by
+ // network allocator.
+ netCtx *networkContext
+
+ // stopChan signals to the allocator to stop running.
+ stopChan chan struct{}
+ // doneChan is closed when the allocator is finished running.
+ doneChan chan struct{}
+}
+
+// taskBallot controls how the voting for task allocation is
+// coordinated between different allocators. This is the only structure that
+// will be written to by all allocator goroutines concurrently. Hence the
+// mutex.
+type taskBallot struct {
+ sync.Mutex
+
+ // List of registered voters who have to cast their vote to
+ // indicate that their allocation is complete
+ voters []string
+
+ // List of votes collected for every task so far from different voters.
+ votes map[string][]string
+}
+
+// allocActor controls the various phases in the lifecycle of one kind of allocator.
+type allocActor struct {
+ // Channel through which the allocator gets all the events
+ // that it is interested in.
+ ch chan events.Event
+
+ // cancel unregisters the watcher.
+ cancel func()
+
+ // Task voter identity of the allocator.
+ taskVoter string
+
+ // Action routine which is called for every event that the
+ // allocator received.
+ action func(context.Context, events.Event)
+
+ // Init routine which is called during the initialization of
+ // the allocator.
+ init func(ctx context.Context) error
+}
+
+// New returns a new instance of Allocator for use during allocation
+// stage of the manager.
+func New(store *store.MemoryStore) (*Allocator, error) {
+ a := &Allocator{
+ store: store,
+ taskBallot: &taskBallot{
+ votes: make(map[string][]string),
+ },
+ stopChan: make(chan struct{}),
+ doneChan: make(chan struct{}),
+ }
+
+ return a, nil
+}
+
+// Run starts all allocator go-routines and waits for Stop to be called.
+func (a *Allocator) Run(ctx context.Context) error {
+ // Setup cancel context for all goroutines to use.
+ ctx, cancel := context.WithCancel(ctx)
+ var wg sync.WaitGroup
+
+ defer func() {
+ cancel()
+ wg.Wait()
+ close(a.doneChan)
+ }()
+
+ var actors []func() error
+ watch, watchCancel := state.Watch(a.store.WatchQueue(),
+ state.EventCreateNetwork{},
+ state.EventDeleteNetwork{},
+ state.EventCreateService{},
+ state.EventUpdateService{},
+ state.EventDeleteService{},
+ state.EventCreateTask{},
+ state.EventUpdateTask{},
+ state.EventDeleteTask{},
+ state.EventCreateNode{},
+ state.EventUpdateNode{},
+ state.EventDeleteNode{},
+ state.EventCommit{},
+ )
+
+ for _, aa := range []allocActor{
+ {
+ ch: watch,
+ cancel: watchCancel,
+ taskVoter: networkVoter,
+ init: a.doNetworkInit,
+ action: a.doNetworkAlloc,
+ },
+ } {
+ if aa.taskVoter != "" {
+ a.registerToVote(aa.taskVoter)
+ }
+
+ // Copy the iterated value for variable capture.
+ aaCopy := aa
+ actor := func() error {
+ wg.Add(1)
+ // init might return an allocator specific context
+ // which is a child of the passed in context to hold
+ // allocator specific state
+ if err := aaCopy.init(ctx); err != nil {
+ // Stop the watches for this allocator
+ // if we are failing in the init of
+ // this allocator.
+ aa.cancel()
+ wg.Done()
+ return err
+ }
+
+ go func() {
+ defer wg.Done()
+ a.run(ctx, aaCopy)
+ }()
+ return nil
+ }
+
+ actors = append(actors, actor)
+ }
+
+ for _, actor := range actors {
+ if err := actor(); err != nil {
+ return err
+ }
+ }
+
+ <-a.stopChan
+ return nil
+}
+
+// Stop stops the allocator
+func (a *Allocator) Stop() {
+ close(a.stopChan)
+ // Wait for all allocator goroutines to truly exit
+ <-a.doneChan
+}
+
+func (a *Allocator) run(ctx context.Context, aa allocActor) {
+ for {
+ select {
+ case ev, ok := <-aa.ch:
+ if !ok {
+ return
+ }
+
+ aa.action(ctx, ev)
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func (a *Allocator) registerToVote(name string) {
+ a.taskBallot.Lock()
+ defer a.taskBallot.Unlock()
+
+ a.taskBallot.voters = append(a.taskBallot.voters, name)
+}
+
+func (a *Allocator) taskAllocateVote(voter string, id string) bool {
+ a.taskBallot.Lock()
+ defer a.taskBallot.Unlock()
+
+ // If this voter has already voted for this task, return false
+ for _, v := range a.taskBallot.votes[id] {
+ if v == voter {
+ return false
+ }
+ }
+
+ a.taskBallot.votes[id] = append(a.taskBallot.votes[id], voter)
+
+ // We haven't gotten enough votes yet
+ if len(a.taskBallot.voters) > len(a.taskBallot.votes[id]) {
+ return false
+ }
+
+nextVoter:
+ for _, voter := range a.taskBallot.voters {
+ for _, vote := range a.taskBallot.votes[id] {
+ if voter == vote {
+ continue nextVoter
+ }
+ }
+
+ // Not every registered voter has registered a vote.
+ return false
+ }
+
+ return true
+}
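To make the ballot semantics concrete: a task moves to ALLOCATED only after every registered voter has voted for it. A sketch from inside the package, with a second, hypothetical voter:

    a.registerToVote("network")
    a.registerToVote("ipam") // hypothetical second allocator

    done := a.taskAllocateVote("network", taskID) // false, "ipam" has not voted yet
    done = a.taskAllocateVote("ipam", taskID)     // true, all registered voters have voted
    _ = done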
diff --git a/vendor/src/github.com/docker/swarmkit/manager/allocator/doc.go b/vendor/src/github.com/docker/swarmkit/manager/allocator/doc.go
new file mode 100644
index 0000000000..ae64be3fda
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/allocator/doc.go
@@ -0,0 +1,18 @@
+// Package allocator aims to manage allocation of different
+// cluster-wide resources on behalf of the manager. In particular, it
+// manages a set of independent allocator processes which can mostly
+// execute concurrently with only a minimal need for coordination.
+//
+// One of the instances where it needs coordination is when to move a
+// task to ALLOCATED state. Since a task can move to ALLOCATED state
+// only when all task allocators have completed their share of the
+// allocation, they all have to agree on that. The way this is achieved
+// in `allocator` is by creating a `taskBallot` to which all task
+// allocators register themselves as mandatory voters. For each task
+// that needs allocation, each allocator independently votes to indicate
+// the completion of its allocation. Once all registered voters have
+// voted, the task is moved to ALLOCATED state.
+//
+// Other than the coordination needed for the task ALLOCATED state, all
+// the allocators function fairly independently.
+package allocator
diff --git a/vendor/src/github.com/docker/swarmkit/manager/allocator/network.go b/vendor/src/github.com/docker/swarmkit/manager/allocator/network.go
new file mode 100644
index 0000000000..962ab88435
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/allocator/network.go
@@ -0,0 +1,777 @@
+package allocator
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/docker/go-events"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/identity"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/allocator/networkallocator"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+ "golang.org/x/net/context"
+)
+
+const (
+ // Network allocator Voter ID for task allocation vote.
+ networkVoter = "network"
+
+ ingressNetworkName = "ingress"
+ ingressSubnet = "10.255.0.0/16"
+)
+
+var ingressNetwork = &api.Network{
+ Spec: api.NetworkSpec{
+ Annotations: api.Annotations{
+ Name: ingressNetworkName,
+ Labels: map[string]string{
+ "com.docker.swarm.internal": "true",
+ },
+ },
+ DriverConfig: &api.Driver{},
+ IPAM: &api.IPAMOptions{
+ Driver: &api.Driver{},
+ Configs: []*api.IPAMConfig{
+ {
+ Subnet: ingressSubnet,
+ },
+ },
+ },
+ },
+}
+
+// Network context information which is used throughout the network allocation code.
+type networkContext struct {
+ // Instance of the low-level network allocator which performs
+ // the actual network allocation.
+ nwkAllocator *networkallocator.NetworkAllocator
+
+ // A table of unallocated tasks which will be revisited if any thing
+ // changes in system state that might help task allocation.
+ unallocatedTasks map[string]*api.Task
+}
+
+func (a *Allocator) doNetworkInit(ctx context.Context) error {
+ na, err := networkallocator.New()
+ if err != nil {
+ return err
+ }
+
+ nc := &networkContext{
+ nwkAllocator: na,
+ unallocatedTasks: make(map[string]*api.Task),
+ }
+
+ // Check if we have the ingress network. If not found create
+ // it before reading all network objects for allocation.
+ var networks []*api.Network
+ a.store.View(func(tx store.ReadTx) {
+ networks, err = store.FindNetworks(tx, store.ByName(ingressNetworkName))
+ if len(networks) > 0 {
+ ingressNetwork = networks[0]
+ }
+ })
+ if err != nil {
+ return fmt.Errorf("failed to find ingress network during init: %v", err)
+ }
+
+ // If ingress network is not found, create one right away
+ // using the predefined template.
+ if len(networks) == 0 {
+ if err := a.store.Update(func(tx store.Tx) error {
+ ingressNetwork.ID = identity.NewID()
+ if err := store.CreateNetwork(tx, ingressNetwork); err != nil {
+ return err
+ }
+
+ return nil
+ }); err != nil {
+ return fmt.Errorf("failed to create ingress network: %v", err)
+ }
+
+ a.store.View(func(tx store.ReadTx) {
+ networks, err = store.FindNetworks(tx, store.ByName(ingressNetworkName))
+ if len(networks) > 0 {
+ ingressNetwork = networks[0]
+ }
+ })
+ if err != nil {
+ return fmt.Errorf("failed to find ingress network after creating it: %v", err)
+ }
+
+ }
+
+ // Try to complete ingress network allocation before anything else so
+ // that we can get the preferred subnet for the ingress
+ // network.
+ if !na.IsAllocated(ingressNetwork) {
+ if err := a.allocateNetwork(ctx, nc, ingressNetwork); err != nil {
+ log.G(ctx).Errorf("failed allocating ingress network during init: %v", err)
+ }
+
+ // Update store after allocation
+ if err := a.store.Update(func(tx store.Tx) error {
+ if err := store.UpdateNetwork(tx, ingressNetwork); err != nil {
+ return err
+ }
+
+ return nil
+ }); err != nil {
+ return fmt.Errorf("failed to create ingress network: %v", err)
+ }
+ }
+
+ // Allocate networks in the store so far before we started
+ // watching.
+ a.store.View(func(tx store.ReadTx) {
+ networks, err = store.FindNetworks(tx, store.All)
+ })
+ if err != nil {
+ return fmt.Errorf("error listing all networks in store while trying to allocate during init: %v", err)
+ }
+
+ for _, n := range networks {
+ if na.IsAllocated(n) {
+ continue
+ }
+
+ if err := a.allocateNetwork(ctx, nc, n); err != nil {
+ log.G(ctx).Errorf("failed allocating network %s during init: %v", n.ID, err)
+ }
+ }
+
+ // Allocate nodes in the store so far before we process watched events.
+ var nodes []*api.Node
+ a.store.View(func(tx store.ReadTx) {
+ nodes, err = store.FindNodes(tx, store.All)
+ })
+ if err != nil {
+ return fmt.Errorf("error listing all services in store while trying to allocate during init: %v", err)
+ }
+
+ for _, node := range nodes {
+ if na.IsNodeAllocated(node) {
+ continue
+ }
+
+ if node.Attachment == nil {
+ node.Attachment = &api.NetworkAttachment{}
+ }
+
+ node.Attachment.Network = ingressNetwork.Copy()
+ if err := a.allocateNode(ctx, nc, node); err != nil {
+ log.G(ctx).Errorf("Failed to allocate network resources for node %s during init: %v", node.ID, err)
+ }
+ }
+
+ // Allocate services in the store so far before we process watched events.
+ var services []*api.Service
+ a.store.View(func(tx store.ReadTx) {
+ services, err = store.FindServices(tx, store.All)
+ })
+ if err != nil {
+ return fmt.Errorf("error listing all services in store while trying to allocate during init: %v", err)
+ }
+
+ for _, s := range services {
+ if s.Spec.Endpoint == nil {
+ continue
+ }
+
+ if na.IsServiceAllocated(s) {
+ continue
+ }
+
+ if err := a.allocateService(ctx, nc, s); err != nil {
+ log.G(ctx).Errorf("failed allocating service %s during init: %v", s.ID, err)
+ }
+ }
+
+ // Allocate tasks in the store so far before we started watching.
+ var tasks []*api.Task
+ a.store.View(func(tx store.ReadTx) {
+ tasks, err = store.FindTasks(tx, store.All)
+ })
+ if err != nil {
+ return fmt.Errorf("error listing all tasks in store while trying to allocate during init: %v", err)
+ }
+
+ if _, err := a.store.Batch(func(batch *store.Batch) error {
+ for _, t := range tasks {
+ if taskDead(t) {
+ continue
+ }
+
+ var s *api.Service
+ if t.ServiceID != "" {
+ a.store.View(func(tx store.ReadTx) {
+ s = store.GetService(tx, t.ServiceID)
+ })
+ }
+
+ // Populate network attachments in the task
+ // based on service spec.
+ a.taskCreateNetworkAttachments(t, s)
+
+ if taskReadyForNetworkVote(t, s, nc) {
+ if t.Status.State >= api.TaskStateAllocated {
+ continue
+ }
+
+ if a.taskAllocateVote(networkVoter, t.ID) {
+ // If the task is not attached to any network, the network
+ // allocator's job is done. Immediately cast a vote so
+ // that the task can be moved to ALLOCATED state as
+ // soon as possible.
+ if err := batch.Update(func(tx store.Tx) error {
+ storeT := store.GetTask(tx, t.ID)
+ if storeT == nil {
+ return fmt.Errorf("task %s not found while trying to update state", t.ID)
+ }
+
+ updateTaskStatus(storeT, api.TaskStateAllocated, "allocated")
+
+ if err := store.UpdateTask(tx, storeT); err != nil {
+ return fmt.Errorf("failed updating state in store transaction for task %s: %v", storeT.ID, err)
+ }
+
+ return nil
+ }); err != nil {
+ log.G(ctx).WithError(err).Error("error updating task network")
+ }
+ }
+ continue
+ }
+
+ err := batch.Update(func(tx store.Tx) error {
+ _, err := a.allocateTask(ctx, nc, tx, t)
+ return err
+ })
+ if err != nil {
+ log.G(ctx).Errorf("failed allocating task %s during init: %v", t.ID, err)
+ nc.unallocatedTasks[t.ID] = t
+ }
+ }
+
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ a.netCtx = nc
+ return nil
+}
+
+func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
+ nc := a.netCtx
+
+ switch v := ev.(type) {
+ case state.EventCreateNetwork:
+ n := v.Network.Copy()
+ if nc.nwkAllocator.IsAllocated(n) {
+ break
+ }
+
+ if err := a.allocateNetwork(ctx, nc, n); err != nil {
+ log.G(ctx).Errorf("Failed allocation for network %s: %v", n.ID, err)
+ break
+ }
+ case state.EventDeleteNetwork:
+ n := v.Network.Copy()
+
+ // The assumption here is that all dependent objects
+ // have been cleaned up when we are here so the only
+ // thing that needs to happen is free the network
+ // resources.
+ if err := nc.nwkAllocator.Deallocate(n); err != nil {
+ log.G(ctx).Errorf("Failed during network free for network %s: %v", n.ID, err)
+ }
+ case state.EventCreateService:
+ s := v.Service.Copy()
+
+ if !serviceAllocationNeeded(s, nc) {
+ break
+ }
+
+ if err := a.allocateService(ctx, nc, s); err != nil {
+ log.G(ctx).Errorf("Failed allocation for service %s: %v", s.ID, err)
+ break
+ }
+ case state.EventUpdateService:
+ s := v.Service.Copy()
+
+ if !serviceAllocationNeeded(s, nc) {
+ break
+ }
+
+ if err := a.allocateService(ctx, nc, s); err != nil {
+ log.G(ctx).Errorf("Failed allocation during update of service %s: %v", s.ID, err)
+ break
+ }
+ case state.EventDeleteService:
+ s := v.Service.Copy()
+
+ if serviceAllocationNeeded(s, nc) {
+ break
+ }
+
+ if err := nc.nwkAllocator.ServiceDeallocate(s); err != nil {
+ log.G(ctx).Errorf("Failed deallocation during delete of service %s: %v", s.ID, err)
+ }
+ case state.EventCreateNode, state.EventUpdateNode, state.EventDeleteNode:
+ a.doNodeAlloc(ctx, nc, ev)
+ case state.EventCreateTask, state.EventUpdateTask, state.EventDeleteTask:
+ a.doTaskAlloc(ctx, nc, ev)
+ case state.EventCommit:
+ a.procUnallocatedTasksNetwork(ctx, nc)
+ return
+ }
+}
+
+func (a *Allocator) doNodeAlloc(ctx context.Context, nc *networkContext, ev events.Event) {
+ var (
+ isDelete bool
+ node *api.Node
+ )
+
+ switch v := ev.(type) {
+ case state.EventCreateNode:
+ node = v.Node.Copy()
+ case state.EventUpdateNode:
+ node = v.Node.Copy()
+ case state.EventDeleteNode:
+ isDelete = true
+ node = v.Node.Copy()
+ }
+
+ if isDelete {
+ if nc.nwkAllocator.IsNodeAllocated(node) {
+ if err := nc.nwkAllocator.DeallocateNode(node); err != nil {
+ log.G(ctx).Errorf("Failed freeing network resources for node %s: %v", node.ID, err)
+ }
+ }
+ return
+ }
+
+ if !nc.nwkAllocator.IsNodeAllocated(node) {
+ if node.Attachment == nil {
+ node.Attachment = &api.NetworkAttachment{}
+ }
+
+ node.Attachment.Network = ingressNetwork.Copy()
+ if err := a.allocateNode(ctx, nc, node); err != nil {
+			log.G(ctx).Errorf("Failed to allocate network resources for node %s: %v", node.ID, err)
+ }
+ }
+}
+
+// serviceAllocationNeeded returns whether a service needs network resource allocation.
+func serviceAllocationNeeded(s *api.Service, nc *networkContext) bool {
+ // Service needs allocation if:
+ // Spec has network attachments and endpoint resolution mode is VIP OR
+ // Spec has non-zero number of exposed ports and ingress routing is SwarmPort
+	if (len(s.Spec.Networks) != 0 &&
+		(s.Spec.Endpoint == nil ||
+			s.Spec.Endpoint.Mode == api.ResolutionModeVirtualIP)) ||
+		(s.Spec.Endpoint != nil &&
+			len(s.Spec.Endpoint.Ports) != 0) {
+ return !nc.nwkAllocator.IsServiceAllocated(s)
+ }
+
+ return false
+}
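+
+// A minimal sketch of the check above, assuming an in-scope
+// *networkContext nc; the api.EndpointSpec type name and struct literal
+// are assumptions inferred from the field accesses in this file.
+//
+//	s := &api.Service{
+//		Spec: api.ServiceSpec{
+//			Endpoint: &api.EndpointSpec{
+//				Ports: []*api.PortConfig{{TargetPort: 80}},
+//			},
+//		},
+//	}
+//	needed := serviceAllocationNeeded(s, nc) // true until the allocator reserves its resources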
+
+// taskRunning checks whether a task is either actively running, or in the
+// process of starting up.
+func taskRunning(t *api.Task) bool {
+ return t.DesiredState <= api.TaskStateRunning && t.Status.State <= api.TaskStateRunning
+}
+
+// taskDead checks whether a task is dead for the allocator's purposes: both its desired and observed states are past RUNNING.
+func taskDead(t *api.Task) bool {
+ return t.DesiredState > api.TaskStateRunning && t.Status.State > api.TaskStateRunning
+}
+
+// taskReadyForNetworkVote checks if the task is ready for a network
+// vote to move it to ALLOCATED state.
+func taskReadyForNetworkVote(t *api.Task, s *api.Service, nc *networkContext) bool {
+	// Task is ready for a vote if the following is true:
+	//
+	// the task has no networks attached, or all attached networks
+	// have been allocated, AND the task's service either has no
+	// endpoint or networks configured, or its service endpoints
+	// have been allocated.
+ return (len(t.Networks) == 0 || nc.nwkAllocator.IsTaskAllocated(t)) &&
+ (s == nil || !serviceAllocationNeeded(s, nc))
+}
+
+func taskUpdateNetworks(t *api.Task, networks []*api.NetworkAttachment) {
+ networksCopy := make([]*api.NetworkAttachment, 0, len(networks))
+ for _, n := range networks {
+ networksCopy = append(networksCopy, n.Copy())
+ }
+
+ t.Networks = networksCopy
+}
+
+func taskUpdateEndpoint(t *api.Task, endpoint *api.Endpoint) {
+ t.Endpoint = endpoint.Copy()
+}
+
+func (a *Allocator) taskCreateNetworkAttachments(t *api.Task, s *api.Service) {
+	// If the service is nil, or if the task's network attachments
+	// have already been filled in, there is no need to do anything else.
+ if s == nil || len(t.Networks) != 0 {
+ return
+ }
+
+ var networks []*api.NetworkAttachment
+
+ // The service to which this task belongs is trying to expose
+ // ports to the external world. Automatically attach the task
+ // to the ingress network.
+ if s.Spec.Endpoint != nil && len(s.Spec.Endpoint.Ports) != 0 {
+ networks = append(networks, &api.NetworkAttachment{Network: ingressNetwork})
+ }
+
+ a.store.View(func(tx store.ReadTx) {
+ for _, na := range s.Spec.Networks {
+ n := store.GetNetwork(tx, na.Target)
+ if n != nil {
+ networks = append(networks, &api.NetworkAttachment{Network: n})
+ }
+ }
+ })
+
+ taskUpdateNetworks(t, networks)
+}
+
+func (a *Allocator) doTaskAlloc(ctx context.Context, nc *networkContext, ev events.Event) {
+ var (
+ isDelete bool
+ t *api.Task
+ )
+
+ switch v := ev.(type) {
+ case state.EventCreateTask:
+ t = v.Task.Copy()
+ case state.EventUpdateTask:
+ t = v.Task.Copy()
+ case state.EventDeleteTask:
+ isDelete = true
+ t = v.Task.Copy()
+ }
+
+ // If the task has stopped running or it's being deleted then
+ // we should free the network resources associated with the
+ // task right away.
+ if taskDead(t) || isDelete {
+ if nc.nwkAllocator.IsTaskAllocated(t) {
+ if err := nc.nwkAllocator.DeallocateTask(t); err != nil {
+ log.G(ctx).Errorf("Failed freeing network resources for task %s: %v", t.ID, err)
+ }
+ }
+
+ // Cleanup any task references that might exist in unallocatedTasks
+ delete(nc.unallocatedTasks, t.ID)
+ return
+ }
+
+ // If we are already in allocated state, there is
+ // absolutely nothing else to do.
+ if t.Status.State >= api.TaskStateAllocated {
+ delete(nc.unallocatedTasks, t.ID)
+ return
+ }
+
+ var s *api.Service
+ if t.ServiceID != "" {
+ a.store.View(func(tx store.ReadTx) {
+ s = store.GetService(tx, t.ServiceID)
+ })
+ if s == nil {
+			// If the task is running, failing to find the
+			// associated service is not normal. If the task
+			// is not running (it is dead or its desired
+			// state is set to dead) then the service may no
+			// longer be available in the store. But we
+			// still need to clean up the network resources
+			// associated with the task.
+ if taskRunning(t) && !isDelete {
+				log.G(ctx).Errorf("Event %T: failed to find service %s for task %s in state %s", ev, t.ServiceID, t.ID, t.Status.State)
+ return
+ }
+ }
+
+ // Populate network attachments in the task
+ // based on service spec.
+ a.taskCreateNetworkAttachments(t, s)
+ }
+
+ nc.unallocatedTasks[t.ID] = t
+}
+
+func (a *Allocator) allocateNode(ctx context.Context, nc *networkContext, node *api.Node) error {
+ if err := nc.nwkAllocator.AllocateNode(node); err != nil {
+ return err
+ }
+
+ if err := a.store.Update(func(tx store.Tx) error {
+ for {
+ err := store.UpdateNode(tx, node)
+ if err != nil && err != store.ErrSequenceConflict {
+ return fmt.Errorf("failed updating state in store transaction for node %s: %v", node.ID, err)
+ }
+
+ if err == store.ErrSequenceConflict {
+ storeNode := store.GetNode(tx, node.ID)
+ storeNode.Attachment = node.Attachment.Copy()
+ node = storeNode
+ continue
+ }
+
+ break
+ }
+ return nil
+ }); err != nil {
+ if err := nc.nwkAllocator.DeallocateNode(node); err != nil {
+ log.G(ctx).WithError(err).Errorf("failed rolling back allocation of node %s: %v", node.ID, err)
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+func (a *Allocator) allocateService(ctx context.Context, nc *networkContext, s *api.Service) error {
+	// The service is trying to expose ports to the external
+	// world. Automatically attach the service to the ingress
+	// network, if that has not been done already.
+ if s.Spec.Endpoint != nil && len(s.Spec.Endpoint.Ports) != 0 {
+ if s.Endpoint == nil {
+ s.Endpoint = &api.Endpoint{}
+ }
+
+ var found bool
+ for _, vip := range s.Endpoint.VirtualIPs {
+ if vip.NetworkID == ingressNetwork.ID {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs,
+ &api.Endpoint_VirtualIP{NetworkID: ingressNetwork.ID})
+ }
+ }
+
+ if err := nc.nwkAllocator.ServiceAllocate(s); err != nil {
+ return err
+ }
+
+ if err := a.store.Update(func(tx store.Tx) error {
+ for {
+ err := store.UpdateService(tx, s)
+
+ if err != nil && err != store.ErrSequenceConflict {
+ return fmt.Errorf("failed updating state in store transaction for service %s: %v", s.ID, err)
+ }
+
+ if err == store.ErrSequenceConflict {
+ storeService := store.GetService(tx, s.ID)
+ storeService.Endpoint = s.Endpoint
+ s = storeService
+ continue
+ }
+
+ break
+ }
+ return nil
+ }); err != nil {
+ if err := nc.nwkAllocator.ServiceDeallocate(s); err != nil {
+ log.G(ctx).WithError(err).Errorf("failed rolling back allocation of service %s: %v", s.ID, err)
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+func (a *Allocator) allocateNetwork(ctx context.Context, nc *networkContext, n *api.Network) error {
+ if err := nc.nwkAllocator.Allocate(n); err != nil {
+ return fmt.Errorf("failed during network allocation for network %s: %v", n.ID, err)
+ }
+
+ if err := a.store.Update(func(tx store.Tx) error {
+ if err := store.UpdateNetwork(tx, n); err != nil {
+ return fmt.Errorf("failed updating state in store transaction for network %s: %v", n.ID, err)
+ }
+ return nil
+ }); err != nil {
+ if err := nc.nwkAllocator.Deallocate(n); err != nil {
+ log.G(ctx).WithError(err).Errorf("failed rolling back allocation of network %s", n.ID)
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+func (a *Allocator) allocateTask(ctx context.Context, nc *networkContext, tx store.Tx, t *api.Task) (*api.Task, error) {
+ taskUpdated := false
+
+ // Get the latest task state from the store before updating.
+ storeT := store.GetTask(tx, t.ID)
+ if storeT == nil {
+ return nil, fmt.Errorf("could not find task %s while trying to update network allocation", t.ID)
+ }
+
+	// We might be here even if a task allocation has already
+	// happened but wasn't successfully committed to the store. In
+	// such cases, skip allocation and go straight to updating the
+	// store.
+ if !nc.nwkAllocator.IsTaskAllocated(t) {
+ if t.ServiceID != "" {
+ s := store.GetService(tx, t.ServiceID)
+ if s == nil {
+ return nil, fmt.Errorf("could not find service %s", t.ServiceID)
+ }
+
+ if serviceAllocationNeeded(s, nc) {
+ return nil, fmt.Errorf("service %s to which this task %s belongs has pending allocations", s.ID, t.ID)
+ }
+
+ taskUpdateEndpoint(t, s.Endpoint)
+ }
+
+ for _, na := range t.Networks {
+ n := store.GetNetwork(tx, na.Network.ID)
+ if n == nil {
+ return nil, fmt.Errorf("failed to retrieve network %s while allocating task %s", na.Network.ID, t.ID)
+ }
+
+ if !nc.nwkAllocator.IsAllocated(n) {
+ return nil, fmt.Errorf("network %s attached to task %s not allocated yet", n.ID, t.ID)
+ }
+ }
+
+ if err := nc.nwkAllocator.AllocateTask(t); err != nil {
+			return nil, fmt.Errorf("failed during network allocation for task %s: %v", t.ID, err)
+ }
+ if nc.nwkAllocator.IsTaskAllocated(t) {
+ taskUpdateNetworks(storeT, t.Networks)
+ taskUpdateEndpoint(storeT, t.Endpoint)
+ taskUpdated = true
+ }
+ }
+
+	// Update the network allocations and move to the
+	// ALLOCATED state on top of the latest store state.
+ if a.taskAllocateVote(networkVoter, t.ID) {
+ if storeT.Status.State < api.TaskStateAllocated {
+ updateTaskStatus(storeT, api.TaskStateAllocated, "allocated")
+ taskUpdated = true
+ }
+ }
+
+ if taskUpdated {
+ if err := store.UpdateTask(tx, storeT); err != nil {
+ return nil, fmt.Errorf("failed updating state in store transaction for task %s: %v", storeT.ID, err)
+ }
+ }
+
+ return storeT, nil
+}
+
+func (a *Allocator) procUnallocatedTasksNetwork(ctx context.Context, nc *networkContext) {
+ tasks := make([]*api.Task, 0, len(nc.unallocatedTasks))
+
+ committed, err := a.store.Batch(func(batch *store.Batch) error {
+ for _, t := range nc.unallocatedTasks {
+ var allocatedT *api.Task
+ err := batch.Update(func(tx store.Tx) error {
+ var err error
+ allocatedT, err = a.allocateTask(ctx, nc, tx, t)
+ return err
+ })
+
+ if err != nil {
+ log.G(ctx).WithError(err).Error("task allocation failure")
+ continue
+ }
+
+ tasks = append(tasks, allocatedT)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.G(ctx).WithError(err).Error("failed a store batch operation while processing unallocated tasks")
+ }
+
+ var retryCnt int
+ for len(tasks) != 0 {
+ var err error
+
+ for _, t := range tasks[:committed] {
+ delete(nc.unallocatedTasks, t.ID)
+ }
+
+ tasks = tasks[committed:]
+ if len(tasks) == 0 {
+ break
+ }
+
+ updatedTasks := make([]*api.Task, 0, len(tasks))
+ committed, err = a.store.Batch(func(batch *store.Batch) error {
+ for _, t := range tasks {
+ err := batch.Update(func(tx store.Tx) error {
+ return store.UpdateTask(tx, t)
+ })
+
+ if err != nil {
+ log.G(ctx).WithError(err).Error("allocated task store update failure")
+ continue
+ }
+
+ updatedTasks = append(updatedTasks, t)
+ }
+
+ return nil
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Error("failed a store batch operation while processing unallocated tasks")
+ }
+
+ tasks = updatedTasks
+
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+
+ retryCnt++
+ if retryCnt >= 3 {
+ log.G(ctx).Errorf("failed to complete batch update of allocated tasks after 3 retries")
+ break
+ }
+ }
+}
+
+// updateTaskStatus sets TaskStatus and updates timestamp.
+func updateTaskStatus(t *api.Task, newStatus api.TaskState, message string) {
+ t.Status.State = newStatus
+ t.Status.Message = message
+ t.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go b/vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go
new file mode 100644
index 0000000000..fa02179168
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go
@@ -0,0 +1,635 @@
+package networkallocator
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/drivers/overlay/ovmanager"
+ "github.com/docker/libnetwork/drvregistry"
+ "github.com/docker/libnetwork/ipamapi"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "golang.org/x/net/context"
+)
+
+const (
+ defaultDriver = "overlay"
+)
+
+var (
+ defaultDriverInitFunc = ovmanager.Init
+)
+
+// NetworkAllocator acts as the controller for all network-related
+// operations, such as managing network and IPAM drivers and creating
+// and deleting networks and their associated resources.
+type NetworkAllocator struct {
+	// The driver registry which manages all internal and external
+	// IPAM and network drivers.
+ drvRegistry *drvregistry.DrvRegistry
+
+ // The port allocator instance for allocating node ports
+ portAllocator *portAllocator
+
+ // Local network state used by NetworkAllocator to do network management.
+ networks map[string]*network
+
+ // Allocator state to indicate if allocation has been
+ // successfully completed for this service.
+ services map[string]struct{}
+}
+
+// Local in-memory state related to a network that needs to be tracked by NetworkAllocator
+type network struct {
+ // A local cache of the store object.
+ nw *api.Network
+
+ // pools is used to save the internal poolIDs needed when
+ // releasing the pool.
+ pools map[string]string
+
+ // endpoints is a map of endpoint IP to the poolID from which it
+ // was allocated.
+ endpoints map[string]string
+}
+
+// New returns a new NetworkAllocator handle
+func New() (*NetworkAllocator, error) {
+ na := &NetworkAllocator{
+ networks: make(map[string]*network),
+ services: make(map[string]struct{}),
+ }
+
+	// There are no driver configurations or notification
+	// functions as of now.
+ reg, err := drvregistry.New(nil, nil, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Add the manager component of overlay driver to the registry.
+ if err := reg.AddDriver(defaultDriver, defaultDriverInitFunc, nil); err != nil {
+ return nil, err
+ }
+
+ pa, err := newPortAllocator()
+ if err != nil {
+ return nil, err
+ }
+
+ na.portAllocator = pa
+ na.drvRegistry = reg
+ return na, nil
+}
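+
+// A minimal usage sketch, assuming a caller that owns the allocator's
+// lifecycle:
+//
+//	na, err := New()
+//	if err != nil {
+//		// registry or port-allocator setup failed
+//	}
+//	// na can now Allocate networks and ServiceAllocate services.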
+
+// Allocate allocates all the necessary resources, both general
+// and driver-specific, that may be specified in the NetworkSpec
+func (na *NetworkAllocator) Allocate(n *api.Network) error {
+ if _, ok := na.networks[n.ID]; ok {
+ return fmt.Errorf("network %s already allocated", n.ID)
+ }
+
+ pools, err := na.allocatePools(n)
+ if err != nil {
+ return fmt.Errorf("failed allocating pools and gateway IP for network %s: %v", n.ID, err)
+ }
+
+ if err := na.allocateDriverState(n); err != nil {
+ return fmt.Errorf("failed while allocating driver state for network %s: %v", n.ID, err)
+ }
+
+ na.networks[n.ID] = &network{
+ nw: n,
+ pools: pools,
+ endpoints: make(map[string]string),
+ }
+
+ return nil
+}
+
+func (na *NetworkAllocator) getNetwork(id string) *network {
+ return na.networks[id]
+}
+
+// Deallocate frees all the general and driver-specific resources
+// which were assigned to the passed network.
+func (na *NetworkAllocator) Deallocate(n *api.Network) error {
+ localNet := na.getNetwork(n.ID)
+ if localNet == nil {
+		return fmt.Errorf("could not get network allocator state for network %s", n.ID)
+ }
+
+ if err := na.freeDriverState(n); err != nil {
+ return fmt.Errorf("failed to free driver state for network %s: %v", n.ID, err)
+ }
+
+ delete(na.networks, n.ID)
+ return na.freePools(n, localNet.pools)
+}
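+
+// Sketch of the expected pairing, assuming a single caller: Deallocate
+// looks up the local state recorded by Allocate, so it should only be
+// called for a network that was successfully allocated.
+//
+//	if err := na.Allocate(n); err == nil {
+//		defer na.Deallocate(n) // frees driver state and IPAM pools
+//	}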
+
+// ServiceAllocate allocates all the network resources such as virtual
+// IP and ports needed by the service.
+func (na *NetworkAllocator) ServiceAllocate(s *api.Service) (err error) {
+ if err = na.portAllocator.serviceAllocatePorts(s); err != nil {
+ return
+ }
+ defer func() {
+ if err != nil {
+ na.ServiceDeallocate(s)
+ }
+ }()
+
+ // If ResolutionMode is DNSRR do not try allocating VIPs.
+ if s.Spec.Endpoint != nil && s.Spec.Endpoint.Mode == api.ResolutionModeDNSRoundRobin {
+ return
+ }
+
+ if s.Endpoint == nil {
+ s.Endpoint = &api.Endpoint{}
+ }
+
+ // First allocate VIPs for all the pre-populated endpoint attachments
+ for _, eAttach := range s.Endpoint.VirtualIPs {
+ if err = na.allocateVIP(eAttach); err != nil {
+ return
+ }
+ }
+
+ for _, nAttach := range s.Spec.Networks {
+ vip := &api.Endpoint_VirtualIP{NetworkID: nAttach.Target}
+ if err = na.allocateVIP(vip); err != nil {
+ return
+ }
+
+ s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs, vip)
+ }
+
+ na.services[s.ID] = struct{}{}
+ return
+}
+
+// ServiceDeallocate de-allocates all the network resources such as
+// virtual IP and ports associated with the service.
+func (na *NetworkAllocator) ServiceDeallocate(s *api.Service) error {
+ if s.Endpoint == nil {
+ return nil
+ }
+
+ for _, vip := range s.Endpoint.VirtualIPs {
+ if err := na.deallocateVIP(vip); err != nil {
+ // don't bail here, deallocate as many as possible.
+ log.L.WithError(err).
+ WithField("vip.network", vip.NetworkID).
+ WithField("vip.addr", vip.Addr).Error("error deallocating vip")
+ }
+ }
+
+ na.portAllocator.serviceDeallocatePorts(s)
+ delete(na.services, s.ID)
+
+ return nil
+}
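+
+// Hedged usage sketch: ServiceAllocate rolls back its own partial work
+// on failure (see the deferred cleanup above), so callers only pair a
+// successful allocation with a later ServiceDeallocate.
+//
+//	if err := na.ServiceAllocate(s); err == nil {
+//		// s.Endpoint.Ports and s.Endpoint.VirtualIPs are now populated
+//		defer na.ServiceDeallocate(s)
+//	}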
+
+// IsAllocated returns whether the passed network has been allocated.
+func (na *NetworkAllocator) IsAllocated(n *api.Network) bool {
+ _, ok := na.networks[n.ID]
+ return ok
+}
+
+// IsTaskAllocated returns whether the passed task has its network resources allocated.
+func (na *NetworkAllocator) IsTaskAllocated(t *api.Task) bool {
+ // If Networks is empty there is no way this Task is allocated.
+ if len(t.Networks) == 0 {
+ return false
+ }
+
+	// To determine whether the task has its resources allocated,
+	// we just need to look at one network (in the case of
+	// multi-network attachment). This is because we make sure we
+	// either allocate for every network or for none.
+
+ // If the network is not allocated, the task cannot be allocated.
+ localNet, ok := na.networks[t.Networks[0].Network.ID]
+ if !ok {
+ return false
+ }
+
+ // Addresses empty. Task is not allocated.
+ if len(t.Networks[0].Addresses) == 0 {
+ return false
+ }
+
+ // The allocated IP address not found in local endpoint state. Not allocated.
+ if _, ok := localNet.endpoints[t.Networks[0].Addresses[0]]; !ok {
+ return false
+ }
+
+ return true
+}
+
+// IsServiceAllocated returns whether the passed service has its network resources allocated.
+func (na *NetworkAllocator) IsServiceAllocated(s *api.Service) bool {
+ if _, ok := na.services[s.ID]; !ok {
+ return false
+ }
+
+ if s.Spec.Endpoint != nil {
+ return na.portAllocator.isPortsAllocated(s)
+ }
+
+ return true
+}
+
+// IsNodeAllocated returns whether the passed node has its network resources allocated.
+func (na *NetworkAllocator) IsNodeAllocated(node *api.Node) bool {
+ // If no attachment, not allocated.
+ if node.Attachment == nil {
+ return false
+ }
+
+ // If the network is not allocated, the node cannot be allocated.
+ localNet, ok := na.networks[node.Attachment.Network.ID]
+ if !ok {
+ return false
+ }
+
+ // Addresses empty, not allocated.
+ if len(node.Attachment.Addresses) == 0 {
+ return false
+ }
+
+ // The allocated IP address not found in local endpoint state. Not allocated.
+ if _, ok := localNet.endpoints[node.Attachment.Addresses[0]]; !ok {
+ return false
+ }
+
+ return true
+}
+
+// AllocateNode allocates the IP addresses for the network to which
+// the node is attached.
+func (na *NetworkAllocator) AllocateNode(node *api.Node) error {
+ return na.allocateNetworkIPs(node.Attachment)
+}
+
+// DeallocateNode deallocates the IP addresses for the network to
+// which the node is attached.
+func (na *NetworkAllocator) DeallocateNode(node *api.Node) error {
+ return na.releaseEndpoints([]*api.NetworkAttachment{node.Attachment})
+}
+
+// AllocateTask allocates all the endpoint resources for all the
+// networks that a task is attached to.
+func (na *NetworkAllocator) AllocateTask(t *api.Task) error {
+ for i, nAttach := range t.Networks {
+ if err := na.allocateNetworkIPs(nAttach); err != nil {
+ if err := na.releaseEndpoints(t.Networks[:i]); err != nil {
+ log.G(context.TODO()).Errorf("Failed to release IP addresses while rolling back allocation for task %s network %s: %v", t.ID, nAttach.Network.ID, err)
+ }
+ return fmt.Errorf("failed to allocate network IP for task %s network %s: %v", t.ID, nAttach.Network.ID, err)
+ }
+ }
+
+ return nil
+}
+
+// DeallocateTask releases all the endpoint resources for all the
+// networks that a task is attached to.
+func (na *NetworkAllocator) DeallocateTask(t *api.Task) error {
+ return na.releaseEndpoints(t.Networks)
+}
+
+func (na *NetworkAllocator) releaseEndpoints(networks []*api.NetworkAttachment) error {
+ for _, nAttach := range networks {
+ ipam, _, err := na.resolveIPAM(nAttach.Network)
+ if err != nil {
+			return fmt.Errorf("failed to resolve IPAM while releasing endpoints: %v", err)
+ }
+
+ localNet := na.getNetwork(nAttach.Network.ID)
+ if localNet == nil {
+			return fmt.Errorf("could not find network allocator state for network %s", nAttach.Network.ID)
+ }
+
+ // Do not fail and bail out if we fail to release IP
+ // address here. Keep going and try releasing as many
+ // addresses as possible.
+ for _, addr := range nAttach.Addresses {
+ // Retrieve the poolID and immediately nuke
+ // out the mapping.
+ poolID := localNet.endpoints[addr]
+ delete(localNet.endpoints, addr)
+
+ ip, _, err := net.ParseCIDR(addr)
+ if err != nil {
+ log.G(context.TODO()).Errorf("Could not parse IP address %s while releasing", addr)
+ continue
+ }
+
+ if err := ipam.ReleaseAddress(poolID, ip); err != nil {
+ log.G(context.TODO()).Errorf("IPAM failure while releasing IP address %s: %v", addr, err)
+ }
+ }
+
+ // Clear out the address list when we are done with
+ // this network.
+ nAttach.Addresses = nil
+ }
+
+ return nil
+}
+
+// allocate virtual IP for a single endpoint attachment of the service.
+func (na *NetworkAllocator) allocateVIP(vip *api.Endpoint_VirtualIP) error {
+ localNet := na.getNetwork(vip.NetworkID)
+ if localNet == nil {
+ return fmt.Errorf("networkallocator: could not find local network state")
+ }
+
+ ipam, _, err := na.resolveIPAM(localNet.nw)
+ if err != nil {
+		return fmt.Errorf("failed to resolve IPAM while allocating: %v", err)
+ }
+
+ for _, poolID := range localNet.pools {
+ ip, _, err := ipam.RequestAddress(poolID, nil, nil)
+ if err != nil && err != ipamapi.ErrNoAvailableIPs && err != ipamapi.ErrIPOutOfRange {
+ return fmt.Errorf("could not allocate VIP from IPAM: %v", err)
+ }
+
+ // If we got an address then we are done.
+ if err == nil {
+ ipStr := ip.String()
+ localNet.endpoints[ipStr] = poolID
+ vip.Addr = ipStr
+ return nil
+ }
+ }
+
+ return fmt.Errorf("could not find an available IP while allocating VIP")
+}
+
+func (na *NetworkAllocator) deallocateVIP(vip *api.Endpoint_VirtualIP) error {
+ localNet := na.getNetwork(vip.NetworkID)
+ if localNet == nil {
+ return fmt.Errorf("networkallocator: could not find local network state")
+ }
+
+ ipam, _, err := na.resolveIPAM(localNet.nw)
+ if err != nil {
+		return fmt.Errorf("failed to resolve IPAM while deallocating: %v", err)
+ }
+
+ // Retrieve the poolID and immediately nuke
+ // out the mapping.
+ poolID := localNet.endpoints[vip.Addr]
+ delete(localNet.endpoints, vip.Addr)
+
+ ip, _, err := net.ParseCIDR(vip.Addr)
+ if err != nil {
+ log.G(context.TODO()).Errorf("Could not parse VIP address %s while releasing", vip.Addr)
+ return err
+ }
+
+ if err := ipam.ReleaseAddress(poolID, ip); err != nil {
+ log.G(context.TODO()).Errorf("IPAM failure while releasing VIP address %s: %v", vip.Addr, err)
+ return err
+ }
+
+ return nil
+}
+
+// allocate the IP addresses for a single network attachment of the task.
+func (na *NetworkAllocator) allocateNetworkIPs(nAttach *api.NetworkAttachment) error {
+ var ip *net.IPNet
+
+ ipam, _, err := na.resolveIPAM(nAttach.Network)
+ if err != nil {
+		return fmt.Errorf("failed to resolve IPAM while allocating: %v", err)
+ }
+
+ localNet := na.getNetwork(nAttach.Network.ID)
+ if localNet == nil {
+ return fmt.Errorf("could not find network allocator state for network %s", nAttach.Network.ID)
+ }
+
+ addresses := nAttach.Addresses
+ if addresses == nil {
+ addresses = []string{""}
+ }
+
+ for i, rawAddr := range addresses {
+ var addr net.IP
+ if rawAddr != "" {
+ var err error
+ addr, _, err = net.ParseCIDR(rawAddr)
+ if err != nil {
+ return err
+ }
+ }
+
+ for _, poolID := range localNet.pools {
+ var err error
+
+ ip, _, err = ipam.RequestAddress(poolID, addr, nil)
+ if err != nil && err != ipamapi.ErrNoAvailableIPs && err != ipamapi.ErrIPOutOfRange {
+ return fmt.Errorf("could not allocate IP from IPAM: %v", err)
+ }
+
+ // If we got an address then we are done.
+ if err == nil {
+ ipStr := ip.String()
+ localNet.endpoints[ipStr] = poolID
+ addresses[i] = ipStr
+ nAttach.Addresses = addresses
+ return nil
+ }
+ }
+ }
+
+ return fmt.Errorf("could not find an available IP")
+}
+
+func (na *NetworkAllocator) freeDriverState(n *api.Network) error {
+ d, _, err := na.resolveDriver(n)
+ if err != nil {
+ return err
+ }
+
+ return d.NetworkFree(n.ID)
+}
+
+func (na *NetworkAllocator) allocateDriverState(n *api.Network) error {
+ d, dName, err := na.resolveDriver(n)
+ if err != nil {
+ return err
+ }
+
+ var options map[string]string
+ if n.Spec.DriverConfig != nil {
+ options = n.Spec.DriverConfig.Options
+ }
+
+ // Construct IPAM data for driver consumption.
+ ipv4Data := make([]driverapi.IPAMData, 0, len(n.IPAM.Configs))
+ for _, ic := range n.IPAM.Configs {
+ if ic.Family == api.IPAMConfig_IPV6 {
+ continue
+ }
+
+ _, subnet, err := net.ParseCIDR(ic.Subnet)
+ if err != nil {
+ return fmt.Errorf("error parsing subnet %s while allocating driver state: %v", ic.Subnet, err)
+ }
+
+ gwIP := net.ParseIP(ic.Gateway)
+ gwNet := &net.IPNet{
+ IP: gwIP,
+ Mask: subnet.Mask,
+ }
+
+ data := driverapi.IPAMData{
+ Pool: subnet,
+ Gateway: gwNet,
+ }
+
+ ipv4Data = append(ipv4Data, data)
+ }
+
+ ds, err := d.NetworkAllocate(n.ID, options, ipv4Data, nil)
+ if err != nil {
+ return err
+ }
+
+ // Update network object with the obtained driver state.
+ n.DriverState = &api.Driver{
+ Name: dName,
+ Options: ds,
+ }
+
+ return nil
+}
+
+// Resolve network driver
+func (na *NetworkAllocator) resolveDriver(n *api.Network) (driverapi.Driver, string, error) {
+ dName := defaultDriver
+ if n.Spec.DriverConfig != nil && n.Spec.DriverConfig.Name != "" {
+ dName = n.Spec.DriverConfig.Name
+ }
+
+ d, _ := na.drvRegistry.Driver(dName)
+ if d == nil {
+ return nil, "", fmt.Errorf("could not resolve network driver %s", dName)
+ }
+
+ return d, dName, nil
+}
+
+// Resolve the IPAM driver
+func (na *NetworkAllocator) resolveIPAM(n *api.Network) (ipamapi.Ipam, string, error) {
+ dName := ipamapi.DefaultIPAM
+ if n.Spec.IPAM != nil && n.Spec.IPAM.Driver != nil && n.Spec.IPAM.Driver.Name != "" {
+ dName = n.Spec.IPAM.Driver.Name
+ }
+
+ ipam, _ := na.drvRegistry.IPAM(dName)
+ if ipam == nil {
+ return nil, "", fmt.Errorf("could not resolve IPAM driver %s", dName)
+ }
+
+ return ipam, dName, nil
+}
+
+func (na *NetworkAllocator) freePools(n *api.Network, pools map[string]string) error {
+ ipam, _, err := na.resolveIPAM(n)
+ if err != nil {
+ return fmt.Errorf("failed to resolve IPAM while freeing pools for network %s: %v", n.ID, err)
+ }
+
+ releasePools(ipam, n.IPAM.Configs, pools)
+ return nil
+}
+
+func releasePools(ipam ipamapi.Ipam, icList []*api.IPAMConfig, pools map[string]string) {
+ for _, ic := range icList {
+ if err := ipam.ReleaseAddress(pools[ic.Subnet], net.ParseIP(ic.Gateway)); err != nil {
+ log.G(context.TODO()).Errorf("Failed to release address %s: %v", ic.Subnet, err)
+ }
+ }
+
+ for k, p := range pools {
+ if err := ipam.ReleasePool(p); err != nil {
+ log.G(context.TODO()).Errorf("Failed to release pool %s: %v", k, err)
+ }
+ }
+}
+
+func (na *NetworkAllocator) allocatePools(n *api.Network) (map[string]string, error) {
+ ipam, dName, err := na.resolveIPAM(n)
+ if err != nil {
+ return nil, err
+ }
+
+	// We don't support user-defined address spaces yet, so just
+	// retrieve the default address space names for the driver.
+ _, asName, err := na.drvRegistry.IPAMDefaultAddressSpaces(dName)
+ if err != nil {
+ return nil, err
+ }
+
+ pools := make(map[string]string)
+
+ if n.Spec.IPAM == nil {
+ n.Spec.IPAM = &api.IPAMOptions{}
+ }
+
+ ipamConfigs := make([]*api.IPAMConfig, len(n.Spec.IPAM.Configs))
+ copy(ipamConfigs, n.Spec.IPAM.Configs)
+
+ // If there is non-nil IPAM state always prefer those subnet
+ // configs over Spec configs.
+ if n.IPAM != nil {
+ ipamConfigs = n.IPAM.Configs
+ }
+
+ // Append an empty slot for subnet allocation if there are no
+ // IPAM configs from either spec or state.
+ if len(ipamConfigs) == 0 {
+ ipamConfigs = append(ipamConfigs, &api.IPAMConfig{Family: api.IPAMConfig_IPV4})
+ }
+
+ // Update the runtime IPAM configurations with initial state
+ n.IPAM = &api.IPAMOptions{
+ Driver: &api.Driver{Name: dName},
+ Configs: ipamConfigs,
+ }
+
+ for i, ic := range ipamConfigs {
+ poolID, poolIP, _, err := ipam.RequestPool(asName, ic.Subnet, ic.Range, nil, false)
+ if err != nil {
+ // Rollback by releasing all the resources allocated so far.
+ releasePools(ipam, ipamConfigs[:i], pools)
+ return nil, err
+ }
+ pools[poolIP.String()] = poolID
+
+ gwIP, _, err := ipam.RequestAddress(poolID, net.ParseIP(ic.Gateway), nil)
+ if err != nil {
+ // Rollback by releasing all the resources allocated so far.
+ releasePools(ipam, ipamConfigs[:i], pools)
+ return nil, err
+ }
+
+ if ic.Subnet == "" {
+ ic.Subnet = poolIP.String()
+ }
+
+ if ic.Gateway == "" {
+ ic.Gateway = gwIP.IP.String()
+ }
+	}
+
+ return pools, nil
+}
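+
+// Worked example, under the assumption of the default IPAM driver: with
+// no IPAM configs in either spec or state, allocatePools requests a
+// single IPv4 pool and back-fills ic.Subnet and ic.Gateway from the
+// result, so a bare network ends up with a concrete subnet (and its
+// gateway) recorded in n.IPAM.Configs[0].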
diff --git a/vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/portallocator.go b/vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/portallocator.go
new file mode 100644
index 0000000000..10843b73e4
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/portallocator.go
@@ -0,0 +1,244 @@
+package networkallocator
+
+import (
+ "fmt"
+
+ "github.com/docker/libnetwork/idm"
+ "github.com/docker/swarmkit/api"
+)
+
+const (
+ // Start of the dynamic port range from which node ports will
+ // be allocated when the user did not specify a port.
+ dynamicPortStart = 30000
+
+ // End of the dynamic port range from which node ports will be
+ // allocated when the user did not specify a port.
+ dynamicPortEnd = 32767
+
+	// The start of the master port range which will hold all the
+	// allocation state of ports allocated so far, regardless of
+	// whether they were user defined or not.
+ masterPortStart = 1
+
+	// The end of the master port range which will hold all the
+	// allocation state of ports allocated so far, regardless of
+	// whether they were user defined or not.
+ masterPortEnd = 65535
+)
+
+type portAllocator struct {
+ // portspace definition per protocol
+ portSpaces map[api.PortConfig_Protocol]*portSpace
+}
+
+type portSpace struct {
+ protocol api.PortConfig_Protocol
+ masterPortSpace *idm.Idm
+ dynamicPortSpace *idm.Idm
+}
+
+func newPortAllocator() (*portAllocator, error) {
+ portSpaces := make(map[api.PortConfig_Protocol]*portSpace)
+ for _, protocol := range []api.PortConfig_Protocol{api.ProtocolTCP, api.ProtocolUDP} {
+ ps, err := newPortSpace(protocol)
+ if err != nil {
+ return nil, err
+ }
+
+ portSpaces[protocol] = ps
+ }
+
+ return &portAllocator{portSpaces: portSpaces}, nil
+}
+
+func newPortSpace(protocol api.PortConfig_Protocol) (*portSpace, error) {
+ masterName := fmt.Sprintf("%s-master-ports", protocol)
+ dynamicName := fmt.Sprintf("%s-dynamic-ports", protocol)
+
+ master, err := idm.New(nil, masterName, masterPortStart, masterPortEnd)
+ if err != nil {
+ return nil, err
+ }
+
+ dynamic, err := idm.New(nil, dynamicName, dynamicPortStart, dynamicPortEnd)
+ if err != nil {
+ return nil, err
+ }
+
+ return &portSpace{
+ protocol: protocol,
+ masterPortSpace: master,
+ dynamicPortSpace: dynamic,
+ }, nil
+}
+
+func reconcilePortConfigs(s *api.Service) []*api.PortConfig {
+	// If runtime state hasn't been created, or if the port configs
+	// have diverged from the port state, return the port configs from the Spec.
+ if s.Endpoint == nil || len(s.Spec.Endpoint.Ports) != len(s.Endpoint.Ports) {
+ return s.Spec.Endpoint.Ports
+ }
+
+ var portConfigs []*api.PortConfig
+ for i, portConfig := range s.Spec.Endpoint.Ports {
+ portState := s.Endpoint.Ports[i]
+
+		// If the portConfig is exactly the same as portState,
+		// except that PublishedPort was not user-defined, prefer
+		// portState to ensure sticky allocation of the same
+		// port that was allocated before.
+ if portConfig.Name == portState.Name &&
+ portConfig.TargetPort == portState.TargetPort &&
+ portConfig.Protocol == portState.Protocol &&
+ portConfig.PublishedPort == 0 {
+ portConfigs = append(portConfigs, portState)
+ continue
+ }
+
+ // For all other cases prefer the portConfig
+ portConfigs = append(portConfigs, portConfig)
+ }
+
+ return portConfigs
+}
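+
+// Illustrative case, assuming an earlier dynamic allocation: if the
+// spec still has PublishedPort == 0 and name/target/protocol are
+// unchanged, the previously allocated state entry (say PublishedPort:
+// 30001) is reused instead of rolling a new dynamic port.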
+
+func (pa *portAllocator) serviceAllocatePorts(s *api.Service) (err error) {
+ if s.Spec.Endpoint == nil {
+ return nil
+ }
+
+	// We might have previous allocations which we want to stick
+	// to if possible. So instead of strictly going by the port
+	// configs in the Spec, reconcile the list of port configs from
+	// both the Spec and the runtime state.
+ portConfigs := reconcilePortConfigs(s)
+
+ // Port configuration might have changed. Cleanup all old allocations first.
+ pa.serviceDeallocatePorts(s)
+
+ defer func() {
+ if err != nil {
+			// Free all the ports allocated so far, which
+			// should be present in s.Endpoint.Ports
+ pa.serviceDeallocatePorts(s)
+ }
+ }()
+
+ for _, portConfig := range portConfigs {
+ // Make a copy of port config to create runtime state
+ portState := portConfig.Copy()
+ if err = pa.portSpaces[portState.Protocol].allocate(portState); err != nil {
+ return
+ }
+
+ if s.Endpoint == nil {
+ s.Endpoint = &api.Endpoint{}
+ }
+
+ s.Endpoint.Ports = append(s.Endpoint.Ports, portState)
+ }
+
+ return nil
+}
+
+func (pa *portAllocator) serviceDeallocatePorts(s *api.Service) {
+ if s.Endpoint == nil {
+ return
+ }
+
+ for _, portState := range s.Endpoint.Ports {
+ pa.portSpaces[portState.Protocol].free(portState)
+ }
+
+ s.Endpoint.Ports = nil
+}
+
+func (pa *portAllocator) isPortsAllocated(s *api.Service) bool {
+ if s.Endpoint == nil {
+ return false
+ }
+
+	// If we don't have the same number of port states as port
+	// configs, we assume the ports are not allocated.
+ if len(s.Spec.Endpoint.Ports) != len(s.Endpoint.Ports) {
+ return false
+ }
+
+ for i, portConfig := range s.Spec.Endpoint.Ports {
+ // The port configuration slice and port state slice
+ // are expected to be in the same order.
+ portState := s.Endpoint.Ports[i]
+
+ // If name, port, protocol values don't match then we
+ // are not allocated.
+ if portConfig.Name != portState.Name ||
+ portConfig.TargetPort != portState.TargetPort ||
+ portConfig.Protocol != portState.Protocol {
+ return false
+ }
+
+		// If PublishedPort was user-defined but the port state's
+		// PublishedPort doesn't match, we are not allocated.
+ if portConfig.PublishedPort != portState.PublishedPort &&
+ portConfig.PublishedPort != 0 {
+ return false
+ }
+
+		// If PublishedPort was not defined by the user and the
+		// port state is not initialized with a valid PublishedPort
+		// value, then we are not allocated.
+ if portConfig.PublishedPort == 0 && portState.PublishedPort == 0 {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (ps *portSpace) allocate(p *api.PortConfig) (err error) {
+ if p.PublishedPort != 0 {
+		// If it falls within the dynamic port range, check it
+		// out of the dynamic port space first.
+ if p.PublishedPort >= dynamicPortStart && p.PublishedPort <= dynamicPortEnd {
+ if err = ps.dynamicPortSpace.GetSpecificID(uint64(p.PublishedPort)); err != nil {
+ return err
+ }
+
+ defer func() {
+ if err != nil {
+ ps.dynamicPortSpace.Release(uint64(p.PublishedPort))
+ }
+ }()
+ }
+
+ return ps.masterPortSpace.GetSpecificID(uint64(p.PublishedPort))
+ }
+
+ // Check out an arbitrary port from dynamic port space.
+ swarmPort, err := ps.dynamicPortSpace.GetID()
+ if err != nil {
+ return
+ }
+ defer func() {
+ if err != nil {
+ ps.dynamicPortSpace.Release(uint64(swarmPort))
+ }
+ }()
+
+ // Make sure we allocate the same port from the master space.
+ if err = ps.masterPortSpace.GetSpecificID(uint64(swarmPort)); err != nil {
+ return
+ }
+
+ p.PublishedPort = uint32(swarmPort)
+ return nil
+}
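+
+// Sketch of the two allocation paths (the literals are assumptions):
+//
+//	p := &api.PortConfig{Protocol: api.ProtocolTCP, TargetPort: 80}
+//	_ = ps.allocate(p) // PublishedPort == 0: picks a free port in [30000, 32767]
+//
+//	q := &api.PortConfig{Protocol: api.ProtocolTCP, TargetPort: 80, PublishedPort: 8080}
+//	_ = ps.allocate(q) // user-defined 8080: reserved in the master space only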
+
+func (ps *portSpace) free(p *api.PortConfig) {
+ if p.PublishedPort >= dynamicPortStart && p.PublishedPort <= dynamicPortEnd {
+ ps.dynamicPortSpace.Release(uint64(p.PublishedPort))
+ }
+
+ ps.masterPortSpace.Release(uint64(p.PublishedPort))
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/controlapi/cluster.go b/vendor/src/github.com/docker/swarmkit/manager/controlapi/cluster.go
new file mode 100644
index 0000000000..b138be5a5d
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/controlapi/cluster.go
@@ -0,0 +1,197 @@
+package controlapi
+
+import (
+ "strings"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/ca"
+ "github.com/docker/swarmkit/manager/state/store"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+func validateClusterSpec(spec *api.ClusterSpec) error {
+ if spec == nil {
+ return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+
+ // Validate that duration being provided is valid, and over our minimum
+ if spec.CAConfig.NodeCertExpiry != nil {
+ expiry, err := ptypes.Duration(spec.CAConfig.NodeCertExpiry)
+ if err != nil {
+ return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+ if expiry < ca.MinNodeCertExpiration {
+ return grpc.Errorf(codes.InvalidArgument, "minimum certificate expiry time is: %s", ca.MinNodeCertExpiration)
+ }
+ }
+
+ // Validate that AcceptancePolicies only include Secrets that are bcrypted
+	// TODO(diogo): Add a global list of acceptance algorithms. We only support bcrypt for now.
+ if len(spec.AcceptancePolicy.Policies) > 0 {
+ for _, policy := range spec.AcceptancePolicy.Policies {
+ if policy.Secret != nil && strings.ToLower(policy.Secret.Alg) != "bcrypt" {
+ return grpc.Errorf(codes.InvalidArgument, "hashing algorithm is not supported: %s", policy.Secret.Alg)
+ }
+ }
+ }
+
+ return nil
+}
+
+// GetCluster returns a Cluster given a ClusterID.
+// - Returns `InvalidArgument` if ClusterID is not provided.
+// - Returns `NotFound` if the Cluster is not found.
+func (s *Server) GetCluster(ctx context.Context, request *api.GetClusterRequest) (*api.GetClusterResponse, error) {
+ if request.ClusterID == "" {
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+
+ var cluster *api.Cluster
+ s.store.View(func(tx store.ReadTx) {
+ cluster = store.GetCluster(tx, request.ClusterID)
+ })
+ if cluster == nil {
+ return nil, grpc.Errorf(codes.NotFound, "cluster %s not found", request.ClusterID)
+ }
+
+ redactedClusters := redactClusters([]*api.Cluster{cluster})
+
+ // WARN: we should never return cluster here. We need to redact the private fields first.
+ return &api.GetClusterResponse{
+ Cluster: redactedClusters[0],
+ }, nil
+}
+
+// UpdateCluster updates a Cluster referenced by ClusterID with the given ClusterSpec.
+// - Returns `NotFound` if the Cluster is not found.
+// - Returns `InvalidArgument` if the ClusterSpec is malformed.
+// - Returns `Unimplemented` if the ClusterSpec references unimplemented features.
+// - Returns an error if the update fails.
+func (s *Server) UpdateCluster(ctx context.Context, request *api.UpdateClusterRequest) (*api.UpdateClusterResponse, error) {
+ if request.ClusterID == "" || request.ClusterVersion == nil {
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+ if err := validateClusterSpec(request.Spec); err != nil {
+ return nil, err
+ }
+
+ var cluster *api.Cluster
+ err := s.store.Update(func(tx store.Tx) error {
+ cluster = store.GetCluster(tx, request.ClusterID)
+ if cluster == nil {
+ return nil
+ }
+ cluster.Meta.Version = *request.ClusterVersion
+ cluster.Spec = *request.Spec.Copy()
+ return store.UpdateCluster(tx, cluster)
+ })
+ if err != nil {
+ return nil, err
+ }
+ if cluster == nil {
+ return nil, grpc.Errorf(codes.NotFound, "cluster %s not found", request.ClusterID)
+ }
+
+ redactedClusters := redactClusters([]*api.Cluster{cluster})
+
+ // WARN: we should never return cluster here. We need to redact the private fields first.
+ return &api.UpdateClusterResponse{
+ Cluster: redactedClusters[0],
+ }, nil
+}
+
+func filterClusters(candidates []*api.Cluster, filters ...func(*api.Cluster) bool) []*api.Cluster {
+ result := []*api.Cluster{}
+
+ for _, c := range candidates {
+ match := true
+ for _, f := range filters {
+ if !f(c) {
+ match = false
+ break
+ }
+ }
+ if match {
+ result = append(result, c)
+ }
+ }
+
+ return result
+}
+
+// ListClusters returns a list of all clusters.
+func (s *Server) ListClusters(ctx context.Context, request *api.ListClustersRequest) (*api.ListClustersResponse, error) {
+ var (
+ clusters []*api.Cluster
+ err error
+ )
+ s.store.View(func(tx store.ReadTx) {
+ switch {
+ case request.Filters != nil && len(request.Filters.Names) > 0:
+ clusters, err = store.FindClusters(tx, buildFilters(store.ByName, request.Filters.Names))
+ case request.Filters != nil && len(request.Filters.IDPrefixes) > 0:
+ clusters, err = store.FindClusters(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))
+ default:
+ clusters, err = store.FindClusters(tx, store.All)
+ }
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if request.Filters != nil {
+ clusters = filterClusters(clusters,
+ func(e *api.Cluster) bool {
+ return filterContains(e.Spec.Annotations.Name, request.Filters.Names)
+ },
+ func(e *api.Cluster) bool {
+ return filterContainsPrefix(e.ID, request.Filters.IDPrefixes)
+ },
+ func(e *api.Cluster) bool {
+ return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels)
+ },
+ )
+ }
+
+ // WARN: we should never return cluster here. We need to redact the private fields first.
+ return &api.ListClustersResponse{
+ Clusters: redactClusters(clusters),
+ }, nil
+}
+
+// redactClusters is a method that enforces a whitelist of fields that are OK to be
+// returned in the Cluster object. It should filter out all sensitive information.
+func redactClusters(clusters []*api.Cluster) []*api.Cluster {
+ var redactedClusters []*api.Cluster
+ // Only add public fields to the new clusters
+ for _, cluster := range clusters {
+ // Copy all the mandatory fields
+ newCluster := &api.Cluster{
+ ID: cluster.ID,
+ Meta: cluster.Meta,
+ Spec: cluster.Spec,
+ RootCA: api.RootCA{
+ CACert: cluster.RootCA.CACert,
+ CACertHash: cluster.RootCA.CACertHash,
+ },
+ }
+
+ // Redact the acceptance policy secrets
+ if len(newCluster.Spec.AcceptancePolicy.Policies) > 0 {
+ for _, policy := range newCluster.Spec.AcceptancePolicy.Policies {
+			// Return [REDACTED] to the API client so it knows that a
+			// secret is configured, without revealing what it is.
+ if policy.Secret != nil {
+ policy.Secret.Data = []byte("[REDACTED]")
+ }
+
+ }
+ }
+ redactedClusters = append(redactedClusters, newCluster)
+ }
+
+ return redactedClusters
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/controlapi/common.go b/vendor/src/github.com/docker/swarmkit/manager/controlapi/common.go
new file mode 100644
index 0000000000..3dacf98e22
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/controlapi/common.go
@@ -0,0 +1,86 @@
+package controlapi
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/manager/state/store"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+var isValidName = regexp.MustCompile(`^[a-zA-Z0-9](?:[-_]*[A-Za-z0-9]+)*$`)
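+
+// For example, "web", "web-1" and "my_service" match, while "", "-web"
+// and "web-" are rejected.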
+
+func buildFilters(by func(string) store.By, values []string) store.By {
+ filters := make([]store.By, 0, len(values))
+ for _, v := range values {
+ filters = append(filters, by(v))
+ }
+ return store.Or(filters...)
+}
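+
+// Usage sketch, assuming name-based filtering:
+//
+//	by := buildFilters(store.ByName, []string{"web", "db"})
+//	// equivalent to store.Or(store.ByName("web"), store.ByName("db"))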
+
+func filterContains(match string, candidates []string) bool {
+ if len(candidates) == 0 {
+ return true
+ }
+ for _, c := range candidates {
+ if c == match {
+ return true
+ }
+ }
+ return false
+}
+
+func filterContainsPrefix(match string, candidates []string) bool {
+ if len(candidates) == 0 {
+ return true
+ }
+ for _, c := range candidates {
+ if strings.HasPrefix(match, c) {
+ return true
+ }
+ }
+ return false
+}
+
+func filterMatchLabels(match map[string]string, candidates map[string]string) bool {
+ if len(candidates) == 0 {
+ return true
+ }
+
+ for k, v := range candidates {
+ c, ok := match[k]
+ if !ok {
+ return false
+ }
+ if v != "" && v != c {
+ return false
+ }
+ }
+ return true
+}
+
+func validateAnnotations(m api.Annotations) error {
+ if m.Name == "" {
+ return grpc.Errorf(codes.InvalidArgument, "meta: name must be provided")
+ } else if !isValidName.MatchString(m.Name) {
+ // if the name doesn't match the regex
+		return grpc.Errorf(codes.InvalidArgument, "invalid name, only alphanumeric characters with interior '-' or '_' are allowed")
+ }
+ return nil
+}
+
+func validateDriver(driver *api.Driver) error {
+ if driver == nil {
+ // It is ok to not specify the driver. We will choose
+ // a default driver.
+ return nil
+ }
+
+ if driver.Name == "" {
+ return grpc.Errorf(codes.InvalidArgument, "driver name: if driver is specified name is required")
+ }
+
+ return nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/controlapi/network.go b/vendor/src/github.com/docker/swarmkit/manager/controlapi/network.go
new file mode 100644
index 0000000000..35fcd70633
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/controlapi/network.go
@@ -0,0 +1,244 @@
+package controlapi
+
+import (
+ "net"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/identity"
+ "github.com/docker/swarmkit/manager/state/store"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+func validateIPAMConfiguration(ipamConf *api.IPAMConfig) error {
+ if ipamConf == nil {
+ return grpc.Errorf(codes.InvalidArgument, "ipam configuration: cannot be empty")
+ }
+
+ _, subnet, err := net.ParseCIDR(ipamConf.Subnet)
+ if err != nil {
+ return grpc.Errorf(codes.InvalidArgument, "ipam configuration: invalid subnet %s", ipamConf.Subnet)
+ }
+
+ if ipamConf.Range != "" {
+ ip, _, err := net.ParseCIDR(ipamConf.Range)
+ if err != nil {
+ return grpc.Errorf(codes.InvalidArgument, "ipam configuration: invalid range %s", ipamConf.Range)
+ }
+
+ if !subnet.Contains(ip) {
+ return grpc.Errorf(codes.InvalidArgument, "ipam configuration: subnet %s does not contain range %s", ipamConf.Subnet, ipamConf.Range)
+ }
+ }
+
+ if ipamConf.Gateway != "" {
+ ip := net.ParseIP(ipamConf.Gateway)
+ if ip == nil {
+ return grpc.Errorf(codes.InvalidArgument, "ipam configuration: invalid gateway %s", ipamConf.Gateway)
+ }
+
+ if !subnet.Contains(ip) {
+ return grpc.Errorf(codes.InvalidArgument, "ipam configuration: subnet %s does not contain gateway %s", ipamConf.Subnet, ipamConf.Gateway)
+ }
+ }
+
+ return nil
+}
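+
+// Example inputs (assumed values): Subnet "10.0.0.0/24" with Range
+// "10.0.0.128/25" and Gateway "10.0.0.1" passes, while a gateway
+// outside the subnet (e.g. "10.0.1.1") yields InvalidArgument.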
+
+func validateIPAM(ipam *api.IPAMOptions) error {
+ if ipam == nil {
+ // It is ok to not specify any IPAM configurations. We
+ // will choose good defaults.
+ return nil
+ }
+
+ if err := validateDriver(ipam.Driver); err != nil {
+ return err
+ }
+
+ for _, ipamConf := range ipam.Configs {
+ if err := validateIPAMConfiguration(ipamConf); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func validateNetworkSpec(spec *api.NetworkSpec) error {
+ if spec == nil {
+ return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+
+ if err := validateAnnotations(spec.Annotations); err != nil {
+ return err
+ }
+
+ if err := validateDriver(spec.DriverConfig); err != nil {
+ return err
+ }
+
+ if err := validateIPAM(spec.IPAM); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// CreateNetwork creates and returns a Network based on the provided NetworkSpec.
+// - Returns `InvalidArgument` if the NetworkSpec is malformed.
+// - Returns an error if the creation fails.
+func (s *Server) CreateNetwork(ctx context.Context, request *api.CreateNetworkRequest) (*api.CreateNetworkResponse, error) {
+ // if you change this function, you have to change createInternalNetwork in
+ // the tests to match it (except the part where we check the label).
+ if err := validateNetworkSpec(request.Spec); err != nil {
+ return nil, err
+ }
+
+ if _, ok := request.Spec.Annotations.Labels["com.docker.swarm.internal"]; ok {
+ return nil, grpc.Errorf(codes.PermissionDenied, "label com.docker.swarm.internal is for predefined internal networks and cannot be applied by users")
+ }
+
+ // TODO(mrjana): Consider using `Name` as a primary key to handle
+ // duplicate creations. See #65
+ n := &api.Network{
+ ID: identity.NewID(),
+ Spec: *request.Spec,
+ }
+
+ err := s.store.Update(func(tx store.Tx) error {
+ return store.CreateNetwork(tx, n)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &api.CreateNetworkResponse{
+ Network: n,
+ }, nil
+}
+
+// GetNetwork returns a Network given a NetworkID.
+// - Returns `InvalidArgument` if NetworkID is not provided.
+// - Returns `NotFound` if the Network is not found.
+func (s *Server) GetNetwork(ctx context.Context, request *api.GetNetworkRequest) (*api.GetNetworkResponse, error) {
+ if request.NetworkID == "" {
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+
+ var n *api.Network
+ s.store.View(func(tx store.ReadTx) {
+ n = store.GetNetwork(tx, request.NetworkID)
+ })
+ if n == nil {
+ return nil, grpc.Errorf(codes.NotFound, "network %s not found", request.NetworkID)
+ }
+ return &api.GetNetworkResponse{
+ Network: n,
+ }, nil
+}
+
+// RemoveNetwork removes a Network referenced by NetworkID.
+// - Returns `InvalidArgument` if NetworkID is not provided.
+// - Returns `NotFound` if the Network is not found.
+// - Returns an error if the deletion fails.
+func (s *Server) RemoveNetwork(ctx context.Context, request *api.RemoveNetworkRequest) (*api.RemoveNetworkResponse, error) {
+ var (
+ services []*api.Service
+ err error
+ )
+
+ if request.NetworkID == "" {
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+
+ s.store.View(func(tx store.ReadTx) {
+ services, err = store.FindServices(tx, store.All)
+ })
+ if err != nil {
+ return nil, grpc.Errorf(codes.Internal, "could not find services using network %s", request.NetworkID)
+ }
+
+ for _, s := range services {
+ for _, na := range s.Spec.Networks {
+ if na.Target == request.NetworkID {
+ return nil, grpc.Errorf(codes.FailedPrecondition, "network %s is in use", request.NetworkID)
+ }
+ }
+ }
+
+ err = s.store.Update(func(tx store.Tx) error {
+ nw := store.GetNetwork(tx, request.NetworkID)
+ if _, ok := nw.Spec.Annotations.Labels["com.docker.swarm.internal"]; ok {
+ return grpc.Errorf(codes.PermissionDenied, "%s is a pre-defined network and cannot be removed", request.NetworkID)
+ }
+ return store.DeleteNetwork(tx, request.NetworkID)
+ })
+ if err != nil {
+ if err == store.ErrNotExist {
+ return nil, grpc.Errorf(codes.NotFound, "network %s not found", request.NetworkID)
+ }
+ return nil, err
+ }
+ return &api.RemoveNetworkResponse{}, nil
+}
+
+func filterNetworks(candidates []*api.Network, filters ...func(*api.Network) bool) []*api.Network {
+ result := []*api.Network{}
+
+ for _, c := range candidates {
+ match := true
+ for _, f := range filters {
+ if !f(c) {
+ match = false
+ break
+ }
+ }
+ if match {
+ result = append(result, c)
+ }
+ }
+
+ return result
+}
+
+// ListNetworks returns a list of all networks.
+func (s *Server) ListNetworks(ctx context.Context, request *api.ListNetworksRequest) (*api.ListNetworksResponse, error) {
+ var (
+ networks []*api.Network
+ err error
+ )
+
+ s.store.View(func(tx store.ReadTx) {
+ switch {
+ case request.Filters != nil && len(request.Filters.Names) > 0:
+ networks, err = store.FindNetworks(tx, buildFilters(store.ByName, request.Filters.Names))
+ case request.Filters != nil && len(request.Filters.IDPrefixes) > 0:
+ networks, err = store.FindNetworks(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))
+ default:
+ networks, err = store.FindNetworks(tx, store.All)
+ }
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if request.Filters != nil {
+ networks = filterNetworks(networks,
+ func(e *api.Network) bool {
+ return filterContains(e.Spec.Annotations.Name, request.Filters.Names)
+ },
+ func(e *api.Network) bool {
+ return filterContainsPrefix(e.ID, request.Filters.IDPrefixes)
+ },
+ func(e *api.Network) bool {
+ return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels)
+ },
+ )
+ }
+
+ return &api.ListNetworksResponse{
+ Networks: networks,
+ }, nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/controlapi/node.go b/vendor/src/github.com/docker/swarmkit/manager/controlapi/node.go
new file mode 100644
index 0000000000..442c12c5c7
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/controlapi/node.go
@@ -0,0 +1,243 @@
+package controlapi
+
+import (
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/identity"
+ "github.com/docker/swarmkit/manager/state/store"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+func validateNodeSpec(spec *api.NodeSpec) error {
+ if spec == nil {
+ return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+ return nil
+}
+
+// GetNode returns a Node given a NodeID.
+// - Returns `InvalidArgument` if NodeID is not provided.
+// - Returns `NotFound` if the Node is not found.
+func (s *Server) GetNode(ctx context.Context, request *api.GetNodeRequest) (*api.GetNodeResponse, error) {
+ if request.NodeID == "" {
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+
+ var node *api.Node
+ s.store.View(func(tx store.ReadTx) {
+ node = store.GetNode(tx, request.NodeID)
+ })
+ if node == nil {
+ return nil, grpc.Errorf(codes.NotFound, "node %s not found", request.NodeID)
+ }
+
+ if s.raft != nil {
+ memberlist := s.raft.GetMemberlist()
+ raftID, err := identity.ParseNodeID(request.NodeID)
+ if err == nil && memberlist[raftID] != nil {
+ node.ManagerStatus = &api.ManagerStatus{Raft: *memberlist[raftID]}
+ }
+ }
+
+ return &api.GetNodeResponse{
+ Node: node,
+ }, nil
+}
+
+func filterNodes(candidates []*api.Node, filters ...func(*api.Node) bool) []*api.Node {
+ result := []*api.Node{}
+
+ for _, c := range candidates {
+ match := true
+ for _, f := range filters {
+ if !f(c) {
+ match = false
+ break
+ }
+ }
+ if match {
+ result = append(result, c)
+ }
+ }
+
+ return result
+}
+
+// ListNodes returns a list of all nodes.
+func (s *Server) ListNodes(ctx context.Context, request *api.ListNodesRequest) (*api.ListNodesResponse, error) {
+ var (
+ nodes []*api.Node
+ err error
+ )
+ s.store.View(func(tx store.ReadTx) {
+ switch {
+ case request.Filters != nil && len(request.Filters.Names) > 0:
+ nodes, err = store.FindNodes(tx, buildFilters(store.ByName, request.Filters.Names))
+ case request.Filters != nil && len(request.Filters.IDPrefixes) > 0:
+ nodes, err = store.FindNodes(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))
+ case request.Filters != nil && len(request.Filters.Roles) > 0:
+ filters := make([]store.By, 0, len(request.Filters.Roles))
+ for _, v := range request.Filters.Roles {
+ filters = append(filters, store.ByRole(v))
+ }
+ nodes, err = store.FindNodes(tx, store.Or(filters...))
+ case request.Filters != nil && len(request.Filters.Memberships) > 0:
+ filters := make([]store.By, 0, len(request.Filters.Memberships))
+ for _, v := range request.Filters.Memberships {
+ filters = append(filters, store.ByMembership(v))
+ }
+ nodes, err = store.FindNodes(tx, store.Or(filters...))
+ default:
+ nodes, err = store.FindNodes(tx, store.All)
+ }
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if request.Filters != nil {
+ nodes = filterNodes(nodes,
+ func(e *api.Node) bool {
+ if len(request.Filters.Names) == 0 {
+ return true
+ }
+ if e.Description == nil {
+ return false
+ }
+ return filterContains(e.Description.Hostname, request.Filters.Names)
+ },
+ func(e *api.Node) bool {
+ return filterContainsPrefix(e.ID, request.Filters.IDPrefixes)
+ },
+ func(e *api.Node) bool {
+ if len(request.Filters.Labels) == 0 {
+ return true
+ }
+ if e.Description == nil {
+ return false
+ }
+ return filterMatchLabels(e.Description.Engine.Labels, request.Filters.Labels)
+ },
+ func(e *api.Node) bool {
+ if len(request.Filters.Roles) == 0 {
+ return true
+ }
+ for _, c := range request.Filters.Roles {
+ if c == e.Spec.Role {
+ return true
+ }
+ }
+ return false
+ },
+ func(e *api.Node) bool {
+ if len(request.Filters.Memberships) == 0 {
+ return true
+ }
+ for _, c := range request.Filters.Memberships {
+ if c == e.Spec.Membership {
+ return true
+ }
+ }
+ return false
+ },
+ )
+ }
+
+ // Add in manager information on nodes that are managers
+ if s.raft != nil {
+ memberlist := s.raft.GetMemberlist()
+
+ for _, n := range nodes {
+ raftID, err := identity.ParseNodeID(n.ID)
+ if err == nil && memberlist[raftID] != nil {
+ n.ManagerStatus = &api.ManagerStatus{Raft: *memberlist[raftID]}
+ }
+ }
+ }
+
+ return &api.ListNodesResponse{
+ Nodes: nodes,
+ }, nil
+}
+
+// UpdateNode updates a Node referenced by NodeID with the given NodeSpec.
+// - Returns `NotFound` if the Node is not found.
+// - Returns `InvalidArgument` if the NodeSpec is malformed.
+// - Returns an error if the update fails.
+func (s *Server) UpdateNode(ctx context.Context, request *api.UpdateNodeRequest) (*api.UpdateNodeResponse, error) {
+ if request.NodeID == "" || request.NodeVersion == nil {
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+ if err := validateNodeSpec(request.Spec); err != nil {
+ return nil, err
+ }
+
+ var node *api.Node
+ err := s.store.Update(func(tx store.Tx) error {
+ node = store.GetNode(tx, request.NodeID)
+ if node == nil {
+ return nil
+ }
+
+ // Demotion sanity checks.
+ if node.Spec.Role == api.NodeRoleManager && request.Spec.Role == api.NodeRoleWorker {
+ managers, err := store.FindNodes(tx, store.ByRole(api.NodeRoleManager))
+ if err != nil {
+ return grpc.Errorf(codes.Internal, "internal store error: %v", err)
+ }
+ if len(managers) == 1 && managers[0].ID == node.ID {
+ return grpc.Errorf(codes.FailedPrecondition, "attempting to demote the last manager of the swarm")
+ }
+ }
+
+ node.Meta.Version = *request.NodeVersion
+ node.Spec = *request.Spec.Copy()
+ return store.UpdateNode(tx, node)
+ })
+ if err != nil {
+ return nil, err
+ }
+ if node == nil {
+ return nil, grpc.Errorf(codes.NotFound, "node %s not found", request.NodeID)
+ }
+ return &api.UpdateNodeResponse{
+ Node: node,
+ }, nil
+}
+
+// RemoveNode removes a Node referenced by NodeID.
+// - Returns NotFound if the Node is not found.
+// - Returns FailedPrecondition if the Node has the manager role or is not shut down.
+// - Returns InvalidArgument if NodeID is not provided.
+// - Returns an error if the delete fails.
+func (s *Server) RemoveNode(ctx context.Context, request *api.RemoveNodeRequest) (*api.RemoveNodeResponse, error) {
+ if request.NodeID == "" {
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+ if s.raft != nil {
+ memberlist := s.raft.GetMemberlist()
+ raftID, err := identity.ParseNodeID(request.NodeID)
+ if err == nil && memberlist[raftID] != nil {
+ return nil, grpc.Errorf(codes.FailedPrecondition, "node %s is a cluster manager and is part of the quorum. It must be demoted to worker before removal", request.NodeID)
+ }
+ }
+
+ err := s.store.Update(func(tx store.Tx) error {
+ node := store.GetNode(tx, request.NodeID)
+ if node == nil {
+ return grpc.Errorf(codes.NotFound, "node %s not found", request.NodeID)
+ }
+ if node.Spec.Role == api.NodeRoleManager {
+ return grpc.Errorf(codes.FailedPrecondition, "node %s role is set to manager. It should be demoted to worker for safe removal", request.NodeID)
+ }
+ if node.Status.State == api.NodeStatus_READY {
+ return grpc.Errorf(codes.FailedPrecondition, "node %s is not down and can't be removed", request.NodeID)
+ }
+ return store.DeleteNode(tx, request.NodeID)
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &api.RemoveNodeResponse{}, nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/controlapi/server.go b/vendor/src/github.com/docker/swarmkit/manager/controlapi/server.go
new file mode 100644
index 0000000000..aa8ed326e8
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/controlapi/server.go
@@ -0,0 +1,27 @@
+package controlapi
+
+import (
+ "errors"
+
+ "github.com/docker/swarmkit/manager/state/raft"
+ "github.com/docker/swarmkit/manager/state/store"
+)
+
+var (
+ errNotImplemented = errors.New("not implemented")
+ errInvalidArgument = errors.New("invalid argument")
+)
+
+// Server is the Cluster API gRPC server.
+type Server struct {
+ store *store.MemoryStore
+ raft *raft.Node
+}
+
+// NewServer creates a Cluster API server.
+func NewServer(store *store.MemoryStore, raft *raft.Node) *Server {
+ return &Server{
+ store: store,
+ raft: raft,
+ }
+}
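+
+// Example (editor's sketch): wiring the control API into a gRPC server,
+// assuming existing "ms" (*store.MemoryStore), "rn" (*raft.Node) and
+// "srv" (*grpc.Server), with the generated registration helper from the
+// api package:
+//
+//	api.RegisterControlServer(srv, controlapi.NewServer(ms, rn))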
diff --git a/vendor/src/github.com/docker/swarmkit/manager/controlapi/service.go b/vendor/src/github.com/docker/swarmkit/manager/controlapi/service.go
new file mode 100644
index 0000000000..1ef6680cd8
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/controlapi/service.go
@@ -0,0 +1,245 @@
+package controlapi
+
+import (
+ "github.com/docker/engine-api/types/reference"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/identity"
+ "github.com/docker/swarmkit/manager/state/store"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+func validateResources(r *api.Resources) error {
+ if r == nil {
+ return nil
+ }
+
+ if r.NanoCPUs != 0 && r.NanoCPUs < 1e6 {
+ return grpc.Errorf(codes.InvalidArgument, "invalid cpu value %g: Must be at least %g", float64(r.NanoCPUs)/1e9, 1e6/1e9)
+ }
+
+ if r.MemoryBytes != 0 && r.MemoryBytes < 4*1024*1024 {
+ return grpc.Errorf(codes.InvalidArgument, "invalid memory value %d: Must be at least 4MiB", r.MemoryBytes)
+ }
+ return nil
+}
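+
+// Editor's note: the quantities above are fixed-point units, so 1e6 NanoCPUs
+// is 0.001 of a CPU. A sketch of values that pass validation:
+//
+//	r := &api.Resources{
+//		NanoCPUs:    5e8,              // 0.5 CPU
+//		MemoryBytes: 64 * 1024 * 1024, // 64MiB, above the 4MiB floor
+//	}
+//	// validateResources(r) == nil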
+
+func validateResourceRequirements(r *api.ResourceRequirements) error {
+ if r == nil {
+ return nil
+ }
+ if err := validateResources(r.Limits); err != nil {
+ return err
+ }
+ if err := validateResources(r.Reservations); err != nil {
+ return err
+ }
+ return nil
+}
+
+func validateServiceSpecTemplate(spec *api.ServiceSpec) error {
+ if err := validateResourceRequirements(spec.Task.Resources); err != nil {
+ return err
+ }
+
+ if spec.Task.GetRuntime() == nil {
+ return grpc.Errorf(codes.InvalidArgument, "TaskSpec: missing runtime")
+ }
+
+ _, ok := spec.Task.GetRuntime().(*api.TaskSpec_Container)
+ if !ok {
+ return grpc.Errorf(codes.Unimplemented, "RuntimeSpec: unimplemented runtime in service spec")
+ }
+
+ container := spec.Task.GetContainer()
+ if container == nil {
+ return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: missing in service spec")
+ }
+
+ if container.Image == "" {
+ return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: image reference must be provided")
+ }
+
+ if _, _, err := reference.Parse(container.Image); err != nil {
+ return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: %q is not a valid repository/tag", container.Image)
+ }
+ return nil
+}
+
+func validateServiceSpec(spec *api.ServiceSpec) error {
+ if spec == nil {
+ return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+ if err := validateAnnotations(spec.Annotations); err != nil {
+ return err
+ }
+ if err := validateServiceSpecTemplate(spec); err != nil {
+ return err
+ }
+ return nil
+}
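+
+// Example (editor's sketch): a minimal spec that passes validation, assuming
+// the api types referenced above:
+//
+//	spec := &api.ServiceSpec{
+//		Annotations: api.Annotations{Name: "web"},
+//		Task: api.TaskSpec{
+//			Runtime: &api.TaskSpec_Container{
+//				Container: &api.ContainerSpec{Image: "nginx:1.10"},
+//			},
+//		},
+//	}
+//	// validateServiceSpec(spec) == nil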
+
+// CreateService creates and returns a Service based on the provided ServiceSpec.
+// - Returns `InvalidArgument` if the ServiceSpec is malformed.
+// - Returns `Unimplemented` if the ServiceSpec references unimplemented features.
+// - Returns `AlreadyExists` if the ServiceID conflicts.
+// - Returns an error if the creation fails.
+func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRequest) (*api.CreateServiceResponse, error) {
+ if err := validateServiceSpec(request.Spec); err != nil {
+ return nil, err
+ }
+
+ // TODO(aluzzardi): Consider using `Name` as a primary key to handle
+ // duplicate creations. See #65
+ service := &api.Service{
+ ID: identity.NewID(),
+ Spec: *request.Spec,
+ }
+
+ err := s.store.Update(func(tx store.Tx) error {
+ return store.CreateService(tx, service)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &api.CreateServiceResponse{
+ Service: service,
+ }, nil
+}
+
+// GetService returns a Service given a ServiceID.
+// - Returns `InvalidArgument` if ServiceID is not provided.
+// - Returns `NotFound` if the Service is not found.
+func (s *Server) GetService(ctx context.Context, request *api.GetServiceRequest) (*api.GetServiceResponse, error) {
+ if request.ServiceID == "" {
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+
+ var service *api.Service
+ s.store.View(func(tx store.ReadTx) {
+ service = store.GetService(tx, request.ServiceID)
+ })
+ if service == nil {
+ return nil, grpc.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
+ }
+
+ return &api.GetServiceResponse{
+ Service: service,
+ }, nil
+}
+
+// UpdateService updates a Service referenced by ServiceID with the given ServiceSpec.
+// - Returns `NotFound` if the Service is not found.
+// - Returns `InvalidArgument` if the ServiceSpec is malformed.
+// - Returns `Unimplemented` if the ServiceSpec references unimplemented features.
+// - Returns an error if the update fails.
+func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRequest) (*api.UpdateServiceResponse, error) {
+ if request.ServiceID == "" || request.ServiceVersion == nil {
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+ if err := validateServiceSpec(request.Spec); err != nil {
+ return nil, err
+ }
+
+ var service *api.Service
+ err := s.store.Update(func(tx store.Tx) error {
+ service = store.GetService(tx, request.ServiceID)
+ if service == nil {
+ return nil
+ }
+ service.Meta.Version = *request.ServiceVersion
+ service.Spec = *request.Spec.Copy()
+ return store.UpdateService(tx, service)
+ })
+ if err != nil {
+ return nil, err
+ }
+ if service == nil {
+ return nil, grpc.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
+ }
+ return &api.UpdateServiceResponse{
+ Service: service,
+ }, nil
+}
+
+// RemoveService removes a Service referenced by ServiceID.
+// - Returns `InvalidArgument` if ServiceID is not provided.
+// - Returns `NotFound` if the Service is not found.
+// - Returns an error if the deletion fails.
+func (s *Server) RemoveService(ctx context.Context, request *api.RemoveServiceRequest) (*api.RemoveServiceResponse, error) {
+ if request.ServiceID == "" {
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+
+ err := s.store.Update(func(tx store.Tx) error {
+ return store.DeleteService(tx, request.ServiceID)
+ })
+ if err != nil {
+ if err == store.ErrNotExist {
+ return nil, grpc.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
+ }
+ return nil, err
+ }
+ return &api.RemoveServiceResponse{}, nil
+}
+
+func filterServices(candidates []*api.Service, filters ...func(*api.Service) bool) []*api.Service {
+ result := []*api.Service{}
+
+ for _, c := range candidates {
+ match := true
+ for _, f := range filters {
+ if !f(c) {
+ match = false
+ break
+ }
+ }
+ if match {
+ result = append(result, c)
+ }
+ }
+
+ return result
+}
+
+// ListServices returns a list of all services.
+func (s *Server) ListServices(ctx context.Context, request *api.ListServicesRequest) (*api.ListServicesResponse, error) {
+ var (
+ services []*api.Service
+ err error
+ )
+
+ s.store.View(func(tx store.ReadTx) {
+ switch {
+ case request.Filters != nil && len(request.Filters.Names) > 0:
+ services, err = store.FindServices(tx, buildFilters(store.ByName, request.Filters.Names))
+ case request.Filters != nil && len(request.Filters.IDPrefixes) > 0:
+ services, err = store.FindServices(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))
+ default:
+ services, err = store.FindServices(tx, store.All)
+ }
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if request.Filters != nil {
+ services = filterServices(services,
+ func(e *api.Service) bool {
+ return filterContains(e.Spec.Annotations.Name, request.Filters.Names)
+ },
+ func(e *api.Service) bool {
+ return filterContainsPrefix(e.ID, request.Filters.IDPrefixes)
+ },
+ func(e *api.Service) bool {
+ return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels)
+ },
+ )
+ }
+
+ return &api.ListServicesResponse{
+ Services: services,
+ }, nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/controlapi/task.go b/vendor/src/github.com/docker/swarmkit/manager/controlapi/task.go
new file mode 100644
index 0000000000..5cca4e39b3
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/controlapi/task.go
@@ -0,0 +1,136 @@
+package controlapi
+
+import (
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/manager/state/store"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+// GetTask returns a Task given a TaskID.
+// - Returns `InvalidArgument` if TaskID is not provided.
+// - Returns `NotFound` if the Task is not found.
+func (s *Server) GetTask(ctx context.Context, request *api.GetTaskRequest) (*api.GetTaskResponse, error) {
+ if request.TaskID == "" {
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+
+ var task *api.Task
+ s.store.View(func(tx store.ReadTx) {
+ task = store.GetTask(tx, request.TaskID)
+ })
+ if task == nil {
+ return nil, grpc.Errorf(codes.NotFound, "task %s not found", request.TaskID)
+ }
+ return &api.GetTaskResponse{
+ Task: task,
+ }, nil
+}
+
+// RemoveTask removes a Task referenced by TaskID.
+// - Returns `InvalidArgument` if TaskID is not provided.
+// - Returns `NotFound` if the Task is not found.
+// - Returns an error if the deletion fails.
+func (s *Server) RemoveTask(ctx context.Context, request *api.RemoveTaskRequest) (*api.RemoveTaskResponse, error) {
+ if request.TaskID == "" {
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ }
+
+ err := s.store.Update(func(tx store.Tx) error {
+ return store.DeleteTask(tx, request.TaskID)
+ })
+ if err != nil {
+ if err == store.ErrNotExist {
+ return nil, grpc.Errorf(codes.NotFound, "task %s not found", request.TaskID)
+ }
+ return nil, err
+ }
+ return &api.RemoveTaskResponse{}, nil
+}
+
+func filterTasks(candidates []*api.Task, filters ...func(*api.Task) bool) []*api.Task {
+ result := []*api.Task{}
+
+ for _, c := range candidates {
+ match := true
+ for _, f := range filters {
+ if !f(c) {
+ match = false
+ break
+ }
+ }
+ if match {
+ result = append(result, c)
+ }
+ }
+
+ return result
+}
+
+// ListTasks returns a list of all tasks.
+func (s *Server) ListTasks(ctx context.Context, request *api.ListTasksRequest) (*api.ListTasksResponse, error) {
+ var (
+ tasks []*api.Task
+ err error
+ )
+
+ s.store.View(func(tx store.ReadTx) {
+ switch {
+ case request.Filters != nil && len(request.Filters.Names) > 0:
+ tasks, err = store.FindTasks(tx, buildFilters(store.ByName, request.Filters.Names))
+ case request.Filters != nil && len(request.Filters.IDPrefixes) > 0:
+ tasks, err = store.FindTasks(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))
+ case request.Filters != nil && len(request.Filters.ServiceIDs) > 0:
+ tasks, err = store.FindTasks(tx, buildFilters(store.ByServiceID, request.Filters.ServiceIDs))
+ case request.Filters != nil && len(request.Filters.NodeIDs) > 0:
+ tasks, err = store.FindTasks(tx, buildFilters(store.ByNodeID, request.Filters.NodeIDs))
+ case request.Filters != nil && len(request.Filters.DesiredStates) > 0:
+ filters := make([]store.By, 0, len(request.Filters.DesiredStates))
+ for _, v := range request.Filters.DesiredStates {
+ filters = append(filters, store.ByDesiredState(v))
+ }
+ tasks, err = store.FindTasks(tx, store.Or(filters...))
+ default:
+ tasks, err = store.FindTasks(tx, store.All)
+ }
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if request.Filters != nil {
+ tasks = filterTasks(tasks,
+ func(e *api.Task) bool {
+ return filterContains(e.ServiceAnnotations.Name, request.Filters.Names)
+ },
+ func(e *api.Task) bool {
+ return filterContainsPrefix(e.ID, request.Filters.IDPrefixes)
+ },
+ func(e *api.Task) bool {
+ return filterMatchLabels(e.ServiceAnnotations.Labels, request.Filters.Labels)
+ },
+ func(e *api.Task) bool {
+ return filterContains(e.ServiceID, request.Filters.ServiceIDs)
+ },
+ func(e *api.Task) bool {
+ return filterContains(e.NodeID, request.Filters.NodeIDs)
+ },
+ func(e *api.Task) bool {
+ if len(request.Filters.DesiredStates) == 0 {
+ return true
+ }
+ for _, c := range request.Filters.DesiredStates {
+ if c == e.DesiredState {
+ return true
+ }
+ }
+ return false
+ },
+ )
+ }
+
+ return &api.ListTasksResponse{
+ Tasks: tasks,
+ }, nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go b/vendor/src/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
new file mode 100644
index 0000000000..b17b3e56f3
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
@@ -0,0 +1,760 @@
+package dispatcher
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/transport"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/ca"
+ "github.com/docker/swarmkit/identity"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+ "github.com/docker/swarmkit/manager/state/watch"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+ "golang.org/x/net/context"
+)
+
+const (
+ // DefaultHeartBeatPeriod is used for setting default value in cluster config
+ // and in case if cluster config is missing.
+ DefaultHeartBeatPeriod = 5 * time.Second
+ defaultHeartBeatEpsilon = 500 * time.Millisecond
+ defaultGracePeriodMultiplier = 3
+
+ // maxBatchItems is the threshold of queued writes that should
+ // trigger an actual transaction to commit them to the shared store.
+ maxBatchItems = 10000
+
+ // maxBatchInterval needs to strike a balance between keeping
+ // latency low, and realizing opportunities to combine many writes
+ // into a single transaction. A fraction of a second feels about
+ // right.
+ maxBatchInterval = 100 * time.Millisecond
+)
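+
+// Editor's note: taken together, these constants mean a store transaction is
+// committed at most once per maxBatchInterval (100ms) unless maxBatchItems
+// (10000) updates queue up first, in which case processTaskUpdatesTrigger
+// forces an immediate flush.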
+
+var (
+ // ErrNodeAlreadyRegistered is returned if a node with the same ID was
+ // already registered with this dispatcher.
+ ErrNodeAlreadyRegistered = errors.New("node already registered")
+ // ErrNodeNotRegistered is returned if a node with the given ID was not
+ // registered with this dispatcher.
+ ErrNodeNotRegistered = errors.New("node not registered")
+ // ErrSessionInvalid is returned when the session in use is no longer valid.
+ // The node should re-register and start a new session.
+ ErrSessionInvalid = errors.New("session invalid")
+ // ErrNodeNotFound is returned when the Node does not exist in raft.
+ ErrNodeNotFound = errors.New("node not found")
+)
+
+// Config is the configuration for a Dispatcher. For defaults, use
+// DefaultConfig.
+type Config struct {
+ // Addr configures the address the dispatcher reports to agents.
+ Addr string
+ HeartbeatPeriod time.Duration
+ HeartbeatEpsilon time.Duration
+ GracePeriodMultiplier int
+}
+
+// DefaultConfig returns default config for Dispatcher.
+func DefaultConfig() *Config {
+ return &Config{
+ HeartbeatPeriod: DefaultHeartBeatPeriod,
+ HeartbeatEpsilon: defaultHeartBeatEpsilon,
+ GracePeriodMultiplier: defaultGracePeriodMultiplier,
+ }
+}
+
+// Cluster is an interface that represents a raft cluster; manager/state/raft.Node
+// implements it. This interface exists only to make unit testing easier.
+type Cluster interface {
+ GetMemberlist() map[uint64]*api.RaftMember
+ MemoryStore() *store.MemoryStore
+}
+
+// Dispatcher is responsible for dispatching tasks and tracking agent health.
+type Dispatcher struct {
+ mu sync.Mutex
+ addr string
+ nodes *nodeStore
+ store *store.MemoryStore
+ mgrQueue *watch.Queue
+ lastSeenManagers []*api.WeightedPeer
+ networkBootstrapKeys []*api.EncryptionKey
+ keyMgrQueue *watch.Queue
+ config *Config
+ cluster Cluster
+ ctx context.Context
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+
+ taskUpdates map[string]*api.TaskStatus // indexed by task ID
+ taskUpdatesLock sync.Mutex
+
+ processTaskUpdatesTrigger chan struct{}
+}
+
+// weightedPeerByNodeID is a sort wrapper for []*api.WeightedPeer
+type weightedPeerByNodeID []*api.WeightedPeer
+
+func (b weightedPeerByNodeID) Less(i, j int) bool { return b[i].Peer.NodeID < b[j].Peer.NodeID }
+
+func (b weightedPeerByNodeID) Len() int { return len(b) }
+
+func (b weightedPeerByNodeID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+
+// New returns a Dispatcher built on the given cluster interface (usually raft.Node).
+// NOTE: every handler that touches raft must add itself to Dispatcher.wg.
+func New(cluster Cluster, c *Config) *Dispatcher {
+ return &Dispatcher{
+ addr: c.Addr,
+ nodes: newNodeStore(c.HeartbeatPeriod, c.HeartbeatEpsilon, c.GracePeriodMultiplier),
+ store: cluster.MemoryStore(),
+ cluster: cluster,
+ mgrQueue: watch.NewQueue(16),
+ keyMgrQueue: watch.NewQueue(16),
+ lastSeenManagers: getWeightedPeers(cluster),
+ taskUpdates: make(map[string]*api.TaskStatus),
+ processTaskUpdatesTrigger: make(chan struct{}, 1),
+ config: c,
+ }
+}
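+
+// Example (editor's sketch): constructing and running a dispatcher, assuming
+// a raft node "rn" that satisfies the Cluster interface:
+//
+//	d := New(rn, DefaultConfig())
+//	go func() {
+//		if err := d.Run(ctx); err != nil {
+//			log.G(ctx).WithError(err).Error("dispatcher stopped")
+//		}
+//	}()
+//	defer d.Stop()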
+
+func getWeightedPeers(cluster Cluster) []*api.WeightedPeer {
+ members := cluster.GetMemberlist()
+ var mgrs []*api.WeightedPeer
+ for _, m := range members {
+ mgrs = append(mgrs, &api.WeightedPeer{
+ Peer: &api.Peer{
+ NodeID: identity.FormatNodeID(m.RaftID),
+ Addr: m.Addr,
+ },
+ Weight: 1,
+ })
+ }
+ return mgrs
+}
+
+// Run runs the dispatcher tasks that should only run on the leader dispatcher.
+// The dispatcher can be stopped by cancelling ctx or by calling Stop().
+func (d *Dispatcher) Run(ctx context.Context) error {
+ d.mu.Lock()
+ if d.isRunning() {
+ d.mu.Unlock()
+ return fmt.Errorf("dispatcher is stopped")
+ }
+ d.wg.Add(1)
+ defer d.wg.Done()
+ logger := log.G(ctx).WithField("module", "dispatcher")
+ ctx = log.WithLogger(ctx, logger)
+ if err := d.markNodesUnknown(ctx); err != nil {
+ logger.Errorf("failed to mark all nodes unknown: %v", err)
+ }
+ configWatcher, cancel, err := store.ViewAndWatch(
+ d.store,
+ func(readTx store.ReadTx) error {
+ clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
+ if err != nil {
+ return err
+ }
+ if err == nil && len(clusters) == 1 {
+ d.config.HeartbeatPeriod = time.Duration(clusters[0].Spec.Dispatcher.HeartbeatPeriod)
+ if clusters[0].NetworkBootstrapKeys != nil {
+ d.networkBootstrapKeys = clusters[0].NetworkBootstrapKeys
+ }
+ }
+ return nil
+ },
+ state.EventUpdateCluster{},
+ )
+ if err != nil {
+ return err
+ }
+ defer cancel()
+ d.ctx, d.cancel = context.WithCancel(ctx)
+ d.mu.Unlock()
+
+ publishManagers := func() {
+ mgrs := getWeightedPeers(d.cluster)
+ sort.Sort(weightedPeerByNodeID(mgrs))
+ d.mu.Lock()
+ if reflect.DeepEqual(mgrs, d.lastSeenManagers) {
+ d.mu.Unlock()
+ return
+ }
+ d.lastSeenManagers = mgrs
+ d.mu.Unlock()
+ d.mgrQueue.Publish(mgrs)
+ }
+
+ publishManagers()
+ publishTicker := time.NewTicker(1 * time.Second)
+ defer publishTicker.Stop()
+
+ batchTimer := time.NewTimer(maxBatchInterval)
+ defer batchTimer.Stop()
+
+ for {
+ select {
+ case <-publishTicker.C:
+ publishManagers()
+ case <-d.processTaskUpdatesTrigger:
+ d.processTaskUpdates()
+ batchTimer.Reset(maxBatchInterval)
+ case <-batchTimer.C:
+ d.processTaskUpdates()
+ batchTimer.Reset(maxBatchInterval)
+ case v := <-configWatcher:
+ cluster := v.(state.EventUpdateCluster)
+ d.mu.Lock()
+ d.config.HeartbeatPeriod = time.Duration(cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod)
+ d.nodes.updatePeriod(d.config.HeartbeatPeriod, d.config.HeartbeatEpsilon, d.config.GracePeriodMultiplier)
+ d.networkBootstrapKeys = cluster.Cluster.NetworkBootstrapKeys
+ d.mu.Unlock()
+ d.keyMgrQueue.Publish(struct{}{})
+ case <-d.ctx.Done():
+ return nil
+ }
+ }
+}
+
+// Stop stops dispatcher and closes all grpc streams.
+func (d *Dispatcher) Stop() error {
+ d.mu.Lock()
+ if !d.isRunning() {
+ // unlock before returning so a later Run or Stop cannot deadlock
+ d.mu.Unlock()
+ return fmt.Errorf("dispatcher is already stopped")
+ }
+ d.cancel()
+ d.mu.Unlock()
+ d.nodes.Clean()
+ // wait for all handlers to finish their raft deals, because manager will
+ // set raftNode to nil
+ d.wg.Wait()
+ return nil
+}
+
+func (d *Dispatcher) addTask() error {
+ d.mu.Lock()
+ if !d.isRunning() {
+ d.mu.Unlock()
+ return grpc.Errorf(codes.Aborted, "dispatcher is stopped")
+ }
+ d.wg.Add(1)
+ d.mu.Unlock()
+ return nil
+}
+
+func (d *Dispatcher) doneTask() {
+ d.wg.Done()
+}
+
+func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
+ log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown")
+ var nodes []*api.Node
+ var err error
+ d.store.View(func(tx store.ReadTx) {
+ nodes, err = store.FindNodes(tx, store.All)
+ })
+ if err != nil {
+ return fmt.Errorf("failed to get list of nodes: %v", err)
+ }
+ _, err = d.store.Batch(func(batch *store.Batch) error {
+ for _, n := range nodes {
+ err := batch.Update(func(tx store.Tx) error {
+ // check if node is still here
+ node := store.GetNode(tx, n.ID)
+ if node == nil {
+ return nil
+ }
+ // do not try to resurrect down nodes
+ if node.Status.State == api.NodeStatus_DOWN {
+ return nil
+ }
+ node.Status = api.NodeStatus{
+ State: api.NodeStatus_UNKNOWN,
+ Message: "Node marked as unknown due to leadership change in cluster",
+ }
+ nodeID := node.ID
+
+ expireFunc := func() {
+ log := log.WithField("node", nodeID)
+ nodeStatus := api.NodeStatus{State: api.NodeStatus_DOWN, Message: "heartbeat failure for unknown node"}
+ log.Debugf("heartbeat expiration for unknown node")
+ if err := d.nodeRemove(nodeID, nodeStatus); err != nil {
+ log.WithError(err).Errorf("failed deregistering node after heartbeat expiration for unknown node")
+ }
+ }
+ if err := d.nodes.AddUnknown(node, expireFunc); err != nil {
+ return fmt.Errorf("add unknown node failed: %v", err)
+ }
+ if err := store.UpdateNode(tx, node); err != nil {
+ return fmt.Errorf("update failed %v", err)
+ }
+ return nil
+ })
+ if err != nil {
+ log.WithField("node", n.ID).WithError(err).Errorf("failed to mark node as unknown")
+ }
+ }
+ return nil
+ })
+ return err
+}
+
+func (d *Dispatcher) isRunning() bool {
+ if d.ctx == nil {
+ return false
+ }
+ select {
+ case <-d.ctx.Done():
+ return false
+ default:
+ }
+ return true
+}
+
+// register registers a node with this dispatcher and returns the node ID and a new session ID.
+func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, string, error) {
+ // prevent register until we're ready to accept it
+ if err := d.addTask(); err != nil {
+ return "", "", err
+ }
+ defer d.doneTask()
+
+ // create or update node in store
+ // TODO(stevvooe): Validate node specification.
+ var node *api.Node
+ err := d.store.Update(func(tx store.Tx) error {
+ node = store.GetNode(tx, nodeID)
+ if node == nil {
+ return ErrNodeNotFound
+ }
+
+ node.Description = description
+ node.Status = api.NodeStatus{
+ State: api.NodeStatus_READY,
+ }
+ return store.UpdateNode(tx, node)
+
+ })
+ if err != nil {
+ return "", "", err
+ }
+
+ expireFunc := func() {
+ nodeStatus := api.NodeStatus{State: api.NodeStatus_DOWN, Message: "heartbeat failure"}
+ log.G(ctx).Debugf("heartbeat expiration")
+ if err := d.nodeRemove(nodeID, nodeStatus); err != nil {
+ log.G(ctx).WithError(err).Errorf("failed deregistering node after heartbeat expiration")
+ }
+ }
+
+ rn := d.nodes.Add(node, expireFunc)
+
+// NOTE(stevvooe): We need to be a little careful with re-registration. The
+ // current implementation just matches the node id and then gives away the
+ // sessionID. If we ever want to use sessionID as a secret, which we may
+ // want to, this is giving away the keys to the kitchen.
+ //
+ // The right behavior is going to be informed by identity. Basically, each
+ // time a node registers, we invalidate the session and issue a new
+ // session, once identity is proven. This will cause misbehaved agents to
+ // be kicked when multiple connections are made.
+ return rn.Node.ID, rn.SessionID, nil
+}
+
+// UpdateTaskStatus updates the status of a task. Nodes should send such updates
+// on every status change of their tasks.
+func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStatusRequest) (*api.UpdateTaskStatusResponse, error) {
+ nodeInfo, err := ca.RemoteNode(ctx)
+ if err != nil {
+ return nil, err
+ }
+ nodeID := nodeInfo.NodeID
+ fields := logrus.Fields{
+ "node.id": nodeID,
+ "node.session": r.SessionID,
+ "method": "(*Dispatcher).UpdateTaskStatus",
+ }
+ if nodeInfo.ForwardedBy != nil {
+ fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
+ }
+ log := log.G(ctx).WithFields(fields)
+
+ if err := d.addTask(); err != nil {
+ return nil, err
+ }
+ defer d.doneTask()
+
+ if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
+ return nil, err
+ }
+
+ // Validate task updates
+ for _, u := range r.Updates {
+ if u.Status == nil {
+ log.WithField("task.id", u.TaskID).Warn("task report has nil status")
+ continue
+ }
+
+ var t *api.Task
+ d.store.View(func(tx store.ReadTx) {
+ t = store.GetTask(tx, u.TaskID)
+ })
+ if t == nil {
+ log.WithField("task.id", u.TaskID).Warn("cannot find target task in store")
+ continue
+ }
+
+ if t.NodeID != nodeID {
+ err := grpc.Errorf(codes.PermissionDenied, "cannot update a task not assigned to this node")
+ log.WithField("task.id", u.TaskID).Error(err)
+ return nil, err
+ }
+ }
+
+ d.taskUpdatesLock.Lock()
+ // Enqueue task updates
+ for _, u := range r.Updates {
+ if u.Status == nil {
+ continue
+ }
+ d.taskUpdates[u.TaskID] = u.Status
+ }
+
+ numUpdates := len(d.taskUpdates)
+ d.taskUpdatesLock.Unlock()
+
+ if numUpdates >= maxBatchItems {
+ d.processTaskUpdatesTrigger <- struct{}{}
+ }
+ return nil, nil
+}
+
+func (d *Dispatcher) processTaskUpdates() {
+ d.taskUpdatesLock.Lock()
+ if len(d.taskUpdates) == 0 {
+ d.taskUpdatesLock.Unlock()
+ return
+ }
+ taskUpdates := d.taskUpdates
+ d.taskUpdates = make(map[string]*api.TaskStatus)
+ d.taskUpdatesLock.Unlock()
+
+ log := log.G(d.ctx).WithFields(logrus.Fields{
+ "method": "(*Dispatcher).processTaskUpdates",
+ })
+
+ _, err := d.store.Batch(func(batch *store.Batch) error {
+ for taskID, status := range taskUpdates {
+ err := batch.Update(func(tx store.Tx) error {
+ logger := log.WithField("task.id", taskID)
+ task := store.GetTask(tx, taskID)
+ if task == nil {
+ logger.Errorf("task unavailable")
+ return nil
+ }
+
+ logger = logger.WithField("state.transition", fmt.Sprintf("%v->%v", task.Status.State, status.State))
+
+ if task.Status == *status {
+ logger.Debug("task status identical, ignoring")
+ return nil
+ }
+
+ if task.Status.State > status.State {
+ logger.Debug("task status invalid transition")
+ return nil
+ }
+
+ task.Status = *status
+ if err := store.UpdateTask(tx, task); err != nil {
+ logger.WithError(err).Error("failed to update task status")
+ return nil
+ }
+ logger.Debug("task status updated")
+ return nil
+ })
+ if err != nil {
+ log.WithError(err).Error("dispatcher transaction failed")
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ log.WithError(err).Error("dispatcher batch failed")
+ }
+}
+
+// Tasks is a stream of task states for a node. Each message contains the full
+// list of tasks that should run on the node; any task not present in that list
+// should be terminated.
+func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServer) error {
+ nodeInfo, err := ca.RemoteNode(stream.Context())
+ if err != nil {
+ return err
+ }
+ nodeID := nodeInfo.NodeID
+
+ if err := d.addTask(); err != nil {
+ return err
+ }
+ defer d.doneTask()
+
+ fields := logrus.Fields{
+ "node.id": nodeID,
+ "node.session": r.SessionID,
+ "method": "(*Dispatcher).Tasks",
+ }
+ if nodeInfo.ForwardedBy != nil {
+ fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
+ }
+ log.G(stream.Context()).WithFields(fields).Debugf("")
+
+ if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
+ return err
+ }
+
+ tasksMap := make(map[string]*api.Task)
+ nodeTasks, cancel, err := store.ViewAndWatch(
+ d.store,
+ func(readTx store.ReadTx) error {
+ tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
+ if err != nil {
+ return err
+ }
+ for _, t := range tasks {
+ tasksMap[t.ID] = t
+ }
+ return nil
+ },
+ state.EventCreateTask{Task: &api.Task{NodeID: nodeID},
+ Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
+ state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
+ Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
+ state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
+ Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
+ )
+ if err != nil {
+ return err
+ }
+ defer cancel()
+
+ for {
+ if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
+ return err
+ }
+
+ var tasks []*api.Task
+ for _, t := range tasksMap {
+ // dispatcher only sends tasks that have been assigned to a node
+ if t != nil && t.Status.State >= api.TaskStateAssigned {
+ tasks = append(tasks, t)
+ }
+ }
+
+ if err := stream.Send(&api.TasksMessage{Tasks: tasks}); err != nil {
+ return err
+ }
+
+ select {
+ case event := <-nodeTasks:
+ switch v := event.(type) {
+ case state.EventCreateTask:
+ tasksMap[v.Task.ID] = v.Task
+ case state.EventUpdateTask:
+ tasksMap[v.Task.ID] = v.Task
+ case state.EventDeleteTask:
+ delete(tasksMap, v.Task.ID)
+ }
+ case <-stream.Context().Done():
+ return stream.Context().Err()
+ case <-d.ctx.Done():
+ return d.ctx.Err()
+ }
+ }
+}
+
+func (d *Dispatcher) nodeRemove(id string, status api.NodeStatus) error {
+ if err := d.addTask(); err != nil {
+ return err
+ }
+ defer d.doneTask()
+ // TODO(aaronl): Is it worth batching node removals?
+ err := d.store.Update(func(tx store.Tx) error {
+ node := store.GetNode(tx, id)
+ if node == nil {
+ return errors.New("node not found")
+ }
+ node.Status = status
+ return store.UpdateNode(tx, node)
+ })
+ if err != nil {
+ return fmt.Errorf("failed to update node %s status to down: %v", id, err)
+ }
+
+ if rn := d.nodes.Delete(id); rn == nil {
+ return fmt.Errorf("node %s is not found in local storage", id)
+ }
+
+ return nil
+}
+
+// Heartbeat is the heartbeat method for nodes. It returns a new TTL in the response.
+// A node must send its next heartbeat before now + TTL elapses, otherwise it is
+// deregistered from the dispatcher and its status is updated to NodeStatus_DOWN.
+func (d *Dispatcher) Heartbeat(ctx context.Context, r *api.HeartbeatRequest) (*api.HeartbeatResponse, error) {
+ nodeInfo, err := ca.RemoteNode(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ period, err := d.nodes.Heartbeat(nodeInfo.NodeID, r.SessionID)
+ return &api.HeartbeatResponse{Period: *ptypes.DurationProto(period)}, err
+}
+
+func (d *Dispatcher) getManagers() []*api.WeightedPeer {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ return d.lastSeenManagers
+}
+
+// Session is a stream that controls the agent connection.
+// Each message contains a list of backup Managers with weights. There is also
+// a special boolean field, Disconnect, which when true indicates that the node
+// should immediately reconnect to another Manager.
+func (d *Dispatcher) Session(r *api.SessionRequest, stream api.Dispatcher_SessionServer) error {
+ ctx := stream.Context()
+ nodeInfo, err := ca.RemoteNode(ctx)
+ if err != nil {
+ return err
+ }
+ nodeID := nodeInfo.NodeID
+
+ if err := d.addTask(); err != nil {
+ return err
+ }
+ defer d.doneTask()
+
+ // register the node.
+ nodeID, sessionID, err := d.register(stream.Context(), nodeID, r.Description)
+ if err != nil {
+ return err
+ }
+
+ fields := logrus.Fields{
+ "node.id": nodeID,
+ "node.session": sessionID,
+ "method": "(*Dispatcher).Session",
+ }
+ if nodeInfo.ForwardedBy != nil {
+ fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
+ }
+ log := log.G(ctx).WithFields(fields)
+
+ var nodeObj *api.Node
+ nodeUpdates, cancel, err := store.ViewAndWatch(d.store, func(readTx store.ReadTx) error {
+ nodeObj = store.GetNode(readTx, nodeID)
+ return nil
+ }, state.EventUpdateNode{Node: &api.Node{ID: nodeID},
+ Checks: []state.NodeCheckFunc{state.NodeCheckID}},
+ )
+ if cancel != nil {
+ defer cancel()
+ }
+
+ if err != nil {
+ log.WithError(err).Error("ViewAndWatch Node failed")
+ }
+
+ if _, err = d.nodes.GetWithSession(nodeID, sessionID); err != nil {
+ return err
+ }
+
+ if err := stream.Send(&api.SessionMessage{
+ SessionID: sessionID,
+ Node: nodeObj,
+ Managers: d.getManagers(),
+ NetworkBootstrapKeys: d.networkBootstrapKeys,
+ }); err != nil {
+ return err
+ }
+
+ managerUpdates, mgrCancel := d.mgrQueue.Watch()
+ defer mgrCancel()
+ keyMgrUpdates, keyMgrCancel := d.keyMgrQueue.Watch()
+ defer keyMgrCancel()
+
+ // disconnect is a helper that forcibly shuts down the connection
+ disconnect := func() error {
+ // force disconnect by shutting down the stream.
+ transportStream, ok := transport.StreamFromContext(stream.Context())
+ if ok {
+ // if we have the transport stream, we can signal a disconnect
+ // in the client.
+ if err := transportStream.ServerTransport().Close(); err != nil {
+ log.WithError(err).Error("session end")
+ }
+ }
+
+ nodeStatus := api.NodeStatus{State: api.NodeStatus_DISCONNECTED, Message: "node is currently trying to find new manager"}
+ if err := d.nodeRemove(nodeID, nodeStatus); err != nil {
+ log.WithError(err).Error("failed to remove node")
+ }
+ // still return an abort if the transport closure was ineffective.
+ return grpc.Errorf(codes.Aborted, "node must disconnect")
+ }
+
+ for {
+ // After each message send, we need to check that the node's sessionID
+ // hasn't changed. If it has, we close the stream and make the node
+ // re-register.
+ node, err := d.nodes.GetWithSession(nodeID, sessionID)
+ if err != nil {
+ return err
+ }
+
+ var mgrs []*api.WeightedPeer
+
+ select {
+ case ev := <-managerUpdates:
+ mgrs = ev.([]*api.WeightedPeer)
+ case ev := <-nodeUpdates:
+ nodeObj = ev.(state.EventUpdateNode).Node
+ case <-stream.Context().Done():
+ return stream.Context().Err()
+ case <-node.Disconnect:
+ return disconnect()
+ case <-d.ctx.Done():
+ return disconnect()
+ case <-keyMgrUpdates:
+ }
+ if mgrs == nil {
+ mgrs = d.getManagers()
+ }
+
+ if err := stream.Send(&api.SessionMessage{
+ SessionID: sessionID,
+ Node: nodeObj,
+ Managers: mgrs,
+ NetworkBootstrapKeys: d.networkBootstrapKeys,
+ }); err != nil {
+ return err
+ }
+ }
+}
+
+// NodeCount returns the number of nodes connected to this dispatcher.
+func (d *Dispatcher) NodeCount() int {
+ return d.nodes.Len()
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/dispatcher/heartbeat/heartbeat.go b/vendor/src/github.com/docker/swarmkit/manager/dispatcher/heartbeat/heartbeat.go
new file mode 100644
index 0000000000..b591868cf6
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/dispatcher/heartbeat/heartbeat.go
@@ -0,0 +1,39 @@
+package heartbeat
+
+import (
+ "sync/atomic"
+ "time"
+)
+
+// Heartbeat is a simple way to track heartbeats.
+type Heartbeat struct {
+ timeout int64
+ timer *time.Timer
+}
+
+// New creates a new Heartbeat with the specified timeout. timeoutFunc is called
+// when the heartbeat timeout expires. Note that after a timeout you need to
+// call Beat() to reactivate the Heartbeat.
+func New(timeout time.Duration, timeoutFunc func()) *Heartbeat {
+ hb := &Heartbeat{
+ timeout: int64(timeout),
+ timer: time.AfterFunc(timeout, timeoutFunc),
+ }
+ return hb
+}
+
+// Beat resets the internal timer to the full timeout. It can also be used to
+// reactivate the Heartbeat after a timeout.
+func (hb *Heartbeat) Beat() {
+ hb.timer.Reset(time.Duration(atomic.LoadInt64(&hb.timeout)))
+}
+
+// Update sets the internal timeout to d. It does not call Beat.
+func (hb *Heartbeat) Update(d time.Duration) {
+ atomic.StoreInt64(&hb.timeout, int64(d))
+}
+
+// Stop stops Heartbeat timer.
+func (hb *Heartbeat) Stop() {
+ hb.timer.Stop()
+}
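+
+// Example (editor's sketch): a typical lifecycle; markNodeDown is a
+// hypothetical expiration callback:
+//
+//	hb := heartbeat.New(5*time.Second, markNodeDown)
+//	defer hb.Stop()
+//	hb.Beat()                   // on every heartbeat received
+//	hb.Update(10 * time.Second) // on config change; takes effect at the next Beat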
diff --git a/vendor/src/github.com/docker/swarmkit/manager/dispatcher/nodes.go b/vendor/src/github.com/docker/swarmkit/manager/dispatcher/nodes.go
new file mode 100644
index 0000000000..5982db7b90
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/dispatcher/nodes.go
@@ -0,0 +1,162 @@
+package dispatcher
+
+import (
+ "sync"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/identity"
+ "github.com/docker/swarmkit/manager/dispatcher/heartbeat"
+)
+
+type registeredNode struct {
+ SessionID string
+ Heartbeat *heartbeat.Heartbeat
+ Node *api.Node
+ Disconnect chan struct{} // signal to disconnect
+ mu sync.Mutex
+}
+
+// checkSessionID determines if the SessionID has changed and returns the
+// appropriate GRPC error code.
+//
+// This may not belong here in the future.
+func (rn *registeredNode) checkSessionID(sessionID string) error {
+ rn.mu.Lock()
+ defer rn.mu.Unlock()
+
+ // Before each message send, we need to check that the node's sessionID
+ // hasn't changed. If it has, we close the stream and make the node
+ // re-register.
+ if sessionID == "" || rn.SessionID != sessionID {
+ return grpc.Errorf(codes.InvalidArgument, ErrSessionInvalid.Error())
+ }
+
+ return nil
+}
+
+type nodeStore struct {
+ periodChooser *periodChooser
+ gracePeriodMultiplier time.Duration
+ nodes map[string]*registeredNode
+ mu sync.RWMutex
+}
+
+func newNodeStore(hbPeriod, hbEpsilon time.Duration, graceMultiplier int) *nodeStore {
+ return &nodeStore{
+ nodes: make(map[string]*registeredNode),
+ periodChooser: newPeriodChooser(hbPeriod, hbEpsilon),
+ gracePeriodMultiplier: time.Duration(graceMultiplier),
+ }
+}
+
+func (s *nodeStore) updatePeriod(hbPeriod, hbEpsilon time.Duration, gracePeriodMultiplier int) {
+ s.mu.Lock()
+ s.periodChooser = newPeriodChooser(hbPeriod, hbEpsilon)
+ s.gracePeriodMultiplier = time.Duration(gracePeriodMultiplier)
+ s.mu.Unlock()
+}
+
+func (s *nodeStore) Len() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return len(s.nodes)
+}
+
+func (s *nodeStore) AddUnknown(n *api.Node, expireFunc func()) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ rn := &registeredNode{
+ Node: n,
+ }
+ s.nodes[n.ID] = rn
+ rn.Heartbeat = heartbeat.New(s.periodChooser.Choose()*s.gracePeriodMultiplier, expireFunc)
+ return nil
+}
+
+// Add adds a new node and returns it; it replaces an existing node without notification.
+func (s *nodeStore) Add(n *api.Node, expireFunc func()) *registeredNode {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if existRn, ok := s.nodes[n.ID]; ok {
+ existRn.Heartbeat.Stop()
+ delete(s.nodes, n.ID)
+ }
+ rn := &registeredNode{
+ SessionID: identity.NewID(), // session ID is local to the dispatcher.
+ Node: n,
+ Disconnect: make(chan struct{}),
+ }
+ s.nodes[n.ID] = rn
+ rn.Heartbeat = heartbeat.New(s.periodChooser.Choose()*s.gracePeriodMultiplier, expireFunc)
+ return rn
+}
+
+func (s *nodeStore) Get(id string) (*registeredNode, error) {
+ s.mu.RLock()
+ rn, ok := s.nodes[id]
+ s.mu.RUnlock()
+ if !ok {
+ return nil, grpc.Errorf(codes.NotFound, ErrNodeNotRegistered.Error())
+ }
+ return rn, nil
+}
+
+func (s *nodeStore) GetWithSession(id, sid string) (*registeredNode, error) {
+ s.mu.RLock()
+ rn, ok := s.nodes[id]
+ s.mu.RUnlock()
+ if !ok {
+ return nil, grpc.Errorf(codes.NotFound, ErrNodeNotRegistered.Error())
+ }
+ return rn, rn.checkSessionID(sid)
+}
+
+func (s *nodeStore) Heartbeat(id, sid string) (time.Duration, error) {
+ rn, err := s.GetWithSession(id, sid)
+ if err != nil {
+ return 0, err
+ }
+ period := s.periodChooser.Choose() // base period for node
+ grace := period * time.Duration(s.gracePeriodMultiplier)
+ rn.mu.Lock()
+ rn.Heartbeat.Update(grace)
+ rn.Heartbeat.Beat()
+ rn.mu.Unlock()
+ return period, nil
+}
+
+func (s *nodeStore) Delete(id string) *registeredNode {
+ s.mu.Lock()
+ var node *registeredNode
+ if rn, ok := s.nodes[id]; ok {
+ delete(s.nodes, id)
+ rn.Heartbeat.Stop()
+ node = rn
+ }
+ s.mu.Unlock()
+ return node
+}
+
+func (s *nodeStore) Disconnect(id string) {
+ s.mu.Lock()
+ if rn, ok := s.nodes[id]; ok {
+ close(rn.Disconnect)
+ rn.Heartbeat.Stop()
+ }
+ s.mu.Unlock()
+}
+
+// Clean removes all nodes and stops their heartbeats.
+// It is equivalent to invalidating all sessions.
+func (s *nodeStore) Clean() {
+ s.mu.Lock()
+ for _, rn := range s.nodes {
+ rn.Heartbeat.Stop()
+ }
+ s.nodes = make(map[string]*registeredNode)
+ s.mu.Unlock()
+}
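+
+// Example (editor's sketch): registration followed by heartbeats, assuming a
+// hypothetical node object "n" and expiration callback "expire":
+//
+//	s := newNodeStore(5*time.Second, 500*time.Millisecond, 3)
+//	rn := s.Add(n, expire)                         // issues a new session ID
+//	period, err := s.Heartbeat(n.ID, rn.SessionID) // resets the grace timer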
diff --git a/vendor/src/github.com/docker/swarmkit/manager/dispatcher/period.go b/vendor/src/github.com/docker/swarmkit/manager/dispatcher/period.go
new file mode 100644
index 0000000000..d4457756ea
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/dispatcher/period.go
@@ -0,0 +1,28 @@
+package dispatcher
+
+import (
+ "math/rand"
+ "time"
+)
+
+type periodChooser struct {
+ period time.Duration
+ epsilon time.Duration
+ rand *rand.Rand
+}
+
+func newPeriodChooser(period, eps time.Duration) *periodChooser {
+ return &periodChooser{
+ period: period,
+ epsilon: eps,
+ rand: rand.New(rand.NewSource(time.Now().UnixNano())),
+ }
+}
+
+// Choose returns the period adjusted by a random jitter in (-epsilon, +epsilon).
+func (pc *periodChooser) Choose() time.Duration {
+ var adj int64
+ if pc.epsilon > 0 {
+ // use the chooser's own seeded source rather than the global rand,
+ // which otherwise leaves the pc.rand field unused
+ adj = pc.rand.Int63n(int64(2*pc.epsilon)) - int64(pc.epsilon)
+ }
+ return pc.period + time.Duration(adj)
+}
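+
+// Editor's note (worked example): with period=5s and epsilon=500ms, Choose
+// returns a duration uniformly distributed in [4.5s, 5.5s), spreading node
+// heartbeats so they do not synchronize.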
diff --git a/vendor/src/github.com/docker/swarmkit/manager/doc.go b/vendor/src/github.com/docker/swarmkit/manager/doc.go
new file mode 100644
index 0000000000..5d04392c76
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/doc.go
@@ -0,0 +1 @@
+package manager
diff --git a/vendor/src/github.com/docker/swarmkit/manager/keymanager/keymanager.go b/vendor/src/github.com/docker/swarmkit/manager/keymanager/keymanager.go
new file mode 100644
index 0000000000..80d027a684
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/keymanager/keymanager.go
@@ -0,0 +1,229 @@
+package keymanager
+
+// keymanager does the allocation, rotation and distribution of symmetric
+// keys to the agents. This is to securely bootstrap network communication
+// between agents. It can be used for encrypting gossip between the agents
+// which is used to exchange service discovery and overlay network control
+// plane information. It can also be used to encrypt overlay data traffic.
+import (
+ "crypto/rand"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/state/store"
+ "golang.org/x/net/context"
+)
+
+const (
+ // DefaultKeyLen is the default length (in bytes) of the key allocated
+ DefaultKeyLen = 16
+
+ // DefaultKeyRotationInterval used by key manager
+ DefaultKeyRotationInterval = 12 * time.Hour
+
+ // SubsystemGossip handles gossip protocol between the agents
+ SubsystemGossip = "networking:gossip"
+
+ // SubsystemIPSec is overlay network data encryption subsystem
+ SubsystemIPSec = "networking:ipsec"
+
+ // DefaultSubsystem is gossip
+ DefaultSubsystem = SubsystemGossip
+)
+
+// map of subsystems and corresponding encryption algorithm. Initially only
+// AES_128 in GCM mode is supported.
+var subsysToAlgo = map[string]api.EncryptionKey_Algorithm{
+ SubsystemGossip: api.AES_128_GCM,
+ SubsystemIPSec: api.AES_128_GCM,
+}
+
+type keyRing struct {
+ lClock uint64
+ keys []*api.EncryptionKey
+}
+
+// Config for the keymanager that can be modified
+type Config struct {
+ ClusterName string
+ Keylen int
+ RotationInterval time.Duration
+ Subsystems []string
+}
+
+// KeyManager handles key allocation, rotation & distribution
+type KeyManager struct {
+ config *Config
+ store *store.MemoryStore
+ keyRing *keyRing
+ ticker *time.Ticker
+ ctx context.Context
+ cancel context.CancelFunc
+
+ mu sync.Mutex
+}
+
+// DefaultConfig provides the default config for keymanager
+func DefaultConfig() *Config {
+ return &Config{
+ ClusterName: store.DefaultClusterName,
+ Keylen: DefaultKeyLen,
+ RotationInterval: DefaultKeyRotationInterval,
+ Subsystems: []string{DefaultSubsystem},
+ }
+}
+
+// New creates an instance of keymanager with the given config
+func New(store *store.MemoryStore, config *Config) *KeyManager {
+ for _, subsys := range config.Subsystems {
+ if subsys != SubsystemGossip && subsys != SubsystemIPSec {
+ return nil
+ }
+ }
+ return &KeyManager{
+ config: config,
+ store: store,
+ keyRing: &keyRing{},
+ }
+}
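+
+// Example (editor's sketch): wiring a KeyManager into a manager, assuming an
+// existing *store.MemoryStore "ms":
+//
+//	km := keymanager.New(ms, keymanager.DefaultConfig())
+//	go km.Run(ctx) // allocates keys on first run, then rotates periodically
+//	defer km.Stop()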
+
+func (k *KeyManager) allocateKey(ctx context.Context, subsys string) *api.EncryptionKey {
+ key := make([]byte, k.config.Keylen)
+
+ _, err := rand.Read(key)
+ if err != nil {
+ panic(fmt.Errorf("key generated failed, %v", err))
+ }
+ k.keyRing.lClock++
+
+ return &api.EncryptionKey{
+ Subsystem: subsys,
+ Algorithm: subsysToAlgo[subsys],
+ Key: key,
+ LamportTime: k.keyRing.lClock,
+ }
+}
+
+func (k *KeyManager) updateKey(cluster *api.Cluster) error {
+ return k.store.Update(func(tx store.Tx) error {
+ cluster = store.GetCluster(tx, cluster.ID)
+ if cluster == nil {
+ return nil
+ }
+ cluster.EncryptionKeyLamportClock = k.keyRing.lClock
+ cluster.NetworkBootstrapKeys = k.keyRing.keys
+ return store.UpdateCluster(tx, cluster)
+ })
+}
+
+func (k *KeyManager) rotateKey(ctx context.Context) error {
+ log := log.G(ctx).WithField("module", "keymanager")
+ var (
+ clusters []*api.Cluster
+ err error
+ )
+ k.store.View(func(readTx store.ReadTx) {
+ clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
+ })
+
+ if err != nil {
+ log.Errorf("reading cluster config failed, %v", err)
+ return err
+ }
+
+ cluster := clusters[0]
+ if len(cluster.NetworkBootstrapKeys) == 0 {
+ panic(fmt.Errorf("no key in the cluster config"))
+ }
+
+ subsysKeys := map[string][]*api.EncryptionKey{}
+ for _, key := range k.keyRing.keys {
+ subsysKeys[key.Subsystem] = append(subsysKeys[key.Subsystem], key)
+ }
+ k.keyRing.keys = []*api.EncryptionKey{}
+
+ // We maintain the latest key and the one before in the key ring to allow
+ // agents to communicate without disruption on key change.
+ for subsys, keys := range subsysKeys {
+ if len(keys) > 1 {
+ min := 0
+ for i, key := range keys[1:] {
+ if key.LamportTime < keys[min].LamportTime {
+ min = i
+ }
+ }
+ keys = append(keys[0:min], keys[min+1:]...)
+ }
+ keys = append(keys, k.allocateKey(ctx, subsys))
+ subsysKeys[subsys] = keys
+ }
+
+ for _, keys := range subsysKeys {
+ k.keyRing.keys = append(k.keyRing.keys, keys...)
+ }
+
+ return k.updateKey(cluster)
+}
+
+// Run starts the keymanager; it blocks until the context is cancelled or Stop is called.
+func (k *KeyManager) Run(ctx context.Context) error {
+ k.mu.Lock()
+ log := log.G(ctx).WithField("module", "keymanager")
+ var (
+ clusters []*api.Cluster
+ err error
+ )
+ k.store.View(func(readTx store.ReadTx) {
+ clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
+ })
+
+ if err != nil {
+ log.Errorf("reading cluster config failed, %v", err)
+ k.mu.Unlock()
+ return err
+ }
+
+ cluster := clusters[0]
+ if len(cluster.NetworkBootstrapKeys) == 0 {
+ for _, subsys := range k.config.Subsystems {
+ k.keyRing.keys = append(k.keyRing.keys, k.allocateKey(ctx, subsys))
+ }
+ if err := k.updateKey(cluster); err != nil {
+ log.Errorf("store update failed %v", err)
+ }
+ } else {
+ k.keyRing.lClock = cluster.EncryptionKeyLamportClock
+ k.keyRing.keys = cluster.NetworkBootstrapKeys
+
+ k.rotateKey(ctx)
+ }
+
+ ticker := time.NewTicker(k.config.RotationInterval)
+ defer ticker.Stop()
+
+ k.ctx, k.cancel = context.WithCancel(ctx)
+ k.mu.Unlock()
+
+ for {
+ select {
+ case <-ticker.C:
+ k.rotateKey(ctx)
+ case <-k.ctx.Done():
+ return nil
+ }
+ }
+}
+
+// Stop stops the running instance of key manager
+func (k *KeyManager) Stop() error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ if k.cancel == nil {
+ return fmt.Errorf("keymanager is not started")
+ }
+ k.cancel()
+ return nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/manager.go b/vendor/src/github.com/docker/swarmkit/manager/manager.go
new file mode 100644
index 0000000000..5f1c70aec4
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/manager.go
@@ -0,0 +1,670 @@
+package manager
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/ca"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/allocator"
+ "github.com/docker/swarmkit/manager/controlapi"
+ "github.com/docker/swarmkit/manager/dispatcher"
+ "github.com/docker/swarmkit/manager/keymanager"
+ "github.com/docker/swarmkit/manager/orchestrator"
+ "github.com/docker/swarmkit/manager/raftpicker"
+ "github.com/docker/swarmkit/manager/scheduler"
+ "github.com/docker/swarmkit/manager/state/raft"
+ "github.com/docker/swarmkit/manager/state/store"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+)
+
+const (
+ // defaultTaskHistoryRetentionLimit is the number of tasks to keep.
+ defaultTaskHistoryRetentionLimit = 10
+)
+
+// Config is used to tune the Manager.
+type Config struct {
+ SecurityConfig *ca.SecurityConfig
+
+ ProtoAddr map[string]string
+ // ProtoListener will be used for grpc serving if it's not nil,
+ // ProtoAddr fields will be used to create listeners otherwise.
+ ProtoListener map[string]net.Listener
+
+ // JoinRaft is an optional address of a node in an existing raft
+ // cluster to join.
+ JoinRaft string
+
+ // Top-level state directory
+ StateDir string
+
+ // ForceNewCluster defines if we have to force a new cluster
+ // because we are recovering from a backup data directory.
+ ForceNewCluster bool
+
+ // ElectionTick defines the amount of ticks needed without
+ // leader to trigger a new election
+ ElectionTick uint32
+
+ // HeartbeatTick defines the amount of ticks between each
+ // heartbeat sent to other members for health-check purposes
+ HeartbeatTick uint32
+}
+
+// Manager is the cluster manager for Swarm.
+// This is the high-level object holding and initializing all the manager
+// subsystems.
+type Manager struct {
+ config *Config
+ listeners map[string]net.Listener
+
+ caserver *ca.Server
+ Dispatcher *dispatcher.Dispatcher
+ replicatedOrchestrator *orchestrator.ReplicatedOrchestrator
+ globalOrchestrator *orchestrator.GlobalOrchestrator
+ taskReaper *orchestrator.TaskReaper
+ scheduler *scheduler.Scheduler
+ allocator *allocator.Allocator
+ keyManager *keymanager.KeyManager
+ server *grpc.Server
+ localserver *grpc.Server
+ RaftNode *raft.Node
+
+ mu sync.Mutex
+ once sync.Once
+
+ stopped chan struct{}
+}
+
+// New creates a Manager which has not started to accept requests yet.
+func New(config *Config) (*Manager, error) {
+ dispatcherConfig := dispatcher.DefaultConfig()
+
+ if config.ProtoAddr == nil {
+ config.ProtoAddr = make(map[string]string)
+ }
+
+ if config.ProtoListener != nil && config.ProtoListener["tcp"] != nil {
+ config.ProtoAddr["tcp"] = config.ProtoListener["tcp"].Addr().String()
+ }
+
+ tcpAddr := config.ProtoAddr["tcp"]
+
+ listenHost, listenPort, err := net.SplitHostPort(tcpAddr)
+ if err == nil {
+ ip := net.ParseIP(listenHost)
+ if ip != nil && ip.IsUnspecified() {
+ // Find our local IP address associated with the default route.
+ // This may not be the appropriate address to use for internal
+ // cluster communications, but it seems like the best default.
+ // The admin can override this address if necessary.
+ conn, err := net.Dial("udp", "8.8.8.8:53")
+ if err != nil {
+ return nil, fmt.Errorf("could not determine local IP address: %v", err)
+ }
+ localAddr := conn.LocalAddr().String()
+ conn.Close()
+
+ listenHost, _, err = net.SplitHostPort(localAddr)
+ if err != nil {
+ return nil, fmt.Errorf("could not split local IP address: %v", err)
+ }
+
+ tcpAddr = net.JoinHostPort(listenHost, listenPort)
+ }
+ }
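+
+	// Editor's note (illustrative, not in the vendored source): the UDP
+	// "dial" above sends no packets; it only asks the kernel to choose a
+	// source address for the default route, so 8.8.8.8:53 is an arbitrary
+	// routable destination rather than a real dependency. The same probe
+	// in isolation:
+	//
+	//	conn, _ := net.Dial("udp", "8.8.8.8:53")
+	//	host, _, _ := net.SplitHostPort(conn.LocalAddr().String())
+	//	conn.Close() // host now holds the outbound-interface IP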
+
+ // TODO(stevvooe): Reported address of manager is plumbed to listen addr
+ // for now, may want to make this separate. This can be tricky to get right
+ // so we need to make it easy to override. This needs to be the address
+ // through which agent nodes access the manager.
+ dispatcherConfig.Addr = tcpAddr
+
+ err = os.MkdirAll(filepath.Dir(config.ProtoAddr["unix"]), 0700)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create socket directory: %v", err)
+ }
+
+ err = os.MkdirAll(config.StateDir, 0700)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create state directory: %v", err)
+ }
+
+ raftStateDir := filepath.Join(config.StateDir, "raft")
+ err = os.MkdirAll(raftStateDir, 0700)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create raft state directory: %v", err)
+ }
+
+ var listeners map[string]net.Listener
+ if len(config.ProtoListener) > 0 {
+ listeners = config.ProtoListener
+ } else {
+ listeners = make(map[string]net.Listener)
+
+ for proto, addr := range config.ProtoAddr {
+ l, err := net.Listen(proto, addr)
+
+ // A unix socket may fail to bind if the file already
+ // exists. Try replacing the file.
+ unwrappedErr := err
+ if op, ok := unwrappedErr.(*net.OpError); ok {
+ unwrappedErr = op.Err
+ }
+ if sys, ok := unwrappedErr.(*os.SyscallError); ok {
+ unwrappedErr = sys.Err
+ }
+ if proto == "unix" && unwrappedErr == syscall.EADDRINUSE {
+ os.Remove(addr)
+ l, err = net.Listen(proto, addr)
+ if err != nil {
+ return nil, err
+ }
+ } else if err != nil {
+ return nil, err
+ }
+ listeners[proto] = l
+ }
+ }
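+
+	// Editor's note (illustrative, not in the vendored source): the
+	// unwrap chain above peels *net.OpError and *os.SyscallError to
+	// reach the raw errno; this code predates errors.Is, so the same
+	// check in isolation looks like:
+	//
+	//	unwrapped := err
+	//	if op, ok := unwrapped.(*net.OpError); ok {
+	//		unwrapped = op.Err
+	//	}
+	//	if sys, ok := unwrapped.(*os.SyscallError); ok {
+	//		unwrapped = sys.Err
+	//	}
+	//	stale := unwrapped == syscall.EADDRINUSE // safe to remove & rebind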
+
+ raftCfg := raft.DefaultNodeConfig()
+
+ if config.ElectionTick > 0 {
+ raftCfg.ElectionTick = int(config.ElectionTick)
+ }
+ if config.HeartbeatTick > 0 {
+ raftCfg.HeartbeatTick = int(config.HeartbeatTick)
+ }
+
+ newNodeOpts := raft.NewNodeOptions{
+ ID: config.SecurityConfig.ClientTLSCreds.NodeID(),
+ Addr: tcpAddr,
+ JoinAddr: config.JoinRaft,
+ Config: raftCfg,
+ StateDir: raftStateDir,
+ ForceNewCluster: config.ForceNewCluster,
+ TLSCredentials: config.SecurityConfig.ClientTLSCreds,
+ }
+ RaftNode, err := raft.NewNode(context.TODO(), newNodeOpts)
+ if err != nil {
+ for _, lis := range listeners {
+ lis.Close()
+ }
+ return nil, fmt.Errorf("can't create raft node: %v", err)
+ }
+
+ opts := []grpc.ServerOption{
+ grpc.Creds(config.SecurityConfig.ServerTLSCreds)}
+
+ m := &Manager{
+ config: config,
+ listeners: listeners,
+ caserver: ca.NewServer(RaftNode.MemoryStore(), config.SecurityConfig),
+ Dispatcher: dispatcher.New(RaftNode, dispatcherConfig),
+ server: grpc.NewServer(opts...),
+ localserver: grpc.NewServer(opts...),
+ RaftNode: RaftNode,
+ stopped: make(chan struct{}),
+ }
+
+ return m, nil
+}
+
+// Run starts all manager sub-systems and the gRPC server at the configured
+// address.
+// The call never returns unless an error occurs or `Stop()` is called.
+//
+// TODO(aluzzardi): /!\ This function is *way* too complex. /!\
+// It needs to be split into smaller manageable functions.
+func (m *Manager) Run(parent context.Context) error {
+ ctx, ctxCancel := context.WithCancel(parent)
+ defer ctxCancel()
+
+ // Harakiri.
+ go func() {
+ select {
+ case <-ctx.Done():
+ case <-m.stopped:
+ ctxCancel()
+ }
+ }()
+
+ leadershipCh, cancel := m.RaftNode.SubscribeLeadership()
+ defer cancel()
+
+ go func() {
+ for leadershipEvent := range leadershipCh {
+			// Read out and discard all of the messages once we've
+			// stopped. Don't acquire the mutex yet: if stopped is
+			// closed, we don't need it, and skipping it keeps this
+			// loop from starving Run()'s attempt to Lock.
+ select {
+ case <-m.stopped:
+ continue
+ default:
+ // do nothing, we're not stopped
+ }
+ // we're not stopping so NOW acquire the mutex
+ m.mu.Lock()
+ newState := leadershipEvent.(raft.LeadershipState)
+
+ if newState == raft.IsLeader {
+ s := m.RaftNode.MemoryStore()
+
+ rootCA := m.config.SecurityConfig.RootCA()
+ nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID()
+
+ raftCfg := raft.DefaultRaftConfig()
+ raftCfg.ElectionTick = uint32(m.RaftNode.Config.ElectionTick)
+ raftCfg.HeartbeatTick = uint32(m.RaftNode.Config.HeartbeatTick)
+
+ clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization()
+ s.Update(func(tx store.Tx) error {
+ // Add a default cluster object to the
+ // store. Don't check the error because
+ // we expect this to fail unless this
+ // is a brand new cluster.
+ store.CreateCluster(tx, &api.Cluster{
+ ID: clusterID,
+ Spec: api.ClusterSpec{
+ Annotations: api.Annotations{
+ Name: store.DefaultClusterName,
+ },
+ AcceptancePolicy: ca.DefaultAcceptancePolicy(),
+ Orchestration: api.OrchestrationConfig{
+ TaskHistoryRetentionLimit: defaultTaskHistoryRetentionLimit,
+ },
+ Dispatcher: api.DispatcherConfig{
+ HeartbeatPeriod: uint64(dispatcher.DefaultHeartBeatPeriod),
+ },
+ Raft: raftCfg,
+ CAConfig: ca.DefaultCAConfig(),
+ },
+ RootCA: api.RootCA{
+ CAKey: rootCA.Key,
+ CACert: rootCA.Cert,
+ CACertHash: rootCA.Digest.String(),
+ },
+ })
+ // Add Node entry for ourself, if one
+ // doesn't exist already.
+ store.CreateNode(tx, &api.Node{
+ ID: nodeID,
+ Certificate: api.Certificate{
+ CN: nodeID,
+ Role: api.NodeRoleManager,
+ Status: api.IssuanceStatus{
+ State: api.IssuanceStateIssued,
+ },
+ },
+ Spec: api.NodeSpec{
+ Role: api.NodeRoleManager,
+ Membership: api.NodeMembershipAccepted,
+ },
+ })
+ return nil
+ })
+
+ // Attempt to rotate the key-encrypting-key of the root CA key-material
+ err := m.rotateRootCAKEK(ctx, clusterID)
+ if err != nil {
+ log.G(ctx).WithError(err).Error("root key-encrypting-key rotation failed")
+ }
+
+ m.replicatedOrchestrator = orchestrator.New(s)
+ m.globalOrchestrator = orchestrator.NewGlobalOrchestrator(s)
+ m.taskReaper = orchestrator.NewTaskReaper(s)
+ m.scheduler = scheduler.New(s)
+ m.keyManager = keymanager.New(m.RaftNode.MemoryStore(), keymanager.DefaultConfig())
+
+ // TODO(stevvooe): Allocate a context that can be used to
+ // shutdown underlying manager processes when leadership is
+ // lost.
+
+ m.allocator, err = allocator.New(s)
+ if err != nil {
+ log.G(ctx).WithError(err).Error("failed to create allocator")
+ // TODO(stevvooe): It doesn't seem correct here to fail
+ // creating the allocator but then use it anyways.
+ }
+
+ go func(keyManager *keymanager.KeyManager) {
+ if err := keyManager.Run(ctx); err != nil {
+ log.G(ctx).WithError(err).Error("keymanager failed with an error")
+ }
+ }(m.keyManager)
+
+ go func(d *dispatcher.Dispatcher) {
+ if err := d.Run(ctx); err != nil {
+ log.G(ctx).WithError(err).Error("Dispatcher exited with an error")
+ }
+ }(m.Dispatcher)
+
+ go func(server *ca.Server) {
+ if err := server.Run(ctx); err != nil {
+ log.G(ctx).WithError(err).Error("CA signer exited with an error")
+ }
+ }(m.caserver)
+
+ // Start all sub-components in separate goroutines.
+ // TODO(aluzzardi): This should have some kind of error handling so that
+ // any component that goes down would bring the entire manager down.
+
+ if m.allocator != nil {
+ go func(allocator *allocator.Allocator) {
+ if err := allocator.Run(ctx); err != nil {
+ log.G(ctx).WithError(err).Error("allocator exited with an error")
+ }
+ }(m.allocator)
+ }
+
+ go func(scheduler *scheduler.Scheduler) {
+ if err := scheduler.Run(ctx); err != nil {
+ log.G(ctx).WithError(err).Error("scheduler exited with an error")
+ }
+ }(m.scheduler)
+ go func(taskReaper *orchestrator.TaskReaper) {
+ taskReaper.Run()
+ }(m.taskReaper)
+ go func(orchestrator *orchestrator.ReplicatedOrchestrator) {
+ if err := orchestrator.Run(ctx); err != nil {
+ log.G(ctx).WithError(err).Error("replicated orchestrator exited with an error")
+ }
+ }(m.replicatedOrchestrator)
+ go func(globalOrchestrator *orchestrator.GlobalOrchestrator) {
+ if err := globalOrchestrator.Run(ctx); err != nil {
+ log.G(ctx).WithError(err).Error("global orchestrator exited with an error")
+ }
+ }(m.globalOrchestrator)
+
+ } else if newState == raft.IsFollower {
+ m.Dispatcher.Stop()
+ m.caserver.Stop()
+
+ if m.allocator != nil {
+ m.allocator.Stop()
+ m.allocator = nil
+ }
+
+ m.replicatedOrchestrator.Stop()
+ m.replicatedOrchestrator = nil
+
+ m.globalOrchestrator.Stop()
+ m.globalOrchestrator = nil
+
+ m.taskReaper.Stop()
+ m.taskReaper = nil
+
+ m.scheduler.Stop()
+ m.scheduler = nil
+
+ m.keyManager.Stop()
+ m.keyManager = nil
+ }
+ m.mu.Unlock()
+ }
+ }()
+
+ go func() {
+ err := m.RaftNode.Run(ctx)
+ if err != nil {
+ log.G(ctx).Error(err)
+ m.Stop(ctx)
+ }
+ }()
+
+ proxyOpts := []grpc.DialOption{
+ grpc.WithBackoffMaxDelay(2 * time.Second),
+ grpc.WithTransportCredentials(m.config.SecurityConfig.ClientTLSCreds),
+ }
+
+ cs := raftpicker.NewConnSelector(m.RaftNode, proxyOpts...)
+
+ authorize := func(ctx context.Context, roles []string) error {
+ // Authorize the remote roles, ensure they can only be forwarded by managers
+ _, err := ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, m.config.SecurityConfig.ClientTLSCreds.Organization())
+ return err
+ }
+
+ baseControlAPI := controlapi.NewServer(m.RaftNode.MemoryStore(), m.RaftNode)
+
+ authenticatedControlAPI := api.NewAuthenticatedWrapperControlServer(baseControlAPI, authorize)
+ authenticatedDispatcherAPI := api.NewAuthenticatedWrapperDispatcherServer(m.Dispatcher, authorize)
+ authenticatedCAAPI := api.NewAuthenticatedWrapperCAServer(m.caserver, authorize)
+ authenticatedNodeCAAPI := api.NewAuthenticatedWrapperNodeCAServer(m.caserver, authorize)
+ authenticatedRaftAPI := api.NewAuthenticatedWrapperRaftServer(m.RaftNode, authorize)
+
+ proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
+ proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
+ proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
+
+ // localProxyControlAPI is a special kind of proxy. It is only wired up
+ // to receive requests from a trusted local socket, and these requests
+ // don't use TLS, therefore the requests it handles locally should
+ // bypass authorization. When it proxies, it sends them as requests from
+ // this manager rather than forwarded requests (it has no TLS
+ // information to put in the metadata map).
+ forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
+ localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, cs, m.RaftNode, forwardAsOwnRequest)
+
+ // Everything registered on m.server should be an authenticated
+ // wrapper, or a proxy wrapping an authenticated wrapper!
+ api.RegisterCAServer(m.server, proxyCAAPI)
+ api.RegisterNodeCAServer(m.server, proxyNodeCAAPI)
+ api.RegisterRaftServer(m.server, authenticatedRaftAPI)
+ api.RegisterControlServer(m.localserver, localProxyControlAPI)
+ api.RegisterControlServer(m.server, authenticatedControlAPI)
+ api.RegisterDispatcherServer(m.server, proxyDispatcherAPI)
+
+ errServe := make(chan error, 2)
+ for proto, l := range m.listeners {
+ go func(proto string, lis net.Listener) {
+ ctx := log.WithLogger(ctx, log.G(ctx).WithFields(
+ logrus.Fields{
+ "proto": lis.Addr().Network(),
+ "addr": lis.Addr().String()}))
+ if proto == "unix" {
+ log.G(ctx).Info("Listening for local connections")
+ errServe <- m.localserver.Serve(lis)
+ } else {
+ log.G(ctx).Info("Listening for connections")
+ errServe <- m.server.Serve(lis)
+ }
+ }(proto, l)
+ }
+
+ if err := raft.WaitForLeader(ctx, m.RaftNode); err != nil {
+ m.server.Stop()
+ return err
+ }
+
+ c, err := raft.WaitForCluster(ctx, m.RaftNode)
+ if err != nil {
+ m.server.Stop()
+ return err
+ }
+ raftConfig := c.Spec.Raft
+
+ if int(raftConfig.ElectionTick) != m.RaftNode.Config.ElectionTick {
+ log.G(ctx).Warningf("election tick value (%ds) is different from the one defined in the cluster config (%vs), the cluster may be unstable", m.RaftNode.Config.ElectionTick, raftConfig.ElectionTick)
+ }
+ if int(raftConfig.HeartbeatTick) != m.RaftNode.Config.HeartbeatTick {
+ log.G(ctx).Warningf("heartbeat tick value (%ds) is different from the one defined in the cluster config (%vs), the cluster may be unstable", m.RaftNode.Config.HeartbeatTick, raftConfig.HeartbeatTick)
+ }
+
+ // wait for an error in serving.
+ err = <-errServe
+ select {
+	// check to see if stopped was closed. if so, we're in the process of
+	// stopping, or already done, and that's why we got the error. if
+	// stopping is deliberate, stopped will ALWAYS be closed before the
+	// error is triggered, so this path will ALWAYS be taken if the stop
+	// was deliberate
+ case <-m.stopped:
+ // shutdown was requested, do not return an error
+	// but first, we wait to acquire the mutex to guarantee that stopping
+	// is finished. as long as we acquire the mutex BEFORE we return, we
+	// know that stopping has completed.
+ m.mu.Lock()
+ m.mu.Unlock()
+ return nil
+ // otherwise, we'll get something from errServe, which indicates that an
+ // error in serving has actually occurred and this isn't a planned shutdown
+ default:
+ return err
+ }
+}
+
+// Stop stops the manager. It immediately closes all open connections
+// and active RPCs, and stops the scheduler.
+func (m *Manager) Stop(ctx context.Context) {
+ log.G(ctx).Info("Stopping manager")
+
+	// the mutex stops us from trying to stop while we're already stopping,
+	// or from returning before we've finished stopping.
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ select {
+
+	// check to see whether we've already stopped
+ case <-m.stopped:
+ return
+ default:
+ // do nothing, we're stopping for the first time
+ }
+
+	// once we start stopping, send a signal that we're doing so. this
+	// tells Run that we've started stopping when it gets the error from
+	// errServe; it also prevents the loop from processing any more events.
+ close(m.stopped)
+
+ m.Dispatcher.Stop()
+ m.caserver.Stop()
+
+ if m.allocator != nil {
+ m.allocator.Stop()
+ }
+ if m.replicatedOrchestrator != nil {
+ m.replicatedOrchestrator.Stop()
+ }
+ if m.globalOrchestrator != nil {
+ m.globalOrchestrator.Stop()
+ }
+ if m.taskReaper != nil {
+ m.taskReaper.Stop()
+ }
+ if m.scheduler != nil {
+ m.scheduler.Stop()
+ }
+ if m.keyManager != nil {
+ m.keyManager.Stop()
+ }
+
+ m.RaftNode.Shutdown()
+	// some time after this point, Run will receive an error from one of these
+ m.server.Stop()
+ m.localserver.Stop()
+
+ log.G(ctx).Info("Manager shut down")
+ // mutex is released and Run can return now
+}
+
+// rotateRootCAKEK will attempt to rotate the key-encryption-key for root CA key-material in raft.
+// If there is no passphrase set in ENV, it returns.
+// If there is plain-text root key-material, and a passphrase set, it encrypts it.
+// If there is encrypted root key-material and it is using the current passphrase, it returns.
+// If there is encrypted root key-material, and it is using the previous passphrase, it
+// re-encrypts it with the current passphrase.
+func (m *Manager) rotateRootCAKEK(ctx context.Context, clusterID string) error {
+ // If we don't have a KEK, we won't ever be rotating anything
+ strPassphrase := os.Getenv(ca.PassphraseENVVar)
+ if strPassphrase == "" {
+ return nil
+ }
+ strPassphrasePrev := os.Getenv(ca.PassphraseENVVarPrev)
+ passphrase := []byte(strPassphrase)
+ passphrasePrev := []byte(strPassphrasePrev)
+
+ s := m.RaftNode.MemoryStore()
+ var (
+ cluster *api.Cluster
+ err error
+ finalKey []byte
+ )
+ // Retrieve the cluster identified by ClusterID
+ s.View(func(readTx store.ReadTx) {
+ cluster = store.GetCluster(readTx, clusterID)
+ })
+ if cluster == nil {
+ return fmt.Errorf("cluster not found: %s", clusterID)
+ }
+
+ // Try to get the private key from the cluster
+ privKeyPEM := cluster.RootCA.CAKey
+	if len(privKeyPEM) == 0 {
+ // We have no PEM root private key in this cluster.
+ log.G(ctx).Warnf("cluster %s does not have private key material", clusterID)
+ return nil
+ }
+
+ // Decode the PEM private key
+ keyBlock, _ := pem.Decode(privKeyPEM)
+ if keyBlock == nil {
+ return fmt.Errorf("invalid PEM-encoded private key inside of cluster %s", clusterID)
+ }
+ // If this key is not encrypted, then we have to encrypt it
+ if !x509.IsEncryptedPEMBlock(keyBlock) {
+ finalKey, err = ca.EncryptECPrivateKey(privKeyPEM, strPassphrase)
+ if err != nil {
+ return err
+ }
+ } else {
+ // This key is already encrypted, let's try to decrypt with the current main passphrase
+		_, err = x509.DecryptPEMBlock(keyBlock, passphrase)
+ if err == nil {
+ // The main key is the correct KEK, nothing to do here
+ return nil
+ }
+ // This key is already encrypted, but failed with current main passphrase.
+ // Let's try to decrypt with the previous passphrase
+		unencryptedKey, err := x509.DecryptPEMBlock(keyBlock, passphrasePrev)
+ if err != nil {
+ // We were not able to decrypt either with the main or backup passphrase, error
+ return err
+ }
+ unencryptedKeyBlock := &pem.Block{
+ Type: keyBlock.Type,
+ Bytes: unencryptedKey,
+ Headers: keyBlock.Headers,
+ }
+
+ // We were able to decrypt the key, but with the previous passphrase. Let's encrypt
+ // with the new one and store it in raft
+ finalKey, err = ca.EncryptECPrivateKey(pem.EncodeToMemory(unencryptedKeyBlock), strPassphrase)
+ if err != nil {
+ log.G(ctx).Debugf("failed to rotate the key-encrypting-key for the root key material of cluster %s", clusterID)
+ return err
+ }
+ }
+
+ log.G(ctx).Infof("Re-encrypting the root key material of cluster %s", clusterID)
+ // Let's update the key in the cluster object
+ return s.Update(func(tx store.Tx) error {
+ cluster = store.GetCluster(tx, clusterID)
+ if cluster == nil {
+ return fmt.Errorf("cluster not found")
+ }
+ cluster.RootCA.CAKey = finalKey
+ return store.UpdateCluster(tx, cluster)
+	})
+}
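+
+// Editor's note: a minimal standalone sketch (names hypothetical, not
+// part of the vendored source) of the KEK check performed above using
+// the standard library's PEM encryption helpers:
+//
+//	block, _ := pem.Decode(keyPEM)
+//	if block != nil && x509.IsEncryptedPEMBlock(block) {
+//		if _, err := x509.DecryptPEMBlock(block, kek); err == nil {
+//			// key material is already encrypted under the current KEK
+//		}
+//	}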
diff --git a/vendor/src/github.com/docker/swarmkit/manager/orchestrator/global.go b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/global.go
new file mode 100644
index 0000000000..be1a8f3b8a
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/global.go
@@ -0,0 +1,408 @@
+package orchestrator
+
+import (
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+ "golang.org/x/net/context"
+)
+
+// GlobalOrchestrator runs a reconciliation loop to create and destroy
+// tasks as necessary for global services.
+type GlobalOrchestrator struct {
+ store *store.MemoryStore
+	// nodes contains the IDs of all valid nodes in the cluster
+ nodes map[string]struct{}
+	// globalServices holds all the global services in the cluster, indexed by ServiceID
+ globalServices map[string]*api.Service
+
+ // stopChan signals to the state machine to stop running.
+ stopChan chan struct{}
+ // doneChan is closed when the state machine terminates.
+ doneChan chan struct{}
+
+ updater *UpdateSupervisor
+ restarts *RestartSupervisor
+}
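+
+// Editor's note: stopChan/doneChan above implement the common Go
+// shutdown handshake. An illustrative sketch of the idiom on its own
+// (not part of the vendored source; events and handle are hypothetical):
+//
+//	stop, done := make(chan struct{}), make(chan struct{})
+//	go func() {
+//		defer close(done)
+//		for {
+//			select {
+//			case ev := <-events:
+//				handle(ev)
+//			case <-stop:
+//				return
+//			}
+//		}
+//	}()
+//	close(stop) // request shutdown
+//	<-done      // wait for the loop to exit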
+
+// NewGlobalOrchestrator creates a new GlobalOrchestrator
+func NewGlobalOrchestrator(store *store.MemoryStore) *GlobalOrchestrator {
+ restartSupervisor := NewRestartSupervisor(store)
+ updater := NewUpdateSupervisor(store, restartSupervisor)
+ return &GlobalOrchestrator{
+ store: store,
+ nodes: make(map[string]struct{}),
+ globalServices: make(map[string]*api.Service),
+ stopChan: make(chan struct{}),
+ doneChan: make(chan struct{}),
+ updater: updater,
+ restarts: restartSupervisor,
+ }
+}
+
+// Run contains the GlobalOrchestrator event loop
+func (g *GlobalOrchestrator) Run(ctx context.Context) error {
+ defer close(g.doneChan)
+
+ // Watch changes to services and tasks
+ queue := g.store.WatchQueue()
+ watcher, cancel := queue.Watch()
+ defer cancel()
+
+ // Get list of nodes
+ var (
+ nodes []*api.Node
+ err error
+ )
+ g.store.View(func(readTx store.ReadTx) {
+ nodes, err = store.FindNodes(readTx, store.All)
+ })
+ if err != nil {
+ return err
+ }
+ for _, n := range nodes {
+ // if a node is in drain state, do not add it
+ if isValidNode(n) {
+ g.nodes[n.ID] = struct{}{}
+ }
+ }
+
+ // Lookup global services
+ var existingServices []*api.Service
+ g.store.View(func(readTx store.ReadTx) {
+ existingServices, err = store.FindServices(readTx, store.All)
+ })
+ if err != nil {
+ return err
+ }
+ for _, s := range existingServices {
+ if isGlobalService(s) {
+ g.globalServices[s.ID] = s
+ g.reconcileOneService(ctx, s)
+ }
+ }
+
+ for {
+ select {
+ case event := <-watcher:
+ // TODO(stevvooe): Use ctx to limit running time of operation.
+ switch v := event.(type) {
+ case state.EventCreateService:
+ if !isGlobalService(v.Service) {
+ continue
+ }
+ g.globalServices[v.Service.ID] = v.Service
+ g.reconcileOneService(ctx, v.Service)
+ case state.EventUpdateService:
+ if !isGlobalService(v.Service) {
+ continue
+ }
+ g.globalServices[v.Service.ID] = v.Service
+ g.reconcileOneService(ctx, v.Service)
+ case state.EventDeleteService:
+ if !isGlobalService(v.Service) {
+ continue
+ }
+ deleteServiceTasks(ctx, g.store, v.Service)
+ // delete the service from service map
+ delete(g.globalServices, v.Service.ID)
+ g.restarts.ClearServiceHistory(v.Service.ID)
+ case state.EventCreateNode:
+ g.reconcileOneNode(ctx, v.Node)
+ case state.EventUpdateNode:
+ switch v.Node.Status.State {
+ // NodeStatus_DISCONNECTED is a transient state, no need to make any change
+ case api.NodeStatus_DOWN:
+ g.removeTasksFromNode(ctx, v.Node)
+ case api.NodeStatus_READY:
+ // node could come back to READY from DOWN or DISCONNECT
+ g.reconcileOneNode(ctx, v.Node)
+ }
+ case state.EventDeleteNode:
+ g.removeTasksFromNode(ctx, v.Node)
+ delete(g.nodes, v.Node.ID)
+ case state.EventUpdateTask:
+ if _, exists := g.globalServices[v.Task.ServiceID]; !exists {
+ continue
+ }
+			// the global orchestrator needs to react when a task has terminated;
+			// it ignores tasks whose DesiredState is past running, which
+			// means the task has already been processed
+ if isTaskTerminated(v.Task) {
+ g.restartTask(ctx, v.Task.ID, v.Task.ServiceID)
+ }
+ case state.EventDeleteTask:
+			// the CLI allows deleting tasks
+ if _, exists := g.globalServices[v.Task.ServiceID]; !exists {
+ continue
+ }
+ g.reconcileServiceOneNode(ctx, v.Task.ServiceID, v.Task.NodeID)
+ }
+ case <-g.stopChan:
+ return nil
+ }
+ }
+}
+
+// Stop stops the orchestrator.
+func (g *GlobalOrchestrator) Stop() {
+ close(g.stopChan)
+ <-g.doneChan
+ g.updater.CancelAll()
+ g.restarts.CancelAll()
+}
+
+func (g *GlobalOrchestrator) removeTasksFromNode(ctx context.Context, node *api.Node) {
+ var (
+ tasks []*api.Task
+ err error
+ )
+ g.store.View(func(tx store.ReadTx) {
+ tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID))
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("global orchestrator: removeTasksFromNode failed finding tasks")
+ return
+ }
+
+ _, err = g.store.Batch(func(batch *store.Batch) error {
+ for _, t := range tasks {
+ // GlobalOrchestrator only removes tasks from globalServices
+ if _, exists := g.globalServices[t.ServiceID]; exists {
+ g.removeTask(ctx, batch, t)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("global orchestrator: removeTasksFromNode failed")
+ }
+}
+
+func (g *GlobalOrchestrator) reconcileOneService(ctx context.Context, service *api.Service) {
+ var (
+ tasks []*api.Task
+ err error
+ )
+ g.store.View(func(tx store.ReadTx) {
+ tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileOneService failed finding tasks")
+ return
+ }
+ // a node may have completed this service
+ nodeCompleted := make(map[string]struct{})
+ // nodeID -> task list
+ nodeTasks := make(map[string][]*api.Task)
+
+ for _, t := range tasks {
+ if isTaskRunning(t) {
+ // Collect all running instances of this service
+ nodeTasks[t.NodeID] = append(nodeTasks[t.NodeID], t)
+ } else {
+ // for finished tasks, check restartPolicy
+ if isTaskCompleted(t, restartCondition(t)) {
+ nodeCompleted[t.NodeID] = struct{}{}
+ }
+ }
+ }
+
+ _, err = g.store.Batch(func(batch *store.Batch) error {
+ var updateTasks []*api.Task
+ for nodeID := range g.nodes {
+ ntasks := nodeTasks[nodeID]
+			// if the restart policy considers this node to have finished
+			// its task, remove all of its running tasks
+			if _, exists := nodeCompleted[nodeID]; exists {
+				g.removeTasks(ctx, batch, service, ntasks)
+				continue
+			}
+ // this node needs to run 1 copy of the task
+ if len(ntasks) == 0 {
+ g.addTask(ctx, batch, service, nodeID)
+ } else {
+ updateTasks = append(updateTasks, ntasks[0])
+ g.removeTasks(ctx, batch, service, ntasks[1:])
+ }
+ }
+ if len(updateTasks) > 0 {
+ g.updater.Update(ctx, service, updateTasks)
+ }
+ return nil
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileOneService transaction failed")
+ }
+}
+
+// reconcileOneNode checks all global services on one node
+func (g *GlobalOrchestrator) reconcileOneNode(ctx context.Context, node *api.Node) {
+ switch node.Spec.Availability {
+ case api.NodeAvailabilityDrain:
+ log.G(ctx).Debugf("global orchestrator: node %s in drain state, removing tasks from it", node.ID)
+ g.removeTasksFromNode(ctx, node)
+ delete(g.nodes, node.ID)
+ return
+ case api.NodeAvailabilityActive:
+ if _, exists := g.nodes[node.ID]; !exists {
+ log.G(ctx).Debugf("global orchestrator: node %s not in current node list, adding it", node.ID)
+ g.nodes[node.ID] = struct{}{}
+ }
+ default:
+ log.G(ctx).Debugf("global orchestrator: node %s in %s state, doing nothing", node.ID, node.Spec.Availability.String())
+ return
+ }
+	// typically there are only a few global services on a node,
+	// so we iterate through all of them one by one. If raft store
+	// visits become a concern, this can be optimized.
+ for _, service := range g.globalServices {
+ g.reconcileServiceOneNode(ctx, service.ID, node.ID)
+ }
+}
+
+// reconcileServiceOneNode checks one service on one node
+func (g *GlobalOrchestrator) reconcileServiceOneNode(ctx context.Context, serviceID string, nodeID string) {
+ _, exists := g.nodes[nodeID]
+ if !exists {
+ return
+ }
+ service, exists := g.globalServices[serviceID]
+ if !exists {
+ return
+ }
+	// the node has completed this service
+ completed := false
+ // tasks for this node and service
+ var (
+ tasks []*api.Task
+ err error
+ )
+ g.store.View(func(tx store.ReadTx) {
+ var tasksOnNode []*api.Task
+ tasksOnNode, err = store.FindTasks(tx, store.ByNodeID(nodeID))
+ if err != nil {
+ return
+ }
+ for _, t := range tasksOnNode {
+ // only interested in one service
+ if t.ServiceID != serviceID {
+ continue
+ }
+ if isTaskRunning(t) {
+ tasks = append(tasks, t)
+ } else {
+ if isTaskCompleted(t, restartCondition(t)) {
+ completed = true
+ }
+ }
+ }
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("global orchestrator: reconcile failed finding tasks")
+ return
+ }
+
+ _, err = g.store.Batch(func(batch *store.Batch) error {
+		// if the restart policy considers this node to have finished
+		// its task, remove all of its running tasks
+ if completed {
+ g.removeTasks(ctx, batch, service, tasks)
+ return nil
+ }
+ // this node needs to run 1 copy of the task
+ if len(tasks) == 0 {
+ g.addTask(ctx, batch, service, nodeID)
+ } else {
+ g.removeTasks(ctx, batch, service, tasks[1:])
+ }
+ return nil
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServiceOneNode batch failed")
+ }
+}
+
+// restartTask calls the restart supervisor's Restart function, which
+// sets a task's desired state to dead and restarts it if the restart
+// policy calls for it to be restarted.
+func (g *GlobalOrchestrator) restartTask(ctx context.Context, taskID string, serviceID string) {
+ err := g.store.Update(func(tx store.Tx) error {
+ t := store.GetTask(tx, taskID)
+ if t == nil || t.DesiredState > api.TaskStateRunning {
+ return nil
+ }
+ service := store.GetService(tx, serviceID)
+ if service == nil {
+ return nil
+ }
+ return g.restarts.Restart(ctx, tx, service, *t)
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("global orchestrator: restartTask transaction failed")
+ }
+}
+
+func (g *GlobalOrchestrator) removeTask(ctx context.Context, batch *store.Batch, t *api.Task) {
+ // set existing task DesiredState to TaskStateShutdown
+ // TODO(aaronl): optimistic update?
+ err := batch.Update(func(tx store.Tx) error {
+ t = store.GetTask(tx, t.ID)
+ if t != nil {
+ t.DesiredState = api.TaskStateShutdown
+ return store.UpdateTask(tx, t)
+ }
+ return nil
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("global orchestrator: removeTask failed to remove %s", t.ID)
+ }
+}
+
+func (g *GlobalOrchestrator) addTask(ctx context.Context, batch *store.Batch, service *api.Service, nodeID string) {
+ task := newTask(service, 0)
+ task.NodeID = nodeID
+
+ err := batch.Update(func(tx store.Tx) error {
+ return store.CreateTask(tx, task)
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("global orchestrator: failed to create task")
+ }
+}
+
+func (g *GlobalOrchestrator) removeTasks(ctx context.Context, batch *store.Batch, service *api.Service, tasks []*api.Task) {
+ for _, t := range tasks {
+ g.removeTask(ctx, batch, t)
+ }
+}
+
+func isTaskRunning(t *api.Task) bool {
+ return t != nil && t.DesiredState <= api.TaskStateRunning && t.Status.State <= api.TaskStateRunning
+}
+
+func isValidNode(n *api.Node) bool {
+ // current simulation spec could be nil
+ return n != nil && n.Spec.Availability != api.NodeAvailabilityDrain
+}
+
+func isTaskCompleted(t *api.Task, restartPolicy api.RestartPolicy_RestartCondition) bool {
+ if t == nil || isTaskRunning(t) {
+ return false
+ }
+ return restartPolicy == api.RestartOnNone ||
+ (restartPolicy == api.RestartOnFailure && t.Status.State == api.TaskStateCompleted)
+}
+
+func isTaskTerminated(t *api.Task) bool {
+ return t != nil && t.Status.State > api.TaskStateRunning
+}
+
+func isGlobalService(service *api.Service) bool {
+ if service == nil {
+ return false
+ }
+ _, ok := service.Spec.GetMode().(*api.ServiceSpec_Global)
+ return ok
+}
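+
+// Editor's note: Spec.GetMode() returns a protobuf oneof, so a type
+// assertion (or switch) distinguishes the two service modes. An
+// illustrative sketch, not part of the vendored source:
+//
+//	switch service.Spec.GetMode().(type) {
+//	case *api.ServiceSpec_Global: // one task per eligible node
+//	case *api.ServiceSpec_Replicated: // N replicas cluster-wide
+//	}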
diff --git a/vendor/src/github.com/docker/swarmkit/manager/orchestrator/replicated.go b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/replicated.go
new file mode 100644
index 0000000000..cd73860c43
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/replicated.go
@@ -0,0 +1,170 @@
+package orchestrator
+
+import (
+ "time"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/identity"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+ "golang.org/x/net/context"
+)
+
+// A ReplicatedOrchestrator runs a reconciliation loop to create and destroy
+// tasks as necessary for the replicated services.
+type ReplicatedOrchestrator struct {
+ store *store.MemoryStore
+
+ reconcileServices map[string]*api.Service
+ restartTasks map[string]struct{}
+
+ // stopChan signals to the state machine to stop running.
+ stopChan chan struct{}
+ // doneChan is closed when the state machine terminates.
+ doneChan chan struct{}
+
+ updater *UpdateSupervisor
+ restarts *RestartSupervisor
+}
+
+// New creates a new ReplicatedOrchestrator.
+func New(store *store.MemoryStore) *ReplicatedOrchestrator {
+ restartSupervisor := NewRestartSupervisor(store)
+ updater := NewUpdateSupervisor(store, restartSupervisor)
+ return &ReplicatedOrchestrator{
+ store: store,
+ stopChan: make(chan struct{}),
+ doneChan: make(chan struct{}),
+ reconcileServices: make(map[string]*api.Service),
+ restartTasks: make(map[string]struct{}),
+ updater: updater,
+ restarts: restartSupervisor,
+ }
+}
+
+// Run contains the orchestrator event loop. It runs until Stop is called.
+func (r *ReplicatedOrchestrator) Run(ctx context.Context) error {
+ defer close(r.doneChan)
+
+ // Watch changes to services and tasks
+ queue := r.store.WatchQueue()
+ watcher, cancel := queue.Watch()
+ defer cancel()
+
+ // Balance existing services and drain initial tasks attached to invalid
+ // nodes
+ var err error
+ r.store.View(func(readTx store.ReadTx) {
+ if err = r.initTasks(ctx, readTx); err != nil {
+ return
+ }
+ err = r.initServices(readTx)
+ })
+ if err != nil {
+ return err
+ }
+
+ r.tick(ctx)
+
+ for {
+ select {
+ case event := <-watcher:
+ // TODO(stevvooe): Use ctx to limit running time of operation.
+ r.handleTaskEvent(ctx, event)
+ r.handleServiceEvent(ctx, event)
+ switch event.(type) {
+ case state.EventCommit:
+ r.tick(ctx)
+ }
+ case <-r.stopChan:
+ return nil
+ }
+ }
+}
+
+// Stop stops the orchestrator.
+func (r *ReplicatedOrchestrator) Stop() {
+ close(r.stopChan)
+ <-r.doneChan
+ r.updater.CancelAll()
+ r.restarts.CancelAll()
+}
+
+func (r *ReplicatedOrchestrator) tick(ctx context.Context) {
+ // tickTasks must be called first, so we respond to task-level changes
+	// before performing service reconciliation.
+ r.tickTasks(ctx)
+ r.tickServices(ctx)
+}
+
+func newTask(service *api.Service, instance uint64) *api.Task {
+ // NOTE(stevvooe): For now, we don't override the container naming and
+ // labeling scheme in the agent. If we decide to do this in the future,
+ // they should be overridden here.
+ return &api.Task{
+ ID: identity.NewID(),
+ ServiceAnnotations: service.Spec.Annotations,
+ Spec: service.Spec.Task,
+ ServiceID: service.ID,
+ Slot: instance,
+ Status: api.TaskStatus{
+ State: api.TaskStateNew,
+ Timestamp: ptypes.MustTimestampProto(time.Now()),
+ Message: "created",
+ },
+ DesiredState: api.TaskStateRunning,
+ }
+}
+
+// isReplicatedService checks if a service is a replicated service
+func isReplicatedService(service *api.Service) bool {
+	// a nil check is required because the service may already have
+	// been removed from the store
+ if service == nil {
+ return false
+ }
+ _, ok := service.Spec.GetMode().(*api.ServiceSpec_Replicated)
+ return ok
+}
+
+func deleteServiceTasks(ctx context.Context, s *store.MemoryStore, service *api.Service) {
+ var (
+ tasks []*api.Task
+ err error
+ )
+ s.View(func(tx store.ReadTx) {
+ tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("failed to list tasks")
+ return
+ }
+
+ _, err = s.Batch(func(batch *store.Batch) error {
+ for _, t := range tasks {
+ err := batch.Update(func(tx store.Tx) error {
+ if err := store.DeleteTask(tx, t.ID); err != nil {
+ log.G(ctx).WithError(err).Errorf("failed to delete task")
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("task search transaction failed")
+ }
+}
+
+func restartCondition(task *api.Task) api.RestartPolicy_RestartCondition {
+ restartCondition := api.RestartOnAny
+ if task.Spec.Restart != nil {
+ restartCondition = task.Spec.Restart.Condition
+ }
+ return restartCondition
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/orchestrator/restart.go b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/restart.go
new file mode 100644
index 0000000000..f7ab79b6e1
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/restart.go
@@ -0,0 +1,383 @@
+package orchestrator
+
+import (
+ "container/list"
+ "sync"
+ "time"
+
+ "github.com/docker/go-events"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+ "golang.org/x/net/context"
+)
+
+const defaultOldTaskTimeout = time.Minute
+const defaultRestartDelay = 5 * time.Second
+
+type restartedInstance struct {
+ timestamp time.Time
+}
+
+type instanceRestartInfo struct {
+ // counter of restarts for this instance.
+ totalRestarts uint64
+ // Linked list of restartedInstance structs. Only used when
+ // Restart.MaxAttempts and Restart.Window are both
+ // nonzero.
+ restartedInstances *list.List
+}
+
+type delayedStart struct {
+ cancel func()
+ doneCh chan struct{}
+}
+
+// RestartSupervisor initiates and manages restarts. It's responsible for
+// delaying restarts when applicable.
+type RestartSupervisor struct {
+ mu sync.Mutex
+ store *store.MemoryStore
+ delays map[string]delayedStart
+ history map[instanceTuple]*instanceRestartInfo
+ historyByService map[string]map[instanceTuple]struct{}
+ taskTimeout time.Duration
+}
+
+// NewRestartSupervisor creates a new RestartSupervisor.
+func NewRestartSupervisor(store *store.MemoryStore) *RestartSupervisor {
+ return &RestartSupervisor{
+ store: store,
+ delays: make(map[string]delayedStart),
+ history: make(map[instanceTuple]*instanceRestartInfo),
+ historyByService: make(map[string]map[instanceTuple]struct{}),
+ taskTimeout: defaultOldTaskTimeout,
+ }
+}
+
+// Restart initiates a new task to replace t if appropriate under the service's
+// restart policy.
+func (r *RestartSupervisor) Restart(ctx context.Context, tx store.Tx, service *api.Service, t api.Task) error {
+ // TODO(aluzzardi): This function should not depend on `service`.
+
+ t.DesiredState = api.TaskStateShutdown
+ err := store.UpdateTask(tx, &t)
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("failed to set task desired state to dead")
+ return err
+ }
+
+ if !r.shouldRestart(ctx, &t, service) {
+ return nil
+ }
+
+ var restartTask *api.Task
+
+ if isReplicatedService(service) {
+ restartTask = newTask(service, t.Slot)
+ } else if isGlobalService(service) {
+ restartTask = newTask(service, 0)
+ restartTask.NodeID = t.NodeID
+ } else {
+ log.G(ctx).Error("service not supported by restart supervisor")
+ return nil
+ }
+
+ n := store.GetNode(tx, t.NodeID)
+
+ restartTask.DesiredState = api.TaskStateAccepted
+
+ var restartDelay time.Duration
+	// Restart delay does not apply to drained nodes
+ if n == nil || n.Spec.Availability != api.NodeAvailabilityDrain {
+ if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
+ var err error
+ restartDelay, err = ptypes.Duration(t.Spec.Restart.Delay)
+ if err != nil {
+ log.G(ctx).WithError(err).Error("invalid restart delay; using default")
+ restartDelay = defaultRestartDelay
+ }
+ } else {
+ restartDelay = defaultRestartDelay
+ }
+ }
+
+ waitStop := true
+
+ // Normally we wait for the old task to stop running, but we skip this
+ // if the old task is already dead or the node it's assigned to is down.
+ if (n != nil && n.Status.State == api.NodeStatus_DOWN) || t.Status.State > api.TaskStateRunning {
+ waitStop = false
+ }
+
+ if err := store.CreateTask(tx, restartTask); err != nil {
+ log.G(ctx).WithError(err).WithField("task.id", restartTask.ID).Error("task create failed")
+ return err
+ }
+
+ r.recordRestartHistory(restartTask)
+
+ r.DelayStart(ctx, tx, &t, restartTask.ID, restartDelay, waitStop)
+ return nil
+}
+
+func (r *RestartSupervisor) shouldRestart(ctx context.Context, t *api.Task, service *api.Service) bool {
+ // TODO(aluzzardi): This function should not depend on `service`.
+
+ condition := restartCondition(t)
+
+ if condition != api.RestartOnAny &&
+ (condition != api.RestartOnFailure || t.Status.State == api.TaskStateCompleted) {
+ return false
+ }
+
+ if t.Spec.Restart == nil || t.Spec.Restart.MaxAttempts == 0 {
+ return true
+ }
+
+ instanceTuple := instanceTuple{
+ instance: t.Slot,
+ serviceID: t.ServiceID,
+ }
+
+ // Instance is not meaningful for "global" tasks, so they need to be
+ // indexed by NodeID.
+ if isGlobalService(service) {
+ instanceTuple.nodeID = t.NodeID
+ }
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ restartInfo := r.history[instanceTuple]
+ if restartInfo == nil {
+ return true
+ }
+
+ if t.Spec.Restart.Window == nil || (t.Spec.Restart.Window.Seconds == 0 && t.Spec.Restart.Window.Nanos == 0) {
+ return restartInfo.totalRestarts < t.Spec.Restart.MaxAttempts
+ }
+
+ if restartInfo.restartedInstances == nil {
+ return true
+ }
+
+ window, err := ptypes.Duration(t.Spec.Restart.Window)
+ if err != nil {
+ log.G(ctx).WithError(err).Error("invalid restart lookback window")
+ return restartInfo.totalRestarts < t.Spec.Restart.MaxAttempts
+ }
+ lookback := time.Now().Add(-window)
+
+ var next *list.Element
+ for e := restartInfo.restartedInstances.Front(); e != nil; e = next {
+ next = e.Next()
+
+ if e.Value.(restartedInstance).timestamp.After(lookback) {
+ break
+ }
+ restartInfo.restartedInstances.Remove(e)
+ }
+
+ numRestarts := uint64(restartInfo.restartedInstances.Len())
+
+ if numRestarts == 0 {
+ restartInfo.restartedInstances = nil
+ }
+
+ return numRestarts < t.Spec.Restart.MaxAttempts
+}
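+
+// Editor's note: an illustrative sketch (not part of the vendored
+// source) of the window pruning used in shouldRestart above; l is a
+// hypothetical *list.List whose entries were appended in time order,
+// so expired entries are removed from the front until the first
+// in-window entry is found:
+//
+//	lookback := time.Now().Add(-window)
+//	for e := l.Front(); e != nil; {
+//		next := e.Next()
+//		if e.Value.(restartedInstance).timestamp.After(lookback) {
+//			break // ordered list: the rest are within the window
+//		}
+//		l.Remove(e)
+//		e = next
+//	}
+//	recent := uint64(l.Len()) // restarts within the window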
+
+func (r *RestartSupervisor) recordRestartHistory(restartTask *api.Task) {
+ if restartTask.Spec.Restart == nil || restartTask.Spec.Restart.MaxAttempts == 0 {
+ // No limit on the number of restarts, so no need to record
+ // history.
+ return
+ }
+ tuple := instanceTuple{
+ instance: restartTask.Slot,
+ serviceID: restartTask.ServiceID,
+ nodeID: restartTask.NodeID,
+ }
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if r.history[tuple] == nil {
+ r.history[tuple] = &instanceRestartInfo{}
+ }
+
+ restartInfo := r.history[tuple]
+ restartInfo.totalRestarts++
+
+ if r.historyByService[restartTask.ServiceID] == nil {
+ r.historyByService[restartTask.ServiceID] = make(map[instanceTuple]struct{})
+ }
+ r.historyByService[restartTask.ServiceID][tuple] = struct{}{}
+
+ if restartTask.Spec.Restart.Window != nil && (restartTask.Spec.Restart.Window.Seconds != 0 || restartTask.Spec.Restart.Window.Nanos != 0) {
+ if restartInfo.restartedInstances == nil {
+ restartInfo.restartedInstances = list.New()
+ }
+
+ restartedInstance := restartedInstance{
+ timestamp: time.Now(),
+ }
+
+ restartInfo.restartedInstances.PushBack(restartedInstance)
+ }
+}
+
+// DelayStart starts a timer that moves the task from READY to RUNNING once:
+// - The restart delay has elapsed (if applicable)
+// - The old task that it's replacing has stopped running (or this times out)
+// It must be called during an Update transaction to ensure that it does not
+// miss events. The purpose of the store.Tx argument is to avoid accidental
+// calls outside an Update transaction.
+func (r *RestartSupervisor) DelayStart(ctx context.Context, _ store.Tx, oldTask *api.Task, newTaskID string, delay time.Duration, waitStop bool) <-chan struct{} {
+ ctx, cancel := context.WithCancel(context.Background())
+ doneCh := make(chan struct{})
+
+ r.mu.Lock()
+ for {
+ oldDelay, ok := r.delays[newTaskID]
+ if !ok {
+ break
+ }
+ oldDelay.cancel()
+ r.mu.Unlock()
+ // Note that this channel read should only block for a very
+ // short time, because we cancelled the existing delay and
+ // that should cause it to stop immediately.
+ <-oldDelay.doneCh
+ r.mu.Lock()
+ }
+ r.delays[newTaskID] = delayedStart{cancel: cancel, doneCh: doneCh}
+ r.mu.Unlock()
+
+ var watch chan events.Event
+ cancelWatch := func() {}
+
+ if waitStop && oldTask != nil {
+ // Wait for either the old task to complete, or the old task's
+ // node to become unavailable.
+ watch, cancelWatch = state.Watch(
+ r.store.WatchQueue(),
+ state.EventUpdateTask{
+ Task: &api.Task{ID: oldTask.ID, Status: api.TaskStatus{State: api.TaskStateRunning}},
+ Checks: []state.TaskCheckFunc{state.TaskCheckID, state.TaskCheckStateGreaterThan},
+ },
+ state.EventUpdateNode{
+ Node: &api.Node{ID: oldTask.NodeID, Status: api.NodeStatus{State: api.NodeStatus_DOWN}},
+ Checks: []state.NodeCheckFunc{state.NodeCheckID, state.NodeCheckState},
+ },
+ state.EventDeleteNode{
+ Node: &api.Node{ID: oldTask.NodeID},
+ Checks: []state.NodeCheckFunc{state.NodeCheckID},
+ },
+ )
+ }
+
+ go func() {
+ defer func() {
+ cancelWatch()
+ r.mu.Lock()
+ delete(r.delays, newTaskID)
+ r.mu.Unlock()
+ close(doneCh)
+ }()
+
+ oldTaskTimeout := time.After(r.taskTimeout)
+
+ // Wait for the delay to elapse, if one is specified.
+ if delay != 0 {
+ select {
+ case <-time.After(delay):
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ if waitStop {
+ select {
+ case <-watch:
+ case <-oldTaskTimeout:
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ err := r.store.Update(func(tx store.Tx) error {
+ err := r.StartNow(tx, newTaskID)
+ if err != nil {
+ log.G(ctx).WithError(err).WithField("task.id", newTaskID).Error("moving task out of delayed state failed")
+ }
+ return nil
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).WithField("task.id", newTaskID).Error("task restart transaction failed")
+ }
+ }()
+
+ return doneCh
+}
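+
+// Editor's note: a sketch of the call pattern DelayStart's contract
+// implies (illustrative, not part of the vendored source; s, oldTask,
+// newTask, and delay are hypothetical). It must run inside a
+// store.Update transaction so no task events are missed between
+// creating the task and arming the delay:
+//
+//	err := s.Update(func(tx store.Tx) error {
+//		if err := store.CreateTask(tx, newTask); err != nil {
+//			return err
+//		}
+//		r.DelayStart(ctx, tx, oldTask, newTask.ID, delay, true)
+//		return nil
+//	})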
+
+// StartNow moves the task into the RUNNING state so it will proceed to start
+// up.
+func (r *RestartSupervisor) StartNow(tx store.Tx, taskID string) error {
+ t := store.GetTask(tx, taskID)
+ if t == nil || t.DesiredState >= api.TaskStateRunning {
+ return nil
+ }
+ t.DesiredState = api.TaskStateRunning
+ return store.UpdateTask(tx, t)
+}
+
+// Cancel cancels a pending restart.
+func (r *RestartSupervisor) Cancel(taskID string) {
+ r.mu.Lock()
+ delay, ok := r.delays[taskID]
+ r.mu.Unlock()
+
+ if !ok {
+ return
+ }
+
+ delay.cancel()
+ <-delay.doneCh
+}
+
+// CancelAll aborts all pending restarts and waits for any instances of
+// StartNow that have already triggered to complete.
+func (r *RestartSupervisor) CancelAll() {
+ var cancelled []delayedStart
+
+ r.mu.Lock()
+	for _, delay := range r.delays {
+		delay.cancel()
+		cancelled = append(cancelled, delay)
+	}
+ r.mu.Unlock()
+
+ for _, delay := range cancelled {
+ <-delay.doneCh
+ }
+}
+
+// ClearServiceHistory forgets restart history related to a given service ID.
+func (r *RestartSupervisor) ClearServiceHistory(serviceID string) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ tuples := r.historyByService[serviceID]
+ if tuples == nil {
+ return
+ }
+
+ delete(r.historyByService, serviceID)
+
+ for t := range tuples {
+ delete(r.history, t)
+ }
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/orchestrator/services.go b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/services.go
new file mode 100644
index 0000000000..7f775dd857
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/services.go
@@ -0,0 +1,163 @@
+package orchestrator
+
+import (
+ "github.com/docker/go-events"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+ "golang.org/x/net/context"
+)
+
+// This file provides service-level orchestration. It observes changes to
+// services and creates and destroys tasks as necessary to match the service
+// specifications. This is different from task-level orchestration, which
+// responds to changes in individual tasks (or nodes which run them).
+
+func (r *ReplicatedOrchestrator) initServices(readTx store.ReadTx) error {
+ services, err := store.FindServices(readTx, store.All)
+ if err != nil {
+ return err
+ }
+ for _, s := range services {
+ if isReplicatedService(s) {
+ r.reconcileServices[s.ID] = s
+ }
+ }
+ return nil
+}
+
+func (r *ReplicatedOrchestrator) handleServiceEvent(ctx context.Context, event events.Event) {
+ switch v := event.(type) {
+ case state.EventDeleteService:
+ if !isReplicatedService(v.Service) {
+ return
+ }
+ deleteServiceTasks(ctx, r.store, v.Service)
+ r.restarts.ClearServiceHistory(v.Service.ID)
+ case state.EventCreateService:
+ if !isReplicatedService(v.Service) {
+ return
+ }
+ r.reconcileServices[v.Service.ID] = v.Service
+ case state.EventUpdateService:
+ if !isReplicatedService(v.Service) {
+ return
+ }
+ r.reconcileServices[v.Service.ID] = v.Service
+ }
+}
+
+func (r *ReplicatedOrchestrator) tickServices(ctx context.Context) {
+ if len(r.reconcileServices) > 0 {
+ for _, s := range r.reconcileServices {
+ r.reconcile(ctx, s)
+ }
+ r.reconcileServices = make(map[string]*api.Service)
+ }
+}
+
+func (r *ReplicatedOrchestrator) resolveService(ctx context.Context, task *api.Task) *api.Service {
+ if task.ServiceID == "" {
+ return nil
+ }
+ var service *api.Service
+ r.store.View(func(tx store.ReadTx) {
+ service = store.GetService(tx, task.ServiceID)
+ })
+ return service
+}
+
+func (r *ReplicatedOrchestrator) reconcile(ctx context.Context, service *api.Service) {
+ var (
+ tasks []*api.Task
+ err error
+ )
+ r.store.View(func(tx store.ReadTx) {
+ tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("reconcile failed finding tasks")
+ return
+ }
+
+ runningTasks := make([]*api.Task, 0, len(tasks))
+ runningInstances := make(map[uint64]struct{}) // this could be a bitfield...
+ for _, t := range tasks {
+ // Technically the check below could just be
+ // t.DesiredState <= api.TaskStateRunning, but ignoring tasks
+ // with DesiredState == NEW simplifies the drainer unit tests.
+ if t.DesiredState > api.TaskStateNew && t.DesiredState <= api.TaskStateRunning {
+ runningTasks = append(runningTasks, t)
+ runningInstances[t.Slot] = struct{}{}
+ }
+ }
+ numTasks := len(runningTasks)
+
+ deploy := service.Spec.GetMode().(*api.ServiceSpec_Replicated)
+ specifiedInstances := int(deploy.Replicated.Replicas)
+
+ // TODO(aaronl): Add support for restart delays.
+
+ _, err = r.store.Batch(func(batch *store.Batch) error {
+ switch {
+ case specifiedInstances > numTasks:
+ log.G(ctx).Debugf("Service %s was scaled up from %d to %d instances", service.ID, numTasks, specifiedInstances)
+ // Update all current tasks then add missing tasks
+ r.updater.Update(ctx, service, runningTasks)
+ r.addTasks(ctx, batch, service, runningInstances, specifiedInstances-numTasks)
+
+ case specifiedInstances < numTasks:
+ // Update up to N tasks then remove the extra
+ log.G(ctx).Debugf("Service %s was scaled down from %d to %d instances", service.ID, numTasks, specifiedInstances)
+ r.updater.Update(ctx, service, runningTasks[:specifiedInstances])
+ r.removeTasks(ctx, batch, service, runningTasks[specifiedInstances:])
+
+ case specifiedInstances == numTasks:
+ // Simple update, no scaling - update all tasks.
+ r.updater.Update(ctx, service, runningTasks)
+ }
+ return nil
+ })
+
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("reconcile batch failed")
+ }
+}
+
+func (r *ReplicatedOrchestrator) addTasks(ctx context.Context, batch *store.Batch, service *api.Service, runningInstances map[uint64]struct{}, count int) {
+ instance := uint64(0)
+ for i := 0; i < count; i++ {
+ // Find an instance number that is missing a running task
+ for {
+ instance++
+ if _, ok := runningInstances[instance]; !ok {
+ break
+ }
+ }
+
+ err := batch.Update(func(tx store.Tx) error {
+ return store.CreateTask(tx, newTask(service, instance))
+ })
+ if err != nil {
+ log.G(ctx).Errorf("Failed to create task: %v", err)
+ }
+ }
+}
+
+func (r *ReplicatedOrchestrator) removeTasks(ctx context.Context, batch *store.Batch, service *api.Service, tasks []*api.Task) {
+ for _, t := range tasks {
+ err := batch.Update(func(tx store.Tx) error {
+ // TODO(aaronl): optimistic update?
+ t = store.GetTask(tx, t.ID)
+ if t != nil {
+ t.DesiredState = api.TaskStateShutdown
+ return store.UpdateTask(tx, t)
+ }
+ return nil
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("removing task %s failed", t.ID)
+ }
+ }
+}
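+
+// Editor's note: "removing" a task here means raising its DesiredState
+// to TaskStateShutdown rather than deleting the record; the agent then
+// stops the container, and the TaskReaper later prunes the old task
+// entry once it exceeds the history retention limit.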
diff --git a/vendor/src/github.com/docker/swarmkit/manager/orchestrator/task_reaper.go b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/task_reaper.go
new file mode 100644
index 0000000000..520b3be7d2
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/task_reaper.go
@@ -0,0 +1,203 @@
+package orchestrator
+
+import (
+ "sort"
+ "time"
+
+ "github.com/docker/go-events"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+)
+
+const (
+ // maxDirty is the size threshold for running a task pruning operation.
+ maxDirty = 1000
+)
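+
+// Editor's note: maxDirty bounds how many dirty service/slot tuples can
+// accumulate between prune passes; Run below flushes either when this
+// threshold is crossed or on a periodic ticker. The batch-or-timeout
+// shape in isolation (illustrative, not part of the vendored source;
+// events, keyOf, and flush are hypothetical):
+//
+//	select {
+//	case ev := <-events:
+//		dirty[keyOf(ev)] = struct{}{}
+//		if len(dirty) > maxDirty {
+//			flush()
+//		}
+//	case <-ticker.C:
+//		flush()
+//	}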
+
+type instanceTuple struct {
+ instance uint64 // unset for global tasks
+ serviceID string
+ nodeID string // unset for replicated tasks
+}
+
+// A TaskReaper deletes old tasks when more than TaskHistoryRetentionLimit tasks
+// exist for the same service/instance or service/nodeid combination.
+type TaskReaper struct {
+ store *store.MemoryStore
+ // taskHistory is the number of tasks to keep
+ taskHistory int64
+ dirty map[instanceTuple]struct{}
+ watcher chan events.Event
+ cancelWatch func()
+ stopChan chan struct{}
+ doneChan chan struct{}
+}
+
+// NewTaskReaper creates a new TaskReaper.
+func NewTaskReaper(store *store.MemoryStore) *TaskReaper {
+ watcher, cancel := state.Watch(store.WatchQueue(), state.EventCreateTask{}, state.EventUpdateCluster{})
+
+ return &TaskReaper{
+ store: store,
+ watcher: watcher,
+ cancelWatch: cancel,
+ dirty: make(map[instanceTuple]struct{}),
+ stopChan: make(chan struct{}),
+ doneChan: make(chan struct{}),
+ }
+}
+
+// Run is the TaskReaper's main loop.
+func (tr *TaskReaper) Run() {
+ defer close(tr.doneChan)
+
+ tr.store.View(func(readTx store.ReadTx) {
+ clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
+ if err == nil && len(clusters) == 1 {
+ tr.taskHistory = clusters[0].Spec.Orchestration.TaskHistoryRetentionLimit
+ }
+ })
+
+ ticker := time.NewTicker(250 * time.Millisecond)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case event := <-tr.watcher:
+ switch v := event.(type) {
+ case state.EventCreateTask:
+ t := v.Task
+ tr.dirty[instanceTuple{
+ instance: t.Slot,
+ serviceID: t.ServiceID,
+ nodeID: t.NodeID,
+ }] = struct{}{}
+ if len(tr.dirty) > maxDirty {
+ tr.tick()
+ }
+ case state.EventUpdateCluster:
+ tr.taskHistory = v.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit
+ }
+ case <-ticker.C:
+ tr.tick()
+ case <-tr.stopChan:
+ return
+ }
+ }
+}
+
+func (tr *TaskReaper) tick() {
+ if len(tr.dirty) == 0 {
+ return
+ }
+
+ defer func() {
+ tr.dirty = make(map[instanceTuple]struct{})
+ }()
+
+ var deleteTasks []string
+
+ tr.store.View(func(tx store.ReadTx) {
+ for dirty := range tr.dirty {
+ service := store.GetService(tx, dirty.serviceID)
+ if service == nil {
+ continue
+ }
+
+ taskHistory := tr.taskHistory
+
+ if taskHistory < 0 {
+ continue
+ }
+
+ var historicTasks []*api.Task
+
+ switch service.Spec.GetMode().(type) {
+ case *api.ServiceSpec_Replicated:
+ var err error
+ historicTasks, err = store.FindTasks(tx, store.BySlot(dirty.serviceID, dirty.instance))
+ if err != nil {
+ continue
+ }
+
+ case *api.ServiceSpec_Global:
+ tasksByNode, err := store.FindTasks(tx, store.ByNodeID(dirty.nodeID))
+ if err != nil {
+ continue
+ }
+
+ for _, t := range tasksByNode {
+ if t.ServiceID == dirty.serviceID {
+ historicTasks = append(historicTasks, t)
+ }
+ }
+ }
+
+ if int64(len(historicTasks)) <= taskHistory {
+ continue
+ }
+
+ // TODO(aaronl): This could filter for non-running tasks and use quickselect
+ // instead of sorting the whole slice.
+ sort.Sort(tasksByTimestamp(historicTasks))
+
+ for _, t := range historicTasks {
+ if t.DesiredState <= api.TaskStateRunning {
+ // Don't delete running tasks
+ continue
+ }
+
+ deleteTasks = append(deleteTasks, t.ID)
+
+ taskHistory++
+ if int64(len(historicTasks)) <= taskHistory {
+ break
+ }
+ }
+
+ }
+ })
+
+ if len(deleteTasks) > 0 {
+ tr.store.Batch(func(batch *store.Batch) error {
+ for _, taskID := range deleteTasks {
+ batch.Update(func(tx store.Tx) error {
+ return store.DeleteTask(tx, taskID)
+ })
+ }
+ return nil
+ })
+ }
+}
+
+// Stop stops the TaskReaper and waits for the main loop to exit.
+func (tr *TaskReaper) Stop() {
+ tr.cancelWatch()
+ close(tr.stopChan)
+ <-tr.doneChan
+}
+
+type tasksByTimestamp []*api.Task
+
+func (t tasksByTimestamp) Len() int {
+ return len(t)
+}
+func (t tasksByTimestamp) Swap(i, j int) {
+ t[i], t[j] = t[j], t[i]
+}
+func (t tasksByTimestamp) Less(i, j int) bool {
+ if t[i].Status.Timestamp == nil {
+ return true
+ }
+ if t[j].Status.Timestamp == nil {
+ return false
+ }
+ if t[i].Status.Timestamp.Seconds < t[j].Status.Timestamp.Seconds {
+ return true
+ }
+ if t[i].Status.Timestamp.Seconds > t[j].Status.Timestamp.Seconds {
+ return false
+ }
+ return t[i].Status.Timestamp.Nanos < t[j].Status.Timestamp.Nanos
+}
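+
+// Editor's note: tasksByTimestamp implements sort.Interface, ordering
+// tasks with nil timestamps first and otherwise comparing seconds, then
+// nanos; the reaper above invokes it as
+// sort.Sort(tasksByTimestamp(historicTasks)).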
diff --git a/vendor/src/github.com/docker/swarmkit/manager/orchestrator/tasks.go b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/tasks.go
new file mode 100644
index 0000000000..add5ed9a07
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/tasks.go
@@ -0,0 +1,233 @@
+package orchestrator
+
+import (
+ "time"
+
+ "github.com/docker/go-events"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+ "golang.org/x/net/context"
+)
+
+// This file provides task-level orchestration. It observes changes to task
+// and node state and kills/recreates tasks if necessary. This is distinct from
+// service-level reconciliation, which observes changes to services and creates
+// and/or kills tasks to match the service definition.
+
+func invalidNode(n *api.Node) bool {
+ return n == nil ||
+ n.Status.State == api.NodeStatus_DOWN ||
+ n.Spec.Availability == api.NodeAvailabilityDrain
+}
+
+func (r *ReplicatedOrchestrator) initTasks(ctx context.Context, readTx store.ReadTx) error {
+ tasks, err := store.FindTasks(readTx, store.All)
+ if err != nil {
+ return err
+ }
+ for _, t := range tasks {
+ if t.NodeID != "" {
+ n := store.GetNode(readTx, t.NodeID)
+ if invalidNode(n) && t.Status.State <= api.TaskStateRunning && t.DesiredState <= api.TaskStateRunning {
+ r.restartTasks[t.ID] = struct{}{}
+ }
+ }
+ }
+
+ _, err = r.store.Batch(func(batch *store.Batch) error {
+ for _, t := range tasks {
+ if t.ServiceID == "" {
+ continue
+ }
+
+ // TODO(aluzzardi): We should NOT retrieve the service here.
+ service := store.GetService(readTx, t.ServiceID)
+ if service == nil {
+ // Service was deleted
+ err := batch.Update(func(tx store.Tx) error {
+ return store.DeleteTask(tx, t.ID)
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Error("failed to delete task whose service was removed")
+ }
+ continue
+ }
+ // TODO(aluzzardi): This is shady. We should have a more generic condition.
+ if t.DesiredState != api.TaskStateAccepted || !isReplicatedService(service) {
+ continue
+ }
+ restartDelay := defaultRestartDelay
+ if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
+ var err error
+ restartDelay, err = ptypes.Duration(t.Spec.Restart.Delay)
+ if err != nil {
+ log.G(ctx).WithError(err).Error("invalid restart delay")
+ restartDelay = defaultRestartDelay
+ }
+ }
+ if restartDelay != 0 {
+ timestamp, err := ptypes.Timestamp(t.Status.Timestamp)
+ if err == nil {
+ restartTime := timestamp.Add(restartDelay)
+ calculatedRestartDelay := restartTime.Sub(time.Now())
+ if calculatedRestartDelay < restartDelay {
+ restartDelay = calculatedRestartDelay
+ }
+ if restartDelay > 0 {
+ _ = batch.Update(func(tx store.Tx) error {
+ t := store.GetTask(tx, t.ID)
+ // TODO(aluzzardi): This is shady as well. We should have a more generic condition.
+ if t == nil || t.DesiredState != api.TaskStateAccepted {
+ return nil
+ }
+ r.restarts.DelayStart(ctx, tx, nil, t.ID, restartDelay, true)
+ return nil
+ })
+ continue
+ }
+ } else {
+ log.G(ctx).WithError(err).Error("invalid status timestamp")
+ }
+ }
+
+ // Start now
+ err := batch.Update(func(tx store.Tx) error {
+ return r.restarts.StartNow(tx, t.ID)
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).WithField("task.id", t.ID).Error("moving task out of delayed state failed")
+ }
+ }
+ return nil
+ })
+
+ return err
+}
+
+func (r *ReplicatedOrchestrator) handleTaskEvent(ctx context.Context, event events.Event) {
+ switch v := event.(type) {
+ case state.EventDeleteNode:
+ r.restartTasksByNodeID(ctx, v.Node.ID)
+ case state.EventCreateNode:
+ r.handleNodeChange(ctx, v.Node)
+ case state.EventUpdateNode:
+ r.handleNodeChange(ctx, v.Node)
+ case state.EventDeleteTask:
+ if v.Task.DesiredState <= api.TaskStateRunning {
+ service := r.resolveService(ctx, v.Task)
+ if !isReplicatedService(service) {
+ return
+ }
+ r.reconcileServices[service.ID] = service
+ }
+ r.restarts.Cancel(v.Task.ID)
+ case state.EventUpdateTask:
+ r.handleTaskChange(ctx, v.Task)
+ case state.EventCreateTask:
+ r.handleTaskChange(ctx, v.Task)
+ }
+}
+
+func (r *ReplicatedOrchestrator) tickTasks(ctx context.Context) {
+ if len(r.restartTasks) > 0 {
+ _, err := r.store.Batch(func(batch *store.Batch) error {
+ for taskID := range r.restartTasks {
+ err := batch.Update(func(tx store.Tx) error {
+ // TODO(aaronl): optimistic update?
+ t := store.GetTask(tx, taskID)
+ if t != nil {
+ if t.DesiredState > api.TaskStateRunning {
+ return nil
+ }
+
+ service := store.GetService(tx, t.ServiceID)
+ if !isReplicatedService(service) {
+ return nil
+ }
+
+ // Restart task if applicable
+ if err := r.restarts.Restart(ctx, tx, service, *t); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("ReplicatedOrchestrator task restart transaction failed")
+ }
+ }
+ return nil
+ })
+
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("orchestrator task restart batch failed")
+ }
+
+ r.restartTasks = make(map[string]struct{})
+ }
+}
+
+func (r *ReplicatedOrchestrator) restartTasksByNodeID(ctx context.Context, nodeID string) {
+ var err error
+ r.store.View(func(tx store.ReadTx) {
+ var tasks []*api.Task
+ tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
+ if err != nil {
+ return
+ }
+
+ for _, t := range tasks {
+ if t.DesiredState > api.TaskStateRunning {
+ continue
+ }
+ service := store.GetService(tx, t.ServiceID)
+ if isReplicatedService(service) {
+ r.restartTasks[t.ID] = struct{}{}
+ }
+ }
+ })
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("failed to list tasks to restart")
+ }
+}
+
+func (r *ReplicatedOrchestrator) handleNodeChange(ctx context.Context, n *api.Node) {
+ if !invalidNode(n) {
+ return
+ }
+
+ r.restartTasksByNodeID(ctx, n.ID)
+}
+
+func (r *ReplicatedOrchestrator) handleTaskChange(ctx context.Context, t *api.Task) {
+ // If we already set the desired state past TaskStateRunning, there is no
+ // further action necessary.
+ if t.DesiredState > api.TaskStateRunning {
+ return
+ }
+
+ var (
+ n *api.Node
+ service *api.Service
+ )
+ r.store.View(func(tx store.ReadTx) {
+ if t.NodeID != "" {
+ n = store.GetNode(tx, t.NodeID)
+ }
+ if t.ServiceID != "" {
+ service = store.GetService(tx, t.ServiceID)
+ }
+ })
+
+ if !isReplicatedService(service) {
+ return
+ }
+
+ if t.Status.State > api.TaskStateRunning ||
+ (t.NodeID != "" && invalidNode(n)) {
+ r.restartTasks[t.ID] = struct{}{}
+ }
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/orchestrator/updater.go b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/updater.go
new file mode 100644
index 0000000000..83ac19c802
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/orchestrator/updater.go
@@ -0,0 +1,228 @@
+package orchestrator
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+ "github.com/docker/swarmkit/manager/state/watch"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+)
+
+// UpdateSupervisor supervises a set of updates. It's responsible for keeping track of updates,
+// shutting them down and replacing them.
+type UpdateSupervisor struct {
+ store *store.MemoryStore
+ restarts *RestartSupervisor
+ updates map[string]*Updater
+ l sync.Mutex
+}
+
+// NewUpdateSupervisor creates a new UpdateSupervisor.
+func NewUpdateSupervisor(store *store.MemoryStore, restartSupervisor *RestartSupervisor) *UpdateSupervisor {
+ return &UpdateSupervisor{
+ store: store,
+ updates: make(map[string]*Updater),
+ restarts: restartSupervisor,
+ }
+}
+
+// Update starts an Update of `tasks` belonging to `service` in the background and returns immediately.
+// If an update for that service was already in progress, it will be cancelled before the new one starts.
+func (u *UpdateSupervisor) Update(ctx context.Context, service *api.Service, tasks []*api.Task) {
+ u.l.Lock()
+ defer u.l.Unlock()
+
+ id := service.ID
+
+ if update, ok := u.updates[id]; ok {
+ update.Cancel()
+ }
+
+ update := NewUpdater(u.store, u.restarts)
+ u.updates[id] = update
+ go func() {
+ update.Run(ctx, service, tasks)
+ u.l.Lock()
+ if u.updates[id] == update {
+ delete(u.updates, id)
+ }
+ u.l.Unlock()
+ }()
+}
+
+// CancelAll cancels all current updates.
+func (u *UpdateSupervisor) CancelAll() {
+ u.l.Lock()
+ defer u.l.Unlock()
+
+ for _, update := range u.updates {
+ update.Cancel()
+ }
+}
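+
+// Editor's note: a minimal usage sketch, not part of the vendored source,
+// assuming an existing *store.MemoryStore s and *RestartSupervisor rs:
+//
+//    supervisor := NewUpdateSupervisor(s, rs)
+//    supervisor.Update(ctx, service, tasks) // returns immediately; runs in background
+//    // A second Update for the same service cancels the first one.
+//    supervisor.CancelAll() // stop all in-flight updates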
+
+// Updater updates a set of tasks to a new version.
+type Updater struct {
+ store *store.MemoryStore
+ watchQueue *watch.Queue
+ restarts *RestartSupervisor
+
+ // stopChan signals to the state machine to stop running.
+ stopChan chan struct{}
+ // doneChan is closed when the state machine terminates.
+ doneChan chan struct{}
+}
+
+// NewUpdater creates a new Updater.
+func NewUpdater(store *store.MemoryStore, restartSupervisor *RestartSupervisor) *Updater {
+ return &Updater{
+ store: store,
+ watchQueue: store.WatchQueue(),
+ restarts: restartSupervisor,
+ stopChan: make(chan struct{}),
+ doneChan: make(chan struct{}),
+ }
+}
+
+// Cancel cancels the current update immediately. It blocks until the cancellation is confirmed.
+func (u *Updater) Cancel() {
+ close(u.stopChan)
+ <-u.doneChan
+}
+
+// Run starts the update and returns only once it's complete or cancelled.
+func (u *Updater) Run(ctx context.Context, service *api.Service, tasks []*api.Task) {
+ defer close(u.doneChan)
+
+ dirtyTasks := []*api.Task{}
+ for _, t := range tasks {
+ if !reflect.DeepEqual(service.Spec.Task, t.Spec) {
+ dirtyTasks = append(dirtyTasks, t)
+ }
+ }
+ // Abort immediately if all tasks are clean.
+ if len(dirtyTasks) == 0 {
+ return
+ }
+
+ parallelism := 0
+ if service.Spec.Update != nil {
+ parallelism = int(service.Spec.Update.Parallelism)
+ }
+ if parallelism == 0 {
+ // TODO(aluzzardi): We could try to optimize unlimited parallelism by performing updates in a single
+ // goroutine using a batch transaction.
+ parallelism = len(dirtyTasks)
+ }
+
+ // Start the workers.
+ taskQueue := make(chan *api.Task)
+ wg := sync.WaitGroup{}
+ wg.Add(parallelism)
+ for i := 0; i < parallelism; i++ {
+ go func() {
+ u.worker(ctx, service, taskQueue)
+ wg.Done()
+ }()
+ }
+
+slotsLoop:
+ for _, t := range dirtyTasks {
+ // Wait for a worker to pick up the task or abort the update, whichever comes first.
+ select {
+ case <-u.stopChan:
+ // A plain break would only exit the select, not the loop.
+ break slotsLoop
+
+ case taskQueue <- t:
+ }
+ }
+
+ close(taskQueue)
+ wg.Wait()
+}
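+
+// Editor's note: the dispatch loop above is a standard bounded worker pool.
+// A self-contained sketch of the same pattern (the process function is
+// hypothetical, for illustration only):
+//
+//    queue := make(chan *api.Task)
+//    var wg sync.WaitGroup
+//    wg.Add(parallelism)
+//    for i := 0; i < parallelism; i++ {
+//        go func() {
+//            defer wg.Done()
+//            for t := range queue {
+//                process(t) // hypothetical per-task work
+//            }
+//        }()
+//    }
+//    for _, t := range dirtyTasks {
+//        queue <- t // blocks until a worker is free
+//    }
+//    close(queue) // let workers drain and exit
+//    wg.Wait()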
+
+func (u *Updater) worker(ctx context.Context, service *api.Service, queue <-chan *api.Task) {
+ for t := range queue {
+ updated := newTask(service, t.Slot)
+ updated.DesiredState = api.TaskStateReady
+ if isGlobalService(service) {
+ updated.NodeID = t.NodeID
+ }
+
+ if err := u.updateTask(ctx, service, t, updated); err != nil {
+ log.G(ctx).WithError(err).WithField("task.id", t.ID).Error("update failed")
+ }
+
+ if service.Spec.Update != nil && (service.Spec.Update.Delay.Seconds != 0 || service.Spec.Update.Delay.Nanos != 0) {
+ delay, err := ptypes.Duration(&service.Spec.Update.Delay)
+ if err != nil {
+ log.G(ctx).WithError(err).Error("invalid update delay")
+ continue
+ }
+ select {
+ case <-time.After(delay):
+ case <-u.stopChan:
+ return
+ }
+ }
+ }
+}
+
+func (u *Updater) updateTask(ctx context.Context, service *api.Service, original, updated *api.Task) error {
+ log.G(ctx).Debugf("replacing %s with %s", original.ID, updated.ID)
+ // Kick off the watch before even creating the updated task. This is in order to avoid missing any event.
+ taskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{
+ Task: &api.Task{ID: updated.ID},
+ Checks: []state.TaskCheckFunc{state.TaskCheckID},
+ })
+ defer cancel()
+
+ var delayStartCh <-chan struct{}
+ // Atomically create the updated task and bring down the old one.
+ err := u.store.Update(func(tx store.Tx) error {
+ t := store.GetTask(tx, original.ID)
+ if t == nil {
+ return fmt.Errorf("task %s not found while trying to update it", original.ID)
+ }
+ t.DesiredState = api.TaskStateShutdown
+ if err := store.UpdateTask(tx, t); err != nil {
+ return err
+ }
+
+ if err := store.CreateTask(tx, updated); err != nil {
+ return err
+ }
+
+ // Wait for the old task to stop or time out, and then set the new one
+ // to RUNNING.
+ delayStartCh = u.restarts.DelayStart(ctx, tx, original, updated.ID, 0, true)
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ <-delayStartCh
+
+ // Wait for the new task to come up.
+ // TODO(aluzzardi): Consider adding a timeout here.
+ for {
+ select {
+ case e := <-taskUpdates:
+ updated = e.(state.EventUpdateTask).Task
+ if updated.Status.State >= api.TaskStateRunning {
+ return nil
+ }
+ case <-u.stopChan:
+ return nil
+ }
+ }
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/raftpicker/cluster.go b/vendor/src/github.com/docker/swarmkit/manager/raftpicker/cluster.go
new file mode 100644
index 0000000000..86e5e080f5
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/raftpicker/cluster.go
@@ -0,0 +1,12 @@
+package raftpicker
+
+// AddrSelector is an interface that tracks the cluster's leader address.
+type AddrSelector interface {
+ LeaderAddr() (string, error)
+}
+
+// RaftCluster is an interface that combines useful methods for clustering.
+type RaftCluster interface {
+ AddrSelector
+ IsLeader() bool
+}
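+
+// Editor's note: a minimal sketch of implementing these interfaces, not part
+// of the vendored source; a fixed leader address is assumed for illustration:
+//
+//    type staticCluster struct{ addr string }
+//
+//    func (s staticCluster) LeaderAddr() (string, error) { return s.addr, nil }
+//    func (s staticCluster) IsLeader() bool              { return false }
+//
+// staticCluster satisfies RaftCluster and could back a ConnSelector in tests.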
diff --git a/vendor/src/github.com/docker/swarmkit/manager/raftpicker/raftpicker.go b/vendor/src/github.com/docker/swarmkit/manager/raftpicker/raftpicker.go
new file mode 100644
index 0000000000..8a62de69a6
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/raftpicker/raftpicker.go
@@ -0,0 +1,133 @@
+package raftpicker
+
+import (
+ "sync"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/transport"
+)
+
+// picker always picks the address of the cluster leader.
+type picker struct {
+ mu sync.Mutex
+ addr string
+ raft AddrSelector
+ conn *grpc.Conn
+ cc *grpc.ClientConn
+}
+
+// Init does initial processing for the Picker, e.g., initiating some connections.
+func (p *picker) Init(cc *grpc.ClientConn) error {
+ p.cc = cc
+ return nil
+}
+
+func (p *picker) initConn() error {
+ if p.conn == nil {
+ conn, err := grpc.NewConn(p.cc)
+ if err != nil {
+ return err
+ }
+ p.conn = conn
+ }
+ return nil
+}
+
+// Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC
+// or some error happens.
+func (p *picker) Pick(ctx context.Context) (transport.ClientTransport, error) {
+ p.mu.Lock()
+ if err := p.initConn(); err != nil {
+ p.mu.Unlock()
+ return nil, err
+ }
+ p.mu.Unlock()
+
+ addr, err := p.raft.LeaderAddr()
+ if err != nil {
+ return nil, err
+ }
+ p.mu.Lock()
+ if p.addr != addr {
+ p.addr = addr
+ p.conn.NotifyReset()
+ }
+ p.mu.Unlock()
+ return p.conn.Wait(ctx)
+}
+
+// PickAddr picks a peer address for connecting. This will be called repeatedly
+// when connecting or reconnecting.
+func (p *picker) PickAddr() (string, error) {
+ addr, err := p.raft.LeaderAddr()
+ if err != nil {
+ return "", err
+ }
+ p.mu.Lock()
+ p.addr = addr
+ p.mu.Unlock()
+ return addr, nil
+}
+
+// State returns the connectivity state of the underlying connections.
+func (p *picker) State() (grpc.ConnectivityState, error) {
+ return p.conn.State(), nil
+}
+
+// WaitForStateChange blocks until the state changes to something other than
+// the sourceState. It returns the new state or an error.
+func (p *picker) WaitForStateChange(ctx context.Context, sourceState grpc.ConnectivityState) (grpc.ConnectivityState, error) {
+ return p.conn.WaitForStateChange(ctx, sourceState)
+}
+
+// Reset resets the current connection and forces a reconnect to another address.
+func (p *picker) Reset() error {
+ p.conn.NotifyReset()
+ return nil
+}
+
+// Close closes all the Conns owned by this Picker.
+func (p *picker) Close() error {
+ return p.conn.Close()
+}
+
+// ConnSelector is a struct for obtaining a connection whose picker tracks the raft leader.
+type ConnSelector struct {
+ mu sync.Mutex
+ cc *grpc.ClientConn
+ cluster RaftCluster
+ opts []grpc.DialOption
+}
+
+// NewConnSelector returns a new ConnSelector with the given cluster and
+// grpc.DialOptions, which will be used for Dial on the first call of Conn.
+func NewConnSelector(cluster RaftCluster, opts ...grpc.DialOption) *ConnSelector {
+ return &ConnSelector{
+ cluster: cluster,
+ opts: opts,
+ }
+}
+
+// Conn returns a *grpc.ClientConn whose picker always selects the raft
+// cluster leader. The internal connection is established lazily on this call.
+// It can return an error if the cluster wasn't ready at the moment of the call.
+func (c *ConnSelector) Conn() (*grpc.ClientConn, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.cc != nil {
+ return c.cc, nil
+ }
+ addr, err := c.cluster.LeaderAddr()
+ if err != nil {
+ return nil, err
+ }
+ picker := &picker{raft: c.cluster, addr: addr}
+ opts := append(c.opts, grpc.WithPicker(picker))
+ cc, err := grpc.Dial(addr, opts...)
+ if err != nil {
+ return nil, err
+ }
+ c.cc = cc
+ return c.cc, nil
+}
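+
+// Editor's note: a usage sketch, not part of the vendored source; cluster is
+// assumed to implement RaftCluster, and insecure transport is used for brevity:
+//
+//    cs := NewConnSelector(cluster, grpc.WithInsecure())
+//    cc, err := cs.Conn() // dials the current leader lazily
+//    if err != nil {
+//        // the cluster had no elected leader yet; retry later
+//    }
+//    // cc can now be used with any generated gRPC client; its picker
+//    // re-targets the leader after elections.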
diff --git a/vendor/src/github.com/docker/swarmkit/manager/scheduler/constraint.go b/vendor/src/github.com/docker/swarmkit/manager/scheduler/constraint.go
new file mode 100644
index 0000000000..b1b6d31aa3
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/scheduler/constraint.go
@@ -0,0 +1,74 @@
+package scheduler
+
+import (
+ "strings"
+
+ "github.com/docker/swarmkit/api"
+)
+
+// ConstraintFilter selects only nodes that match certain labels.
+type ConstraintFilter struct {
+ constraints []Expr
+}
+
+// SetTask returns true when the filter is enabled for a given task.
+func (f *ConstraintFilter) SetTask(t *api.Task) bool {
+ if t.Spec.Placement != nil && len(t.Spec.Placement.Constraints) > 0 {
+ constraints, err := ParseExprs(t.Spec.Placement.Constraints)
+ if err == nil {
+ f.constraints = constraints
+ return true
+ }
+ }
+ return false
+}
+
+// Check returns true if the task's constraints are satisfied by the given node.
+func (f *ConstraintFilter) Check(n *NodeInfo) bool {
+ for _, constraint := range f.constraints {
+ switch constraint.Key {
+ case "node.id":
+ if !constraint.Match(n.ID) {
+ return false
+ }
+ case "node.name":
+ // If this node doesn't have a hostname, that is
+ // equivalent to matching an empty hostname,
+ // where '==' fails and '!=' matches.
+ if n.Description == nil {
+ if !constraint.Match("") {
+ return false
+ }
+ continue
+ }
+ if !constraint.Match(n.Description.Hostname) {
+ return false
+ }
+ default:
+ // The default case is a node label in the form 'node.labels.key==value';
+ // if the key is not well formed, the check always fails.
+ if !strings.HasPrefix(constraint.Key, "node.labels.") {
+ return false
+ }
+ // If the node doesn't have any labels, that is
+ // equivalent to matching an empty value:
+ // 'node.labels.key!=value' should pass and
+ // 'node.labels.key==value' should fail.
+ if n.Spec.Annotations.Labels == nil {
+ if !constraint.Match("") {
+ return false
+ }
+ continue
+ }
+ label := constraint.Key[len("node.labels."):]
+ // if the node doesn't have this specific label,
+ // val is an empty string
+ val := n.Spec.Annotations.Labels[label]
+ if !constraint.Match(val) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/scheduler/expr.go b/vendor/src/github.com/docker/swarmkit/manager/scheduler/expr.go
new file mode 100644
index 0000000000..eda9f36531
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/scheduler/expr.go
@@ -0,0 +1,95 @@
+package scheduler
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+const (
+ eq = iota
+ noteq
+)
+
+var (
+ alphaNumeric = regexp.MustCompile(`^(?i)[a-z_][a-z0-9\-_.]+$`)
+ // value can be alphanumeric and some special characters. It shouldn't contain
+ // current or future operators like '>', '<', '~', etc.
+ valuePattern = regexp.MustCompile(`^(?i)[a-z0-9:\-_\s\.\*\(\)\?\+\[\]\\\^\$\|\/]+$`)
+
+ // operators defines list of accepted operators
+ operators = []string{"==", "!="}
+)
+
+// Expr defines a constraint
+type Expr struct {
+ Key string
+ operator int
+ exp string
+}
+
+// ParseExprs parses list of constraints into Expr list
+func ParseExprs(env []string) ([]Expr, error) {
+ exprs := []Expr{}
+ for _, e := range env {
+ found := false
+ // each expr is in the form of "key op value"
+ for i, op := range operators {
+ if !strings.Contains(e, op) {
+ continue
+ }
+ // split with the op
+ parts := strings.SplitN(e, op, 2)
+
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid expr: %s", e)
+ }
+
+ part0 := strings.TrimSpace(parts[0])
+ // validate Key
+ matched := alphaNumeric.MatchString(part0)
+ if !matched {
+ return nil, fmt.Errorf("key '%s' is invalid", part0)
+ }
+
+ part1 := strings.TrimSpace(parts[1])
+
+ // validate Value
+ matched = valuePattern.MatchString(part1)
+ if !matched {
+ return nil, fmt.Errorf("value '%s' is invalid", part1)
+ }
+ // TODO(dongluochen): revisit requirements to see if globbing or regex are useful
+ exprs = append(exprs, Expr{Key: part0, operator: i, exp: part1})
+
+ found = true
+ break // found an op, move to next entry
+ }
+ if !found {
+ return nil, fmt.Errorf("constraint expected one operator from %s", strings.Join(operators, ", "))
+ }
+ }
+ return exprs, nil
+}
+
+// Match checks if the Expr matches the target strings.
+func (e *Expr) Match(whats ...string) bool {
+ var match bool
+
+ // full string match
+ for _, what := range whats {
+ if e.exp == what {
+ match = true
+ break
+ }
+ }
+
+ switch e.operator {
+ case eq:
+ return match
+ case noteq:
+ return !match
+ }
+
+ return false
+}
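+
+// Editor's note: a small worked example, not part of the vendored source:
+//
+//    exprs, err := ParseExprs([]string{"node.labels.region==east"})
+//    if err != nil {
+//        // malformed constraint
+//    }
+//    exprs[0].Match("east") // true:  "==" with an exact match
+//    exprs[0].Match("west") // false: "==" without a match
+//
+// With the "!=" operator the results are inverted.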
diff --git a/vendor/src/github.com/docker/swarmkit/manager/scheduler/filter.go b/vendor/src/github.com/docker/swarmkit/manager/scheduler/filter.go
new file mode 100644
index 0000000000..c0d23d3c5c
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/scheduler/filter.go
@@ -0,0 +1,131 @@
+package scheduler
+
+import "github.com/docker/swarmkit/api"
+
+// Filter checks whether the given task can run on the given node.
+// A filter may only operate on one task at a time.
+type Filter interface {
+ // SetTask returns true when the filter is enabled for a given task
+ // and assigns the task to the filter. It returns false if the filter
+ // isn't applicable to this task. For instance, a constraints filter
+ // would return `false` if the task doesn't contain any constraints.
+ SetTask(*api.Task) bool
+
+ // Check returns true if the task assigned by SetTask can be scheduled
+ // into the given node. This function should not be called if SetTask
+ // returned false.
+ Check(*NodeInfo) bool
+}
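+
+// Editor's note: a minimal sketch of a custom Filter, not part of the vendored
+// source; it only admits nodes that report a non-empty hostname:
+//
+//    type hostnameFilter struct{}
+//
+//    func (f *hostnameFilter) SetTask(_ *api.Task) bool { return true }
+//
+//    func (f *hostnameFilter) Check(n *NodeInfo) bool {
+//        return n.Description != nil && n.Description.Hostname != ""
+//    }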
+
+// ReadyFilter checks that the node is ready to schedule tasks.
+type ReadyFilter struct {
+}
+
+// SetTask returns true when the filter is enabled for a given task.
+func (f *ReadyFilter) SetTask(_ *api.Task) bool {
+ return true
+}
+
+// Check returns true if the task can be scheduled into the given node.
+func (f *ReadyFilter) Check(n *NodeInfo) bool {
+ return n.Status.State == api.NodeStatus_READY &&
+ n.Spec.Availability == api.NodeAvailabilityActive
+}
+
+// ResourceFilter checks that the node has enough resources available to run
+// the task.
+type ResourceFilter struct {
+ reservations *api.Resources
+}
+
+// SetTask returns true when the filter is enabled for a given task.
+func (f *ResourceFilter) SetTask(t *api.Task) bool {
+ r := t.Spec.Resources
+ if r == nil || r.Reservations == nil {
+ return false
+ }
+ if r.Reservations.NanoCPUs == 0 && r.Reservations.MemoryBytes == 0 {
+ return false
+ }
+ f.reservations = r.Reservations
+ return true
+}
+
+// Check returns true if the task can be scheduled into the given node.
+func (f *ResourceFilter) Check(n *NodeInfo) bool {
+ if f.reservations.NanoCPUs > n.AvailableResources.NanoCPUs {
+ return false
+ }
+
+ if f.reservations.MemoryBytes > n.AvailableResources.MemoryBytes {
+ return false
+ }
+
+ return true
+}
+
+// PluginFilter checks that the node has the volume and network plugins required by the task installed.
+type PluginFilter struct {
+ t *api.Task
+}
+
+// SetTask returns true when the filter is enabled for a given task.
+func (f *PluginFilter) SetTask(t *api.Task) bool {
+ c := t.Spec.GetContainer()
+
+ var volumeTemplates bool
+ if c != nil {
+ for _, mount := range c.Mounts {
+ if mount.Type == api.MountTypeVolume &&
+ mount.VolumeOptions != nil &&
+ mount.VolumeOptions.DriverConfig != nil &&
+ mount.VolumeOptions.DriverConfig.Name != "" &&
+ mount.VolumeOptions.DriverConfig.Name != "local" {
+ volumeTemplates = true
+ }
+ }
+ }
+
+ if (c != nil && volumeTemplates) || len(t.Networks) > 0 {
+ f.t = t
+ return true
+ }
+
+ return false
+}
+
+// Check returns true if the task can be scheduled into the given node.
+// TODO(amitshukla): investigate storing Plugins as a map so it can be easily probed
+func (f *PluginFilter) Check(n *NodeInfo) bool {
+ // Get list of plugins on the node
+ nodePlugins := n.Description.Engine.Plugins
+
+ // Check if all volume plugins required by task are installed on node
+ container := f.t.Spec.GetContainer()
+ if container != nil {
+ for _, mount := range container.Mounts {
+ if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil {
+ if !f.pluginExistsOnNode("Volume", mount.VolumeOptions.DriverConfig.Name, nodePlugins) {
+ return false
+ }
+ }
+ }
+ }
+
+ // Check if all network plugins required by task are installed on node
+ for _, tn := range f.t.Networks {
+ if !f.pluginExistsOnNode("Network", tn.Network.DriverState.Name, nodePlugins) {
+ return false
+ }
+ }
+ return true
+}
+
+func (f *PluginFilter) pluginExistsOnNode(pluginType string, pluginName string, nodePlugins []api.PluginDescription) bool {
+ for _, np := range nodePlugins {
+ if pluginType == np.Type && pluginName == np.Name {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/scheduler/indexed_node_heap.go b/vendor/src/github.com/docker/swarmkit/manager/scheduler/indexed_node_heap.go
new file mode 100644
index 0000000000..90c44f85be
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/scheduler/indexed_node_heap.go
@@ -0,0 +1,165 @@
+package scheduler
+
+import (
+ "container/heap"
+
+ "github.com/docker/swarmkit/api"
+)
+
+// A nodeHeap implements heap.Interface for nodes. It also includes an index
+// by node id.
+type nodeHeap struct {
+ heap []NodeInfo
+ index map[string]int // map from node id to heap index
+}
+
+func (nh nodeHeap) Len() int {
+ return len(nh.heap)
+}
+
+func (nh nodeHeap) Less(i, j int) bool {
+ return len(nh.heap[i].Tasks) < len(nh.heap[j].Tasks)
+}
+
+func (nh nodeHeap) Swap(i, j int) {
+ nh.heap[i], nh.heap[j] = nh.heap[j], nh.heap[i]
+ nh.index[nh.heap[i].ID] = i
+ nh.index[nh.heap[j].ID] = j
+}
+
+func (nh *nodeHeap) Push(x interface{}) {
+ n := len(nh.heap)
+ item := x.(NodeInfo)
+ nh.index[item.ID] = n
+ nh.heap = append(nh.heap, item)
+}
+
+func (nh *nodeHeap) Pop() interface{} {
+ old := nh.heap
+ n := len(old)
+ item := old[n-1]
+ delete(nh.index, item.ID)
+ nh.heap = old[0 : n-1]
+ return item
+}
+
+func (nh *nodeHeap) alloc(n int) {
+ nh.heap = make([]NodeInfo, 0, n)
+ nh.index = make(map[string]int, n)
+}
+
+func (nh *nodeHeap) peek() *NodeInfo {
+ if len(nh.heap) == 0 {
+ return nil
+ }
+ return &nh.heap[0]
+}
+
+// nodeInfo returns the NodeInfo struct for a given node identified by its ID.
+func (nh *nodeHeap) nodeInfo(nodeID string) NodeInfo {
+ index, ok := nh.index[nodeID]
+ if ok {
+ return nh.heap[index]
+ }
+ return NodeInfo{}
+}
+
+// addOrUpdateNode updates the heap entry for a given node. It adds the node
+// to the heap if it wasn't already tracked.
+func (nh *nodeHeap) addOrUpdateNode(n NodeInfo) {
+ if n.Node == nil {
+ return
+ }
+ index, ok := nh.index[n.ID]
+ if ok {
+ nh.heap[index] = n
+ heap.Fix(nh, index)
+ } else {
+ heap.Push(nh, n)
+ }
+}
+
+// updateNode updates the heap entry for a given node. It ignores the update
+// if the node isn't already tracked in the heap.
+func (nh *nodeHeap) updateNode(n NodeInfo) {
+ if n.Node == nil {
+ return
+ }
+ index, ok := nh.index[n.ID]
+ if ok {
+ nh.heap[index] = n
+ heap.Fix(nh, index)
+ }
+}
+
+func (nh *nodeHeap) remove(nodeID string) {
+ index, ok := nh.index[nodeID]
+ if ok {
+ nh.heap[index].Tasks = nil
+ heap.Fix(nh, index)
+ heap.Pop(nh)
+ }
+}
+
+func (nh *nodeHeap) findMin(meetsConstraints func(*NodeInfo) bool, scanAllNodes bool) (*api.Node, int) {
+ if scanAllNodes {
+ return nh.scanAllToFindMin(meetsConstraints)
+ }
+ return nh.searchHeapToFindMin(meetsConstraints)
+}
+
+// scanAllToFindMin scans all nodes to find the best node that meets the
+// constraints and has the lightest workload.
+func (nh *nodeHeap) scanAllToFindMin(meetsConstraints func(*NodeInfo) bool) (*api.Node, int) {
+ var bestNode *api.Node
+ minTasks := int(^uint(0) >> 1) // max int
+
+ for i := 0; i < len(nh.heap); i++ {
+ heapEntry := &nh.heap[i]
+ if meetsConstraints(heapEntry) && len(heapEntry.Tasks) < minTasks {
+ bestNode = heapEntry.Node
+ minTasks = len(heapEntry.Tasks)
+ }
+ }
+
+ return bestNode, minTasks
+}
+
+// searchHeapToFindMin searches the heap to find the best node that meets the
+// constraints and has the lightest workload.
+func (nh *nodeHeap) searchHeapToFindMin(meetsConstraints func(*NodeInfo) bool) (*api.Node, int) {
+ var bestNode *api.Node
+ minTasks := int(^uint(0) >> 1) // max int
+
+ if nh == nil || len(nh.heap) == 0 {
+ return bestNode, minTasks
+ }
+
+ // push root to stack for search
+ stack := []int{0}
+
+ for len(stack) != 0 {
+ // pop an element
+ idx := stack[len(stack)-1]
+ stack = stack[0 : len(stack)-1]
+
+ heapEntry := &nh.heap[idx]
+
+ if len(heapEntry.Tasks) >= minTasks {
+ continue
+ }
+
+ if meetsConstraints(heapEntry) {
+ // meet constraints, update results
+ bestNode = heapEntry.Node
+ minTasks = len(heapEntry.Tasks)
+ } else {
+ // otherwise, push 2 children to stack for further search
+ if 2*idx+1 < len(nh.heap) {
+ stack = append(stack, 2*idx+1)
+ }
+ if 2*idx+2 < len(nh.heap) {
+ stack = append(stack, 2*idx+2)
+ }
+ }
+ }
+ return bestNode, minTasks
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go b/vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go
new file mode 100644
index 0000000000..a6e92fa6c3
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go
@@ -0,0 +1,64 @@
+package scheduler
+
+import "github.com/docker/swarmkit/api"
+
+// NodeInfo contains a node and some additional metadata.
+type NodeInfo struct {
+ *api.Node
+ Tasks map[string]*api.Task
+ AvailableResources api.Resources
+}
+
+func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api.Resources) NodeInfo {
+ nodeInfo := NodeInfo{
+ Node: n,
+ Tasks: make(map[string]*api.Task),
+ AvailableResources: availableResources,
+ }
+
+ for _, t := range tasks {
+ nodeInfo.addTask(t)
+ }
+ return nodeInfo
+}
+
+func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool {
+ if nodeInfo.Tasks == nil || nodeInfo.Node == nil {
+ return false
+ }
+ if _, ok := nodeInfo.Tasks[t.ID]; !ok {
+ return false
+ }
+
+ delete(nodeInfo.Tasks, t.ID)
+ reservations := taskReservations(t.Spec)
+ nodeInfo.AvailableResources.MemoryBytes += reservations.MemoryBytes
+ nodeInfo.AvailableResources.NanoCPUs += reservations.NanoCPUs
+
+ return true
+}
+
+func (nodeInfo *NodeInfo) addTask(t *api.Task) bool {
+ if nodeInfo.Node == nil {
+ return false
+ }
+ if nodeInfo.Tasks == nil {
+ nodeInfo.Tasks = make(map[string]*api.Task)
+ }
+ if _, ok := nodeInfo.Tasks[t.ID]; !ok {
+ nodeInfo.Tasks[t.ID] = t
+ reservations := taskReservations(t.Spec)
+ nodeInfo.AvailableResources.MemoryBytes -= reservations.MemoryBytes
+ nodeInfo.AvailableResources.NanoCPUs -= reservations.NanoCPUs
+ return true
+ }
+
+ return false
+}
+
+func taskReservations(spec api.TaskSpec) (reservations api.Resources) {
+ if spec.Resources != nil && spec.Resources.Reservations != nil {
+ reservations = *spec.Resources.Reservations
+ }
+ return
+}
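+
+// Editor's note: a worked example of the accounting above, not part of the
+// vendored source. If a node offers 4e9 NanoCPUs and 8 GiB of memory, and
+// task t reserves 1e9 NanoCPUs and 1 GiB:
+//
+//    ni := newNodeInfo(node, nil, api.Resources{NanoCPUs: 4e9, MemoryBytes: 8 << 30})
+//    ni.addTask(t)    // AvailableResources drops to 3e9 NanoCPUs / 7 GiB
+//    ni.removeTask(t) // and is restored to 4e9 NanoCPUs / 8 GiB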
diff --git a/vendor/src/github.com/docker/swarmkit/manager/scheduler/pipeline.go b/vendor/src/github.com/docker/swarmkit/manager/scheduler/pipeline.go
new file mode 100644
index 0000000000..517319376c
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/scheduler/pipeline.go
@@ -0,0 +1,54 @@
+package scheduler
+
+import "github.com/docker/swarmkit/api"
+
+var (
+ defaultFilters = []Filter{
+ // Always check for readiness first.
+ &ReadyFilter{},
+ &ResourceFilter{},
+ &PluginFilter{},
+ &ConstraintFilter{},
+ }
+)
+
+type checklistEntry struct {
+ f Filter
+ enabled bool
+}
+
+// Pipeline runs a set of filters against nodes.
+type Pipeline struct {
+ checklist []checklistEntry
+}
+
+// NewPipeline returns a pipeline with the default set of filters.
+func NewPipeline() *Pipeline {
+ p := &Pipeline{}
+
+ for _, f := range defaultFilters {
+ p.checklist = append(p.checklist, checklistEntry{f: f})
+ }
+
+ return p
+}
+
+// Process a node through the filter pipeline.
+// Returns true if all filters pass, false otherwise.
+func (p *Pipeline) Process(n *NodeInfo) bool {
+ for _, entry := range p.checklist {
+ if entry.enabled && !entry.f.Check(n) {
+ // Immediately stop on first failure.
+ return false
+ }
+ }
+ return true
+}
+
+// SetTask sets up the filters to process a new task. Once this is called,
+// Process can be called repeatedly to try to assign the task to various nodes.
+func (p *Pipeline) SetTask(t *api.Task) {
+ for i := range p.checklist {
+ p.checklist[i].enabled = p.checklist[i].f.SetTask(t)
+ }
+}
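+
+// Editor's note: a usage sketch, not part of the vendored source:
+//
+//    p := NewPipeline()
+//    p.SetTask(task) // enables only the filters relevant to this task
+//    for i := range candidates {
+//        if p.Process(&candidates[i]) {
+//            // candidates[i] can run the task
+//        }
+//    }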
diff --git a/vendor/src/github.com/docker/swarmkit/manager/scheduler/scheduler.go b/vendor/src/github.com/docker/swarmkit/manager/scheduler/scheduler.go
new file mode 100644
index 0000000000..5b4a64c3db
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/scheduler/scheduler.go
@@ -0,0 +1,433 @@
+package scheduler
+
+import (
+ "container/heap"
+ "container/list"
+ "time"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+ "golang.org/x/net/context"
+)
+
+type schedulingDecision struct {
+ old *api.Task
+ new *api.Task
+}
+
+// Scheduler assigns tasks to nodes.
+type Scheduler struct {
+ store *store.MemoryStore
+ unassignedTasks *list.List
+ // preassignedTasks already have a NodeID; they only need resource validation
+ preassignedTasks map[string]*api.Task
+ nodeHeap nodeHeap
+ allTasks map[string]*api.Task
+ pipeline *Pipeline
+
+ // stopChan signals to the state machine to stop running
+ stopChan chan struct{}
+ // doneChan is closed when the state machine terminates
+ doneChan chan struct{}
+
+ // This currently exists only for benchmarking. It tells the scheduler
+ // to scan the whole heap instead of taking the minimum-valued node
+ // blindly.
+ scanAllNodes bool
+}
+
+// New creates a new scheduler.
+func New(store *store.MemoryStore) *Scheduler {
+ return &Scheduler{
+ store: store,
+ unassignedTasks: list.New(),
+ preassignedTasks: make(map[string]*api.Task),
+ allTasks: make(map[string]*api.Task),
+ stopChan: make(chan struct{}),
+ doneChan: make(chan struct{}),
+ pipeline: NewPipeline(),
+ }
+}
+
+func (s *Scheduler) setupTasksList(tx store.ReadTx) error {
+ tasks, err := store.FindTasks(tx, store.All)
+ if err != nil {
+ return err
+ }
+
+ tasksByNode := make(map[string]map[string]*api.Task)
+ for _, t := range tasks {
+ // Ignore all tasks that have not reached ALLOCATED
+ // state.
+ if t.Status.State < api.TaskStateAllocated {
+ continue
+ }
+
+ s.allTasks[t.ID] = t
+ if t.NodeID == "" {
+ s.enqueue(t)
+ continue
+ }
+ // preassigned tasks need their resource requirements validated on the corresponding node
+ if t.Status.State == api.TaskStateAllocated {
+ s.preassignedTasks[t.ID] = t
+ continue
+ }
+
+ if tasksByNode[t.NodeID] == nil {
+ tasksByNode[t.NodeID] = make(map[string]*api.Task)
+ }
+ tasksByNode[t.NodeID][t.ID] = t
+ }
+
+ if err := s.buildNodeHeap(tx, tasksByNode); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Run is the scheduler event loop.
+func (s *Scheduler) Run(ctx context.Context) error {
+ defer close(s.doneChan)
+
+ updates, cancel, err := store.ViewAndWatch(s.store, s.setupTasksList)
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("snapshot store update failed")
+ return err
+ }
+ defer cancel()
+
+ // Validate resources for preassigned tasks.
+ // Do this before other tasks because preassigned tasks (e.g., tasks
+ // of global services) should start before other tasks.
+ s.processPreassignedTasks(ctx)
+
+ // Queue all unassigned tasks before processing changes.
+ s.tick(ctx)
+
+ pendingChanges := 0
+
+ // Watch for changes.
+ for {
+ select {
+ case event := <-updates:
+ switch v := event.(type) {
+ case state.EventCreateTask:
+ pendingChanges += s.createTask(ctx, v.Task)
+ case state.EventUpdateTask:
+ pendingChanges += s.updateTask(ctx, v.Task)
+ case state.EventDeleteTask:
+ s.deleteTask(ctx, v.Task)
+ case state.EventCreateNode:
+ s.createOrUpdateNode(v.Node)
+ pendingChanges++
+ case state.EventUpdateNode:
+ s.createOrUpdateNode(v.Node)
+ pendingChanges++
+ case state.EventDeleteNode:
+ s.nodeHeap.remove(v.Node.ID)
+ case state.EventCommit:
+ if len(s.preassignedTasks) > 0 {
+ s.processPreassignedTasks(ctx)
+ }
+ if pendingChanges > 0 {
+ s.tick(ctx)
+ pendingChanges = 0
+ }
+ }
+
+ case <-s.stopChan:
+ return nil
+ }
+ }
+}
+
+// Stop causes the scheduler event loop to stop running.
+func (s *Scheduler) Stop() {
+ close(s.stopChan)
+ <-s.doneChan
+}
+
+// enqueue queues a task for scheduling.
+func (s *Scheduler) enqueue(t *api.Task) {
+ s.unassignedTasks.PushBack(t)
+}
+
+func (s *Scheduler) createTask(ctx context.Context, t *api.Task) int {
+ // Ignore all tasks that have not reached ALLOCATED
+ // state, and tasks that no longer consume resources.
+ if t.Status.State < api.TaskStateAllocated || t.Status.State > api.TaskStateRunning {
+ return 0
+ }
+
+ s.allTasks[t.ID] = t
+ if t.NodeID == "" {
+ // unassigned task
+ s.enqueue(t)
+ return 1
+ }
+
+ if t.Status.State == api.TaskStateAllocated {
+ s.preassignedTasks[t.ID] = t
+ // preassigned tasks do not contribute to running tasks count
+ return 0
+ }
+
+ nodeInfo := s.nodeHeap.nodeInfo(t.NodeID)
+ if nodeInfo.addTask(t) {
+ s.nodeHeap.updateNode(nodeInfo)
+ }
+
+ return 0
+}
+
+func (s *Scheduler) updateTask(ctx context.Context, t *api.Task) int {
+ // Ignore all tasks that have not reached ALLOCATED
+ // state.
+ if t.Status.State < api.TaskStateAllocated {
+ return 0
+ }
+
+ oldTask := s.allTasks[t.ID]
+
+ // Tasks that have progressed past RUNNING no longer consume
+ // resources, so stop tracking them.
+ if t.Status.State > api.TaskStateRunning {
+ if oldTask != nil {
+ s.deleteTask(ctx, oldTask)
+ }
+ return 1
+ }
+
+ if t.NodeID == "" {
+ // unassigned task
+ if oldTask != nil {
+ s.deleteTask(ctx, oldTask)
+ }
+ s.allTasks[t.ID] = t
+ s.enqueue(t)
+ return 1
+ }
+
+ if t.Status.State == api.TaskStateAllocated {
+ if oldTask != nil {
+ s.deleteTask(ctx, oldTask)
+ }
+ s.allTasks[t.ID] = t
+ s.preassignedTasks[t.ID] = t
+ // preassigned tasks do not contribute to running tasks count
+ return 0
+ }
+
+ s.allTasks[t.ID] = t
+ nodeInfo := s.nodeHeap.nodeInfo(t.NodeID)
+ if nodeInfo.addTask(t) {
+ s.nodeHeap.updateNode(nodeInfo)
+ }
+
+ return 0
+}
+
+func (s *Scheduler) deleteTask(ctx context.Context, t *api.Task) {
+ delete(s.allTasks, t.ID)
+ delete(s.preassignedTasks, t.ID)
+ nodeInfo := s.nodeHeap.nodeInfo(t.NodeID)
+ if nodeInfo.removeTask(t) {
+ s.nodeHeap.updateNode(nodeInfo)
+ }
+}
+
+func (s *Scheduler) createOrUpdateNode(n *api.Node) {
+ var resources api.Resources
+ if n.Description != nil && n.Description.Resources != nil {
+ resources = *n.Description.Resources
+ }
+ nodeInfo := s.nodeHeap.nodeInfo(n.ID)
+ nodeInfo.Node = n
+ nodeInfo.AvailableResources = resources
+ s.nodeHeap.addOrUpdateNode(nodeInfo)
+}
+
+func (s *Scheduler) processPreassignedTasks(ctx context.Context) {
+ schedulingDecisions := make(map[string]schedulingDecision, len(s.preassignedTasks))
+ for _, t := range s.preassignedTasks {
+ newT := s.taskFitNode(ctx, t, t.NodeID)
+ if newT == nil {
+ continue
+ }
+ schedulingDecisions[t.ID] = schedulingDecision{old: t, new: newT}
+ }
+
+ successful, failed := s.applySchedulingDecisions(ctx, schedulingDecisions)
+
+ for _, decision := range successful {
+ delete(s.preassignedTasks, decision.old.ID)
+ }
+ for _, decision := range failed {
+ s.allTasks[decision.old.ID] = decision.old
+ nodeInfo := s.nodeHeap.nodeInfo(decision.new.NodeID)
+ nodeInfo.removeTask(decision.new)
+ s.nodeHeap.updateNode(nodeInfo)
+ }
+}
+
+// tick attempts to schedule the queue.
+func (s *Scheduler) tick(ctx context.Context) {
+ schedulingDecisions := make(map[string]schedulingDecision, s.unassignedTasks.Len())
+
+ var next *list.Element
+ for e := s.unassignedTasks.Front(); e != nil; e = next {
+ next = e.Next()
+ id := e.Value.(*api.Task).ID
+ if _, ok := schedulingDecisions[id]; ok {
+ s.unassignedTasks.Remove(e)
+ continue
+ }
+ t := s.allTasks[e.Value.(*api.Task).ID]
+ if t == nil || t.NodeID != "" {
+ // task deleted or already assigned
+ s.unassignedTasks.Remove(e)
+ continue
+ }
+ if newT := s.scheduleTask(ctx, t); newT != nil {
+ schedulingDecisions[id] = schedulingDecision{old: t, new: newT}
+ s.unassignedTasks.Remove(e)
+ }
+ }
+
+ _, failed := s.applySchedulingDecisions(ctx, schedulingDecisions)
+ for _, decision := range failed {
+ s.allTasks[decision.old.ID] = decision.old
+
+ nodeInfo := s.nodeHeap.nodeInfo(decision.new.NodeID)
+ nodeInfo.removeTask(decision.new)
+ s.nodeHeap.updateNode(nodeInfo)
+
+ // enqueue task for next scheduling attempt
+ s.enqueue(decision.old)
+ }
+}
+
+func (s *Scheduler) applySchedulingDecisions(ctx context.Context, schedulingDecisions map[string]schedulingDecision) (successful, failed []schedulingDecision) {
+ if len(schedulingDecisions) == 0 {
+ return
+ }
+
+ successful = make([]schedulingDecision, 0, len(schedulingDecisions))
+
+ // Apply changes to master store
+ applied, err := s.store.Batch(func(batch *store.Batch) error {
+ for len(schedulingDecisions) > 0 {
+ err := batch.Update(func(tx store.Tx) error {
+ // Update exactly one task inside this Update
+ // callback.
+ for taskID, decision := range schedulingDecisions {
+ delete(schedulingDecisions, taskID)
+
+ t := store.GetTask(tx, taskID)
+ if t == nil {
+ // Task no longer exists. Do nothing.
+ failed = append(failed, decision)
+ continue
+ }
+
+ if err := store.UpdateTask(tx, decision.new); err != nil {
+ log.G(ctx).Debugf("scheduler failed to update task %s; will retry", taskID)
+ failed = append(failed, decision)
+ continue
+ }
+ successful = append(successful, decision)
+ return nil
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+
+ if err != nil {
+ log.G(ctx).WithError(err).Error("scheduler tick transaction failed")
+ failed = append(failed, successful[applied:]...)
+ successful = successful[:applied]
+ }
+ return
+}
+
+// taskFitNode checks if a node has enough resources to accommodate a task.
+func (s *Scheduler) taskFitNode(ctx context.Context, t *api.Task, nodeID string) *api.Task {
+ nodeInfo := s.nodeHeap.nodeInfo(nodeID)
+ s.pipeline.SetTask(t)
+ if !s.pipeline.Process(&nodeInfo) {
+ // this node cannot accommodate this task
+ return nil
+ }
+ newT := *t
+ newT.Status = api.TaskStatus{
+ State: api.TaskStateAssigned,
+ Timestamp: ptypes.MustTimestampProto(time.Now()),
+ Message: "scheduler confirmed task can run on preassigned node",
+ }
+ s.allTasks[t.ID] = &newT
+
+ if nodeInfo.addTask(&newT) {
+ s.nodeHeap.updateNode(nodeInfo)
+ }
+ return &newT
+}
+
+// scheduleTask schedules a single task.
+func (s *Scheduler) scheduleTask(ctx context.Context, t *api.Task) *api.Task {
+ s.pipeline.SetTask(t)
+ n, _ := s.nodeHeap.findMin(s.pipeline.Process, s.scanAllNodes)
+ if n == nil {
+ log.G(ctx).WithField("task.id", t.ID).Debug("No suitable node available for task")
+ return nil
+ }
+
+ log.G(ctx).WithField("task.id", t.ID).Debugf("Assigning to node %s", n.ID)
+ newT := *t
+ newT.NodeID = n.ID
+ newT.Status = api.TaskStatus{
+ State: api.TaskStateAssigned,
+ Timestamp: ptypes.MustTimestampProto(time.Now()),
+ Message: "scheduler assigned task to node",
+ }
+ s.allTasks[t.ID] = &newT
+
+ nodeInfo := s.nodeHeap.nodeInfo(n.ID)
+ if nodeInfo.addTask(&newT) {
+ s.nodeHeap.updateNode(nodeInfo)
+ }
+ return &newT
+}
+
+func (s *Scheduler) buildNodeHeap(tx store.ReadTx, tasksByNode map[string]map[string]*api.Task) error {
+ nodes, err := store.FindNodes(tx, store.All)
+ if err != nil {
+ return err
+ }
+
+ s.nodeHeap.alloc(len(nodes))
+
+ i := 0
+ for _, n := range nodes {
+ var resources api.Resources
+ if n.Description != nil && n.Description.Resources != nil {
+ resources = *n.Description.Resources
+ }
+ s.nodeHeap.heap = append(s.nodeHeap.heap, newNodeInfo(n, tasksByNode[n.ID], resources))
+ s.nodeHeap.index[n.ID] = i
+ i++
+ }
+
+ heap.Init(&s.nodeHeap)
+
+ return nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/doc.go b/vendor/src/github.com/docker/swarmkit/manager/state/doc.go
new file mode 100644
index 0000000000..50748337d5
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/doc.go
@@ -0,0 +1,32 @@
+// Package state provides interfaces to work with swarm cluster state.
+//
+// The primary interface is Store, which abstracts storage of this cluster
+// state. Store exposes a transactional interface for both reads and writes.
+// To perform a read transaction, View accepts a callback function that it
+// will invoke with a ReadTx object that gives it a consistent view of the
+// state. Similarly, Update accepts a callback function that it will invoke with
+// a Tx object that allows reads and writes to happen without interference from
+// other transactions.
+//
+// This is an example of making an update to a Store:
+//
+// err := store.Update(func(tx state.Tx) error {
+// if err := tx.Nodes().Update(newNode); err != nil {
+// return err
+// }
+// return nil
+// })
+// if err != nil {
+// return fmt.Errorf("transaction failed: %v", err)
+// }
+//
+// WatchableStore is a version of Store that exposes watch functionality.
+// It exposes a publish/subscribe queue where code can subscribe to
+// changes of interest. This can be combined with the ViewAndWatch function to
+// "fork" a store, by making a snapshot and then applying future changes
+// to keep the copy in sync. This approach lets consumers of the data
+// use their own data structures and implement their own concurrency
+// strategies. It can lead to more efficient code because data consumers
+// don't necessarily have to lock the main data store if they are
+// maintaining their own copies of the state.
+package state
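+
+// Editor's note: a sketch of the ViewAndWatch pattern described above, not
+// part of this file; the callback sees a consistent snapshot, and the channel
+// then delivers every subsequent event:
+//
+//    updates, cancel, err := store.ViewAndWatch(s, func(tx store.ReadTx) error {
+//        // read the initial state here
+//        return nil
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    defer cancel()
+//    for event := range updates {
+//        // apply the event to the local copy of the state
+//    }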
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/proposer.go b/vendor/src/github.com/docker/swarmkit/manager/state/proposer.go
new file mode 100644
index 0000000000..9e62745bfb
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/proposer.go
@@ -0,0 +1,17 @@
+package state
+
+import (
+ "github.com/docker/swarmkit/api"
+ "golang.org/x/net/context"
+)
+
+// A Proposer can propose actions to a cluster.
+type Proposer interface {
+ // ProposeValue adds storeAction to the distributed log. If this
+ // completes successfully, ProposeValue calls cb to commit the
+ // proposed changes. The callback is necessary for the Proposer to make
+ // sure that the changes are committed before it interacts further
+ // with the store.
+ ProposeValue(ctx context.Context, storeAction []*api.StoreAction, cb func()) error
+ GetVersion() *api.Version
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go b/vendor/src/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go
new file mode 100644
index 0000000000..5ffb943f09
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go
@@ -0,0 +1,208 @@
+package membership
+
+import (
+ "errors"
+ "sync"
+
+ "google.golang.org/grpc"
+
+ "github.com/coreos/etcd/raft/raftpb"
+ "github.com/docker/swarmkit/api"
+ "github.com/gogo/protobuf/proto"
+)
+
+var (
+ // ErrIDExists is returned when a node wants to join the existing cluster but its ID already exists
+ ErrIDExists = errors.New("membership: can't add node to cluster, node id is a duplicate")
+ // ErrIDRemoved is returned when a node tries to perform an operation on an existing cluster but was removed
+ ErrIDRemoved = errors.New("membership: node was removed during cluster lifetime")
+ // ErrIDNotFound is returned when we try an operation on a member that does not exist in the cluster list
+ ErrIDNotFound = errors.New("membership: member not found in cluster list")
+ // ErrConfigChangeInvalid is returned when a configuration change we received looks invalid in form
+ ErrConfigChangeInvalid = errors.New("membership: ConfChange type should be either AddNode, RemoveNode or UpdateNode")
+ // ErrCannotUnmarshalConfig is returned when a node cannot unmarshal a configuration change
+ ErrCannotUnmarshalConfig = errors.New("membership: cannot unmarshal configuration change")
+)
+
+// Cluster represents a set of active
+// raft Members
+type Cluster struct {
+ id uint64
+
+ mu sync.RWMutex
+ members map[uint64]*Member
+
+ // removed contains the list of removed Members;
+ // those ids cannot be reused
+ removed map[uint64]bool
+}
+
+// Member represents a raft Cluster Member
+type Member struct {
+ *api.RaftMember
+
+ api.RaftClient
+ Conn *grpc.ClientConn
+}
+
+// NewCluster creates a new Cluster neighbors
+// list for a raft Member
+func NewCluster() *Cluster {
+ // TODO(abronan): generate Cluster ID for federation
+
+ return &Cluster{
+ members: make(map[uint64]*Member),
+ removed: make(map[uint64]bool),
+ }
+}
+
+// Members returns the list of raft Members in the Cluster.
+func (c *Cluster) Members() map[uint64]*Member {
+ members := make(map[uint64]*Member)
+ c.mu.RLock()
+ for k, v := range c.members {
+ members[k] = v
+ }
+ c.mu.RUnlock()
+ return members
+}
+
+// Removed returns the list of raft Members removed from the Cluster.
+func (c *Cluster) Removed() []uint64 {
+ c.mu.RLock()
+ removed := make([]uint64, 0, len(c.removed))
+ for k := range c.removed {
+ removed = append(removed, k)
+ }
+ c.mu.RUnlock()
+ return removed
+}
+
+// GetMember returns information on a given Member.
+func (c *Cluster) GetMember(id uint64) *Member {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.members[id]
+}
+
+// AddMember adds a node to the Cluster Memberlist.
+func (c *Cluster) AddMember(member *Member) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.removed[member.RaftID] {
+ return ErrIDRemoved
+ }
+
+ c.members[member.RaftID] = member
+ return nil
+}
+
+// RemoveMember removes a node from the Cluster Memberlist.
+func (c *Cluster) RemoveMember(id uint64) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.members[id] == nil {
+ return ErrIDNotFound
+ }
+
+ conn := c.members[id].Conn
+ if conn != nil {
+ _ = conn.Close()
+ }
+
+ c.removed[id] = true
+ delete(c.members, id)
+ return nil
+}
+
+// IsIDRemoved checks if a Member is in the remove set.
+func (c *Cluster) IsIDRemoved(id uint64) bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.removed[id]
+}
+
+// Clear resets the list of active Members and removed Members.
+func (c *Cluster) Clear() {
+ c.mu.Lock()
+ c.members = make(map[uint64]*Member)
+ c.removed = make(map[uint64]bool)
+ c.mu.Unlock()
+}
+
+// ValidateConfigurationChange takes a proposed ConfChange and
+// ensures that it is valid.
+func (c *Cluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.removed[cc.NodeID] {
+ return ErrIDRemoved
+ }
+ switch cc.Type {
+ case raftpb.ConfChangeAddNode:
+ if c.members[cc.NodeID] != nil {
+ return ErrIDExists
+ }
+ case raftpb.ConfChangeRemoveNode:
+ if c.members[cc.NodeID] == nil {
+ return ErrIDNotFound
+ }
+ case raftpb.ConfChangeUpdateNode:
+ if c.members[cc.NodeID] == nil {
+ return ErrIDNotFound
+ }
+ default:
+ return ErrConfigChangeInvalid
+ }
+ m := &api.RaftMember{}
+ if err := proto.Unmarshal(cc.Context, m); err != nil {
+ return ErrCannotUnmarshalConfig
+ }
+ return nil
+}
+
+// CanRemoveMember checks that removing a Member would not result in a loss
+// of quorum. This check is needed before submitting a configuration change
+// that might block or harm the Cluster on Member recovery.
+func (c *Cluster) CanRemoveMember(from uint64, id uint64) bool {
+ members := c.Members()
+
+ nmembers := 0
+ nreachable := 0
+
+ for _, m := range members {
+ // Skip the node that is going to be deleted
+ if m.RaftID == id {
+ continue
+ }
+
+ // Local node from where the remove is issued
+ if m.RaftID == from {
+ nmembers++
+ nreachable++
+ continue
+ }
+
+ connState, err := m.Conn.State()
+ if err == nil && connState == grpc.Ready {
+ nreachable++
+ }
+
+ nmembers++
+ }
+
+ // Special case of 2 managers
+ if nreachable == 1 && len(members) <= 2 {
+ return false
+ }
+
+ nquorum := nmembers/2 + 1
+ if nreachable < nquorum {
+ return false
+ }
+
+ return true
+}
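+
+// Editor's note: a worked example, not part of the vendored source. In a
+// 5-member cluster, removing one member leaves nmembers = 4 and
+// nquorum = 4/2 + 1 = 3, so the removal is allowed only while at least 3 of
+// the remaining members (counting the one issuing the request) are reachable.
+// The nreachable == 1 && len(members) <= 2 special case forbids shrinking a
+// 2-member cluster down to a single manager.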
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/raft/raft.go b/vendor/src/github.com/docker/swarmkit/manager/state/raft/raft.go
new file mode 100644
index 0000000000..5ddd356142
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/raft/raft.go
@@ -0,0 +1,1161 @@
+package raft
+
+import (
+ "errors"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+
+ "golang.org/x/net/context"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/coreos/etcd/pkg/idutil"
+ "github.com/coreos/etcd/raft"
+ "github.com/coreos/etcd/raft/raftpb"
+ "github.com/coreos/etcd/snap"
+ "github.com/coreos/etcd/wal"
+ "github.com/docker/go-events"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/ca"
+ "github.com/docker/swarmkit/identity"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/state/raft/membership"
+ "github.com/docker/swarmkit/manager/state/store"
+ "github.com/gogo/protobuf/proto"
+ "github.com/pivotal-golang/clock"
+)
+
+var (
+ // ErrConfChangeRefused is returned when there is an issue with the configuration change
+ ErrConfChangeRefused = errors.New("raft: propose configuration change refused")
+ // ErrApplyNotSpecified is returned during the creation of a raft node when no apply method was provided
+ ErrApplyNotSpecified = errors.New("raft: apply method was not specified")
+ // ErrAppendEntry is returned when the node fails to append an entry to the logs
+ ErrAppendEntry = errors.New("raft: failed to append entry to logs")
+ // ErrSetHardState is returned when the node fails to set the hard state
+ ErrSetHardState = errors.New("raft: failed to set the hard state for log append entry")
+ // ErrApplySnapshot is returned when the node fails to apply a snapshot
+ ErrApplySnapshot = errors.New("raft: failed to apply snapshot on raft node")
+ // ErrStopped is returned when an operation was submitted but the node was stopped in the meantime
+ ErrStopped = errors.New("raft: failed to process the request: node is stopped")
+ // ErrLostLeadership is returned when an operation was submitted but the node lost leader status before it became committed
+ ErrLostLeadership = errors.New("raft: failed to process the request: node lost leader status")
+ // ErrRequestTooLarge is returned when a raft internal message is too large to be sent
+ ErrRequestTooLarge = errors.New("raft: raft message is too large and can't be sent")
+ // ErrCannotRemoveMember is returned when we try to remove a member from the cluster but this would result in a loss of quorum
+ ErrCannotRemoveMember = errors.New("raft: member cannot be removed, because removing it may result in loss of quorum")
+ // ErrMemberRemoved is returned when a node was removed from the cluster
+ ErrMemberRemoved = errors.New("raft: member was removed from the cluster")
+ // ErrNoClusterLeader is returned when the cluster has no elected leader
+ ErrNoClusterLeader = errors.New("raft: no elected cluster leader")
+)
+
+// LeadershipState indicates whether the node is a leader or follower.
+type LeadershipState int
+
+const (
+ // IsLeader indicates that the node is a raft leader.
+ IsLeader LeadershipState = iota
+ // IsFollower indicates that the node is a raft follower.
+ IsFollower
+)
+
+// Node represents a raft node and the configuration
+// needed to run it.
+type Node struct {
+ raft.Node
+ cluster *membership.Cluster
+
+ Server *grpc.Server
+ Ctx context.Context
+ cancel func()
+ tlsCredentials credentials.TransportAuthenticator
+
+ Address string
+ StateDir string
+ Error error
+
+ raftStore *raft.MemoryStorage
+ memoryStore *store.MemoryStore
+ Config *raft.Config
+ reqIDGen *idutil.Generator
+ wait *wait
+ wal *wal.WAL
+ snapshotter *snap.Snapshotter
+ wasLeader bool
+ removed uint32
+ joinAddr string
+
+ // forceNewCluster is a special flag used to recover from a disaster
+ // scenario by pointing to an existing or backed up data directory.
+ forceNewCluster bool
+
+ confState raftpb.ConfState
+ appliedIndex uint64
+ snapshotIndex uint64
+
+ ticker clock.Ticker
+ sendTimeout time.Duration
+ stopCh chan struct{}
+ doneCh chan struct{}
+ leadershipBroadcast *events.Broadcaster
+
+ startNodePeers []raft.Peer
+
+ // used to coordinate shutdown
+ stopMu sync.RWMutex
+
+ snapshotInProgress chan uint64
+ asyncTasks sync.WaitGroup
+}
+
+// NewNodeOptions provides arguments for NewNode
+type NewNodeOptions struct {
+ // ID is the node's ID, from its certificate's CN field.
+ ID string
+ // Addr is the address of this node's listener
+ Addr string
+ // ForceNewCluster defines if we have to force a new cluster
+ // because we are recovering from a backup data directory.
+ ForceNewCluster bool
+ // JoinAddr is the address of a cluster to join. May be an empty
+ // string to create a standalone cluster.
+ JoinAddr string
+ // Config is the raft config.
+ Config *raft.Config
+ // StateDir is the directory to store durable state.
+ StateDir string
+ // TickInterval is the time interval between raft ticks.
+ TickInterval time.Duration
+ // ClockSource is a Clock interface to use as a time base.
+ // Leave this nil except for tests that are designed not to run in real
+ // time.
+ ClockSource clock.Clock
+ // SendTimeout is the timeout for sending messages to other raft
+ // nodes. Leave this as 0 to get the default value.
+ SendTimeout time.Duration
+ TLSCredentials credentials.TransportAuthenticator
+}
+
+// NewNode generates a new Raft node
+func NewNode(ctx context.Context, opts NewNodeOptions) (*Node, error) {
+ cfg := opts.Config
+ if cfg == nil {
+ cfg = DefaultNodeConfig()
+ }
+ if opts.TickInterval == 0 {
+ opts.TickInterval = time.Second
+ }
+
+ raftID, err := identity.ParseNodeID(opts.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ raftStore := raft.NewMemoryStorage()
+
+ ctx, cancel := context.WithCancel(ctx)
+
+ n := &Node{
+ Ctx: ctx,
+ cancel: cancel,
+ cluster: membership.NewCluster(),
+ tlsCredentials: opts.TLSCredentials,
+ raftStore: raftStore,
+ Address: opts.Addr,
+ Config: &raft.Config{
+ ElectionTick: cfg.ElectionTick,
+ HeartbeatTick: cfg.HeartbeatTick,
+ Storage: raftStore,
+ MaxSizePerMsg: cfg.MaxSizePerMsg,
+ MaxInflightMsgs: cfg.MaxInflightMsgs,
+ Logger: cfg.Logger,
+ ID: raftID,
+ },
+ forceNewCluster: opts.ForceNewCluster,
+ stopCh: make(chan struct{}),
+ doneCh: make(chan struct{}),
+ StateDir: opts.StateDir,
+ joinAddr: opts.JoinAddr,
+ sendTimeout: 2 * time.Second,
+ leadershipBroadcast: events.NewBroadcaster(),
+ }
+ n.memoryStore = store.NewMemoryStore(n)
+
+ if opts.ClockSource == nil {
+ n.ticker = clock.NewClock().NewTicker(opts.TickInterval)
+ } else {
+ n.ticker = opts.ClockSource.NewTicker(opts.TickInterval)
+ }
+ if opts.SendTimeout != 0 {
+ n.sendTimeout = opts.SendTimeout
+ }
+
+ if err := n.loadAndStart(ctx, opts.ForceNewCluster); err != nil {
+ n.ticker.Stop()
+ return nil, err
+ }
+
+ snapshot, err := raftStore.Snapshot()
+ // Snapshot never returns an error
+ if err != nil {
+ panic("could not get snapshot of raft store")
+ }
+
+ n.confState = snapshot.Metadata.ConfState
+ n.appliedIndex = snapshot.Metadata.Index
+ n.snapshotIndex = snapshot.Metadata.Index
+ n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now())
+ n.wait = newWait()
+
+ if n.startNodePeers != nil {
+ if n.joinAddr != "" {
+ c, err := n.ConnectToMember(n.joinAddr, 10*time.Second)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ _ = c.Conn.Close()
+ }()
+
+ ctx, cancel := context.WithTimeout(n.Ctx, 10*time.Second)
+ defer cancel()
+ resp, err := c.Join(ctx, &api.JoinRequest{
+ Addr: n.Address,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ n.Node = raft.StartNode(n.Config, []raft.Peer{})
+
+ if err := n.registerNodes(resp.Members); err != nil {
+ return nil, err
+ }
+ } else {
+ n.Node = raft.StartNode(n.Config, n.startNodePeers)
+ if err := n.Campaign(n.Ctx); err != nil {
+ return nil, err
+ }
+ }
+ return n, nil
+ }
+
+ if n.joinAddr != "" {
+ n.Config.Logger.Warning("ignoring request to join cluster, because raft state already exists")
+ }
+ n.Node = raft.RestartNode(n.Config)
+ return n, nil
+}
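+
+// Illustrative usage sketch (not part of the vendored source; ctx, nodeID,
+// stateDir and creds are assumed to be supplied by the caller):
+//
+//    node, err := NewNode(ctx, NewNodeOptions{
+//        ID:             nodeID, // certificate CN
+//        Addr:           "10.0.0.1:4242",
+//        StateDir:       stateDir,
+//        TLSCredentials: creds,
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    go node.Run(ctx)      // main raft processing loop
+//    defer node.Shutdown() // stops the loop and closes member connections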
+
+// DefaultNodeConfig returns the default config for a
+// raft node that can be modified and customized
+func DefaultNodeConfig() *raft.Config {
+ return &raft.Config{
+ HeartbeatTick: 1,
+ ElectionTick: 3,
+ MaxSizePerMsg: math.MaxUint16,
+ MaxInflightMsgs: 256,
+ Logger: log.L,
+ }
+}
+
+// DefaultRaftConfig returns a default api.RaftConfig.
+func DefaultRaftConfig() api.RaftConfig {
+ return api.RaftConfig{
+ KeepOldSnapshots: 0,
+ SnapshotInterval: 10000,
+ LogEntriesForSlowFollowers: 500,
+ ElectionTick: 3,
+ HeartbeatTick: 1,
+ }
+}
+
+// MemoryStore returns the memory store that is kept in sync with the raft log.
+func (n *Node) MemoryStore() *store.MemoryStore {
+ return n.memoryStore
+}
+
+// Run is the main loop for a Raft node. It steps through the state machine,
+// acting on the messages received from other Raft nodes in the cluster.
+//
+// Before running the main loop, it first starts the raft node based on saved
+// cluster state. If no saved state exists, it starts a single-node cluster.
+func (n *Node) Run(ctx context.Context) error {
+ defer func() {
+ close(n.doneCh)
+ }()
+
+ for {
+ select {
+ case <-n.ticker.C():
+ n.Tick()
+
+ case rd := <-n.Ready():
+ raftConfig := DefaultRaftConfig()
+ n.memoryStore.View(func(readTx store.ReadTx) {
+ clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
+ if err == nil && len(clusters) == 1 {
+ raftConfig = clusters[0].Spec.Raft
+ }
+ })
+
+ // Save entries to storage
+ if err := n.saveToStorage(&raftConfig, rd.HardState, rd.Entries, rd.Snapshot); err != nil {
+ n.Config.Logger.Error(err)
+ }
+
+ // Send raft messages to peers
+ if err := n.send(rd.Messages); err != nil {
+ n.Config.Logger.Error(err)
+ }
+
+ // Apply snapshot to memory store. The snapshot
+ // was applied to the raft store in
+ // saveToStorage.
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ // Load the snapshot data into the store
+ if err := n.restoreFromSnapshot(rd.Snapshot.Data, n.forceNewCluster); err != nil {
+ n.Config.Logger.Error(err)
+ }
+ n.appliedIndex = rd.Snapshot.Metadata.Index
+ n.snapshotIndex = rd.Snapshot.Metadata.Index
+ n.confState = rd.Snapshot.Metadata.ConfState
+ }
+
+ // Process committed entries
+ for _, entry := range rd.CommittedEntries {
+ if err := n.processCommitted(entry); err != nil {
+ n.Config.Logger.Error(err)
+ }
+ }
+
+ // Trigger a snapshot every once in a while
+ if n.snapshotInProgress == nil &&
+ raftConfig.SnapshotInterval > 0 &&
+ n.appliedIndex-n.snapshotIndex >= raftConfig.SnapshotInterval {
+ n.doSnapshot(&raftConfig)
+ }
+
+ // If we cease to be the leader, we must cancel
+ // any proposals that are currently waiting for
+ // a quorum to acknowledge them. It is still
+ // possible for these to become committed, but
+ // if that happens we will apply them as any
+ // follower would.
+ if rd.SoftState != nil {
+ if n.wasLeader && rd.SoftState.RaftState != raft.StateLeader {
+ n.wasLeader = false
+ n.wait.cancelAll()
+ n.leadershipBroadcast.Write(IsFollower)
+ } else if !n.wasLeader && rd.SoftState.RaftState == raft.StateLeader {
+ n.wasLeader = true
+ n.leadershipBroadcast.Write(IsLeader)
+ }
+ }
+
+ // If this node was removed from the cluster by
+ // the other members, send back an error to the
+ // caller to start the shutdown process.
+ if n.mustStop() {
+ return ErrMemberRemoved
+ }
+
+ // Advance the state machine
+ n.Advance()
+
+ case snapshotIndex := <-n.snapshotInProgress:
+ if snapshotIndex > n.snapshotIndex {
+ n.snapshotIndex = snapshotIndex
+ }
+ n.snapshotInProgress = nil
+ case <-n.stopCh:
+ n.stop()
+ return nil
+ }
+ }
+}
+
+// Shutdown stops the raft node processing loop.
+// Calling Shutdown on an already stopped node
+// will result in a panic.
+func (n *Node) Shutdown() {
+ select {
+ case <-n.doneCh:
+ n.stop()
+ default:
+ close(n.stopCh)
+ <-n.doneCh
+ }
+}
+
+func (n *Node) stop() {
+ n.stopMu.Lock()
+ defer n.stopMu.Unlock()
+
+ n.cancel()
+ n.asyncTasks.Wait()
+
+ members := n.cluster.Members()
+ for _, member := range members {
+ if member.Conn != nil {
+ _ = member.Conn.Close()
+ }
+ }
+ n.Stop()
+ if err := n.wal.Close(); err != nil {
+ n.Config.Logger.Error(err)
+ }
+ // TODO(stevvooe): Handle ctx.Done()
+}
+
+// IsLeader checks if we are the leader
+func (n *Node) IsLeader() bool {
+ return n.Node.Status().Lead == n.Config.ID
+}
+
+// Leader returns the id of the leader
+func (n *Node) Leader() uint64 {
+ return n.Node.Status().Lead
+}
+
+// Join asks a member of the raft cluster to propose
+// a configuration change and add us as a member, thus
+// beginning the log replication process. This method
+// is called by an aspiring member against an existing member.
+func (n *Node) Join(ctx context.Context, req *api.JoinRequest) (*api.JoinResponse, error) {
+ nodeInfo, err := ca.RemoteNode(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ fields := logrus.Fields{
+ "node.id": nodeInfo.NodeID,
+ "method": "(*Node).Join",
+ }
+ if nodeInfo.ForwardedBy != nil {
+ fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
+ }
+ log := log.G(ctx).WithFields(fields)
+
+ raftID, err := identity.ParseNodeID(nodeInfo.NodeID)
+ if err != nil {
+ return nil, err
+ }
+
+ // can't stop the raft node while an async RPC is in progress
+ n.stopMu.RLock()
+ defer n.stopMu.RUnlock()
+
+ if n.Node == nil {
+ log.WithError(ErrStopped).Error("failed to join raft cluster")
+ return nil, ErrStopped
+ }
+
+ // We submit a configuration change only if the node was not registered yet
+ if n.cluster.GetMember(raftID) == nil {
+ err = n.addMember(ctx, req.Addr, raftID)
+ if err != nil {
+ log.WithError(err).Errorf("failed to add member")
+ return nil, err
+ }
+ }
+
+ var nodes []*api.RaftMember
+ for _, node := range n.cluster.Members() {
+ nodes = append(nodes, &api.RaftMember{
+ RaftID: node.RaftID,
+ Addr: node.Addr,
+ })
+ }
+ log.Debugf("node joined")
+
+ return &api.JoinResponse{Members: nodes}, nil
+}
+
+// addMember submits a configuration change to add a new member on the raft cluster.
+func (n *Node) addMember(ctx context.Context, addr string, raftID uint64) error {
+ node := api.RaftMember{
+ RaftID: raftID,
+ Addr: addr,
+ }
+
+ meta, err := node.Marshal()
+ if err != nil {
+ return err
+ }
+
+ cc := raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: raftID,
+ Context: meta,
+ }
+
+ // Wait for a raft round to process the configuration change
+ return n.configure(ctx, cc)
+}
+
+// Leave asks a member of the raft cluster to remove
+// us from it. This method is called by a member that
+// wishes to give up its raft membership, against an
+// active member of the cluster.
+func (n *Node) Leave(ctx context.Context, req *api.LeaveRequest) (*api.LeaveResponse, error) {
+ nodeInfo, err := ca.RemoteNode(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ fields := logrus.Fields{
+ "node.id": nodeInfo.NodeID,
+ "method": "(*Node).Leave",
+ }
+ if nodeInfo.ForwardedBy != nil {
+ fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
+ }
+ log.G(ctx).WithFields(fields).Debugf("")
+
+ // can't stop the raft node while an async RPC is in progress
+ n.stopMu.RLock()
+ defer n.stopMu.RUnlock()
+
+ if n.Node == nil {
+ return nil, ErrStopped
+ }
+
+ err = n.RemoveMember(ctx, req.Node.RaftID)
+ if err != nil {
+ return nil, err
+ }
+
+ return &api.LeaveResponse{}, nil
+}
+
+// RemoveMember submits a configuration change to remove a member from the raft cluster.
+func (n *Node) RemoveMember(ctx context.Context, id uint64) error {
+ // TODO(abronan): this can race if multiple removes are processed, we should
+ // send all the requests to the Leader and track pending removals.
+ if n.cluster.CanRemoveMember(n.Config.ID, id) {
+ cc := raftpb.ConfChange{
+ ID: id,
+ Type: raftpb.ConfChangeRemoveNode,
+ NodeID: id,
+ Context: []byte(""),
+ }
+
+ return n.configure(ctx, cc)
+ }
+
+ return ErrCannotRemoveMember
+}
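+
+// Illustrative sketch of the quorum guard above (memberID is an assumed
+// raft ID taken from GetMemberlist):
+//
+//    if err := node.RemoveMember(ctx, memberID); err == ErrCannotRemoveMember {
+//        // Removing this member would break quorum; add or recover
+//        // another manager first, then retry.
+//    }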
+
+// ProcessRaftMessage calls 'Step' which advances the
+// raft state machine with the provided message on the
+// receiving node
+func (n *Node) ProcessRaftMessage(ctx context.Context, msg *api.ProcessRaftMessageRequest) (*api.ProcessRaftMessageResponse, error) {
+ // Don't process the message if this comes from
+ // a node in the remove set
+ if n.cluster.IsIDRemoved(msg.Message.From) {
+ return nil, ErrMemberRemoved
+ }
+
+ // can't stop the raft node while an async RPC is in progress
+ n.stopMu.RLock()
+ defer n.stopMu.RUnlock()
+ if n.Node == nil {
+ return nil, ErrStopped
+ }
+
+ if err := n.Step(n.Ctx, *msg.Message); err != nil {
+ return nil, err
+ }
+
+ return &api.ProcessRaftMessageResponse{}, nil
+}
+
+// ResolveAddress returns the address at which the node with the given ID can be reached.
+func (n *Node) ResolveAddress(ctx context.Context, msg *api.ResolveAddressRequest) (*api.ResolveAddressResponse, error) {
+ nodeInfo, err := ca.RemoteNode(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ fields := logrus.Fields{
+ "node.id": nodeInfo.NodeID,
+ "method": "(*Node).ResolveAddress",
+ }
+ if nodeInfo.ForwardedBy != nil {
+ fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
+ }
+ log.G(ctx).WithFields(fields).Debugf("")
+
+ member := n.cluster.GetMember(msg.RaftID)
+ if member == nil {
+ return nil, grpc.Errorf(codes.NotFound, "member %s not found", identity.FormatNodeID(msg.RaftID))
+ }
+ return &api.ResolveAddressResponse{Addr: member.Addr}, nil
+}
+
+// LeaderAddr returns the address of the current cluster leader.
+// With this method Node satisfies raftpicker.AddrSelector interface.
+func (n *Node) LeaderAddr() (string, error) {
+ n.stopMu.RLock()
+ defer n.stopMu.RUnlock()
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ if err := WaitForLeader(ctx, n); err != nil {
+ return "", ErrNoClusterLeader
+ }
+ if n.Node == nil {
+ return "", ErrStopped
+ }
+ ms := n.cluster.Members()
+ l := ms[n.Leader()]
+ if l == nil {
+ return "", ErrNoClusterLeader
+ }
+ return l.Addr, nil
+}
+
+// registerNode registers a new node on the cluster memberlist
+func (n *Node) registerNode(node *api.RaftMember) error {
+ member := &membership.Member{}
+
+ // Avoid opening a connection to the local node
+ if node.RaftID != n.Config.ID {
+ // We don't want to impose a timeout on the grpc connection. It
+ // should keep retrying as long as necessary, in case the peer
+ // is temporarily unavailable.
+ var err error
+ if member, err = n.ConnectToMember(node.Addr, 0); err != nil {
+ return err
+ }
+ }
+
+ member.RaftMember = node
+ err := n.cluster.AddMember(member)
+ if err != nil {
+ if member.Conn != nil {
+ _ = member.Conn.Close()
+ }
+ return err
+ }
+ return nil
+}
+
+// registerNodes registers a set of nodes in the cluster
+func (n *Node) registerNodes(nodes []*api.RaftMember) error {
+ for _, node := range nodes {
+ if err := n.registerNode(node); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ProposeValue calls Propose on the raft node and waits
+// for the value to be committed before returning
+func (n *Node) ProposeValue(ctx context.Context, storeAction []*api.StoreAction, cb func()) error {
+ _, err := n.processInternalRaftRequest(ctx, &api.InternalRaftRequest{Action: storeAction}, cb)
+ return err
+}
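+
+// Illustrative sketch: NewNode wires the node into its memory store as the
+// state.Proposer (store.NewMemoryStore(n)), so on the leader an ordinary
+// store update flows through ProposeValue and blocks until a quorum commits
+// it (c is an assumed *api.Cluster):
+//
+//    err := node.MemoryStore().Update(func(tx store.Tx) error {
+//        return store.CreateCluster(tx, c) // replicated via raft
+//    })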
+
+// GetVersion returns the sequence information for the current raft round.
+func (n *Node) GetVersion() *api.Version {
+ status := n.Node.Status()
+ return &api.Version{Index: status.Commit}
+}
+
+// GetMemberlist returns the current list of raft members in the cluster.
+func (n *Node) GetMemberlist() map[uint64]*api.RaftMember {
+ memberlist := make(map[uint64]*api.RaftMember)
+ members := n.cluster.Members()
+ leaderID := n.Leader()
+
+ for id, member := range members {
+ reachability := api.RaftMemberStatus_REACHABLE
+ leader := false
+
+ if member.RaftID != n.Config.ID {
+ connState, err := member.Conn.State()
+ if err != nil || connState != grpc.Ready {
+ reachability = api.RaftMemberStatus_UNREACHABLE
+ }
+ }
+
+ if member.RaftID == leaderID {
+ leader = true
+ }
+
+ memberlist[id] = &api.RaftMember{
+ RaftID: member.RaftID,
+ Addr: member.Addr,
+ Status: api.RaftMemberStatus{
+ Leader: leader,
+ Reachability: reachability,
+ },
+ }
+ }
+
+ return memberlist
+}
+
+// mustStop checks if the raft node must be stopped
+// because it was removed from the cluster by
+// other members
+func (n *Node) mustStop() bool {
+ return atomic.LoadUint32(&n.removed) == 1
+}
+
+// saveToStorage saves the hard state, entries, and snapshot to the raft storage and the WAL
+func (n *Node) saveToStorage(raftConfig *api.RaftConfig, hardState raftpb.HardState, entries []raftpb.Entry, snapshot raftpb.Snapshot) (err error) {
+ if !raft.IsEmptySnap(snapshot) {
+ if err := n.saveSnapshot(snapshot, raftConfig.KeepOldSnapshots); err != nil {
+ return ErrApplySnapshot
+ }
+ if err = n.raftStore.ApplySnapshot(snapshot); err != nil {
+ return ErrApplySnapshot
+ }
+ }
+
+ if err := n.wal.Save(hardState, entries); err != nil {
+ // TODO(aaronl): These error types should really wrap more
+ // detailed errors.
+ return ErrApplySnapshot
+ }
+
+ if err = n.raftStore.Append(entries); err != nil {
+ return ErrAppendEntry
+ }
+
+ return nil
+}
+
+// send sends a series of messages to members of the raft cluster
+func (n *Node) send(messages []raftpb.Message) error {
+ members := n.cluster.Members()
+
+ n.stopMu.RLock()
+ defer n.stopMu.RUnlock()
+
+ for _, m := range messages {
+ // Process locally
+ if m.To == n.Config.ID {
+ if err := n.Step(n.Ctx, m); err != nil {
+ return err
+ }
+ continue
+ }
+
+ n.asyncTasks.Add(1)
+ go n.sendToMember(members, m)
+ }
+
+ return nil
+}
+
+func (n *Node) sendToMember(members map[uint64]*membership.Member, m raftpb.Message) {
+ defer n.asyncTasks.Done()
+
+ if n.cluster.IsIDRemoved(m.To) {
+ // Should not send to removed members
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(n.Ctx, n.sendTimeout)
+ defer cancel()
+
+ var conn *membership.Member
+ if toMember, ok := members[m.To]; ok {
+ conn = toMember
+ } else {
+ // If we are being asked to send to a member that's not in
+ // our member list, that could indicate that the current leader
+ // was added while we were offline. Try to resolve its address.
+ n.Config.Logger.Warningf("sending message to an unrecognized member ID %s", identity.FormatNodeID(m.To))
+
+ // Choose a random member
+ var (
+ queryMember *membership.Member
+ id uint64
+ )
+ for id, queryMember = range members {
+ if id != n.Config.ID {
+ break
+ }
+ }
+
+ if queryMember == nil {
+ n.Config.Logger.Error("could not find cluster member to query for leader address")
+ return
+ }
+
+ resp, err := queryMember.ResolveAddress(ctx, &api.ResolveAddressRequest{RaftID: m.To})
+ if err != nil {
+ n.Config.Logger.Errorf("could not resolve address of member ID %s: %v", identity.FormatNodeID(m.To), err)
+ return
+ }
+ conn, err = n.ConnectToMember(resp.Addr, n.sendTimeout)
+ if err != nil {
+ n.Config.Logger.Errorf("could connect to member ID %s at %s: %v", identity.FormatNodeID(m.To), resp.Addr, err)
+ return
+ }
+ // The temporary connection is only used for this message.
+ // Eventually, we should catch up and add a long-lived
+ // connection to the member list.
+ defer conn.Conn.Close()
+ }
+
+ _, err := conn.ProcessRaftMessage(ctx, &api.ProcessRaftMessageRequest{Message: &m})
+ if err != nil {
+ if grpc.ErrorDesc(err) == ErrMemberRemoved.Error() {
+ atomic.StoreUint32(&n.removed, 1)
+ }
+ if m.Type == raftpb.MsgSnap {
+ n.ReportSnapshot(m.To, raft.SnapshotFailure)
+ }
+ if n.Node == nil {
+ panic("node is nil")
+ }
+ n.ReportUnreachable(m.To)
+ } else if m.Type == raftpb.MsgSnap {
+ n.ReportSnapshot(m.To, raft.SnapshotFinish)
+ }
+}
+
+type applyResult struct {
+ resp proto.Message
+ err error
+}
+
+// processInternalRaftRequest sends a message through consensus
+// and then waits for it to be applied to the server. It will
+// block until the change is performed or there is an error.
+func (n *Node) processInternalRaftRequest(ctx context.Context, r *api.InternalRaftRequest, cb func()) (proto.Message, error) {
+ r.ID = n.reqIDGen.Next()
+
+ ch := n.wait.register(r.ID, cb)
+
+ // Do this check after calling register to avoid a race.
+ if !n.IsLeader() {
+ n.wait.cancel(r.ID)
+ return nil, ErrLostLeadership
+ }
+
+ data, err := r.Marshal()
+ if err != nil {
+ n.wait.cancel(r.ID)
+ return nil, err
+ }
+
+ if len(data) > store.MaxTransactionBytes {
+ n.wait.cancel(r.ID)
+ return nil, ErrRequestTooLarge
+ }
+
+ err = n.Propose(ctx, data)
+ if err != nil {
+ n.wait.cancel(r.ID)
+ return nil, err
+ }
+
+ select {
+ case x, ok := <-ch:
+ if ok {
+ res := x.(*applyResult)
+ return res.resp, res.err
+ }
+ return nil, ErrLostLeadership
+ case <-n.stopCh:
+ n.wait.cancel(r.ID)
+ return nil, ErrStopped
+ case <-ctx.Done():
+ n.wait.cancel(r.ID)
+ return nil, ctx.Err()
+ }
+}
+
+// configure sends a configuration change through consensus and
+// then waits for it to be applied to the server. It will block
+// until the change is performed or there is an error.
+func (n *Node) configure(ctx context.Context, cc raftpb.ConfChange) error {
+ cc.ID = n.reqIDGen.Next()
+ ch := n.wait.register(cc.ID, nil)
+
+ if err := n.ProposeConfChange(ctx, cc); err != nil {
+ n.wait.trigger(cc.ID, nil)
+ return err
+ }
+
+ select {
+ case x := <-ch:
+ if err, ok := x.(error); ok {
+ return err
+ }
+ if x != nil {
+ log.G(ctx).Panic("raft: configuration change error, return type should always be error")
+ }
+ return nil
+ case <-ctx.Done():
+ n.wait.trigger(cc.ID, nil)
+ return ctx.Err()
+ case <-n.stopCh:
+ return ErrStopped
+ }
+}
+
+func (n *Node) processCommitted(entry raftpb.Entry) error {
+ // Process a normal entry
+ if entry.Type == raftpb.EntryNormal && entry.Data != nil {
+ if err := n.processEntry(entry); err != nil {
+ return err
+ }
+ }
+
+ // Process a configuration change (add/remove node)
+ if entry.Type == raftpb.EntryConfChange {
+ n.processConfChange(entry)
+ }
+
+ n.appliedIndex = entry.Index
+ return nil
+}
+
+func (n *Node) processEntry(entry raftpb.Entry) error {
+ r := &api.InternalRaftRequest{}
+ err := proto.Unmarshal(entry.Data, r)
+ if err != nil {
+ return err
+ }
+
+ if r.Action == nil {
+ return nil
+ }
+
+ if !n.wait.trigger(r.ID, &applyResult{resp: r, err: nil}) {
+ // There was no wait on this ID, meaning we don't have a
+ // transaction in progress that would be committed to the
+ // memory store by the "trigger" call. Either a different node
+ // wrote this to raft, or we wrote it before losing the leader
+ // position and cancelling the transaction. Create a new
+ // transaction to commit the data.
+
+ err := n.memoryStore.ApplyStoreActions(r.Action)
+ if err != nil {
+ log.G(context.Background()).Errorf("error applying actions from raft: %v", err)
+ }
+ }
+ return nil
+}
+
+func (n *Node) processConfChange(entry raftpb.Entry) {
+ var (
+ err error
+ cc raftpb.ConfChange
+ )
+
+ if err := proto.Unmarshal(entry.Data, &cc); err != nil {
+ n.wait.trigger(cc.ID, err)
+ }
+
+ if err := n.cluster.ValidateConfigurationChange(cc); err != nil {
+ n.wait.trigger(cc.ID, err)
+ }
+
+ switch cc.Type {
+ case raftpb.ConfChangeAddNode:
+ err = n.applyAddNode(cc)
+ case raftpb.ConfChangeRemoveNode:
+ err = n.applyRemoveNode(cc)
+ }
+
+ if err != nil {
+ n.wait.trigger(cc.ID, err)
+ }
+
+ n.confState = *n.ApplyConfChange(cc)
+ n.wait.trigger(cc.ID, nil)
+}
+
+// applyAddNode is called when we receive a ConfChange
+// from a member of the raft cluster; it adds the new
+// node to the existing raft cluster
+func (n *Node) applyAddNode(cc raftpb.ConfChange) error {
+ member := &api.RaftMember{}
+ err := proto.Unmarshal(cc.Context, member)
+ if err != nil {
+ return err
+ }
+
+ // ID must be non-zero
+ if member.RaftID == 0 {
+ return nil
+ }
+
+ if err = n.registerNode(member); err != nil {
+ return err
+ }
+ return nil
+}
+
+// applyRemoveNode is called when we receive a ConfChange
+// from a member of the raft cluster; it removes the node
+// from the existing raft cluster
+func (n *Node) applyRemoveNode(cc raftpb.ConfChange) (err error) {
+ // If the member being removed is the current leader,
+ // campaign to become the new leader ourselves.
+ if cc.NodeID == n.Leader() {
+ if err = n.Campaign(n.Ctx); err != nil {
+ return err
+ }
+ }
+
+ // Do not unregister yourself
+ if n.Config.ID == cc.NodeID {
+ return nil
+ }
+
+ return n.cluster.RemoveMember(cc.NodeID)
+}
+
+// ConnectToMember returns a member object with an initialized
+// connection to communicate with other raft members
+func (n *Node) ConnectToMember(addr string, timeout time.Duration) (*membership.Member, error) {
+ conn, err := dial(addr, "tcp", n.tlsCredentials, timeout)
+ if err != nil {
+ return nil, err
+ }
+
+ return &membership.Member{
+ RaftClient: api.NewRaftClient(conn),
+ Conn: conn,
+ }, nil
+}
+
+// SubscribeLeadership returns a channel to which leadership-change events
+// are sent in the form of raft.LeadershipState. A cancel func is also
+// returned; it should be called when the listener is no longer interested
+// in events.
+func (n *Node) SubscribeLeadership() (q chan events.Event, cancel func()) {
+ ch := events.NewChannel(0)
+ sink := events.Sink(events.NewQueue(ch))
+ n.leadershipBroadcast.Add(sink)
+ return ch.C, func() {
+ n.leadershipBroadcast.Remove(sink)
+ ch.Close()
+ sink.Close()
+ }
+}
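+
+// Illustrative sketch of consuming leadership events (ctx is an assumed
+// caller-provided context):
+//
+//    ch, cancel := node.SubscribeLeadership()
+//    defer cancel()
+//    for {
+//        select {
+//        case e := <-ch:
+//            if e.(LeadershipState) == IsLeader {
+//                // became leader: start leader-only tasks
+//            } else {
+//                // became follower: stop leader-only tasks
+//            }
+//        case <-ctx.Done():
+//            return
+//        }
+//    }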
+
+// createConfigChangeEnts creates a series of Raft entries (i.e.
+// EntryConfChange) to remove the set of given IDs from the cluster. The ID
+// `self` is _not_ removed, even if present in the set.
+// If `self` is not inside the given ids, it creates a Raft entry to add a
+// default member with the given `self`.
+func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
+ var ents []raftpb.Entry
+ next := index + 1
+ found := false
+ for _, id := range ids {
+ if id == self {
+ found = true
+ continue
+ }
+ cc := &raftpb.ConfChange{
+ Type: raftpb.ConfChangeRemoveNode,
+ NodeID: id,
+ }
+ data, err := cc.Marshal()
+ if err != nil {
+ log.G(context.Background()).Panicf("marshal configuration change should never fail: %v", err)
+ }
+ e := raftpb.Entry{
+ Type: raftpb.EntryConfChange,
+ Data: data,
+ Term: term,
+ Index: next,
+ }
+ ents = append(ents, e)
+ next++
+ }
+ if !found {
+ node := &api.RaftMember{RaftID: self}
+ meta, err := node.Marshal()
+ if err != nil {
+ log.G(context.Background()).Panicf("marshal member should never fail: %v", err)
+ }
+ cc := &raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: self,
+ Context: meta,
+ }
+ data, err := cc.Marshal()
+ if err != nil {
+ log.G(context.Background()).Panicf("marshal configuration change should never fail: %v", err)
+ }
+ e := raftpb.Entry{
+ Type: raftpb.EntryConfChange,
+ Data: data,
+ Term: term,
+ Index: next,
+ }
+ ents = append(ents, e)
+ }
+ return ents
+}
+
+// getIDs returns an ordered set of IDs included in the given snapshot and
+// the entries. The given snapshot/entries can contain two kinds of
+// ID-related entry:
+// - ConfChangeAddNode, in which case the contained ID will be added into the set.
+// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
+func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
+ ids := make(map[uint64]bool)
+ if snap != nil {
+ for _, id := range snap.Metadata.ConfState.Nodes {
+ ids[id] = true
+ }
+ }
+ for _, e := range ents {
+ if e.Type != raftpb.EntryConfChange {
+ continue
+ }
+ if snap != nil && e.Index < snap.Metadata.Index {
+ continue
+ }
+ var cc raftpb.ConfChange
+ if err := cc.Unmarshal(e.Data); err != nil {
+ log.G(context.Background()).Panicf("unmarshal configuration change should never fail: %v", err)
+ }
+ switch cc.Type {
+ case raftpb.ConfChangeAddNode:
+ ids[cc.NodeID] = true
+ case raftpb.ConfChangeRemoveNode:
+ delete(ids, cc.NodeID)
+ case raftpb.ConfChangeUpdateNode:
+ // do nothing
+ default:
+ log.G(context.Background()).Panic("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
+ }
+ }
+ var sids []uint64
+ for id := range ids {
+ sids = append(sids, id)
+ }
+ return sids
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/raft/storage.go b/vendor/src/github.com/docker/swarmkit/manager/state/raft/storage.go
new file mode 100644
index 0000000000..5804b02ec5
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/raft/storage.go
@@ -0,0 +1,325 @@
+package raft
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/coreos/etcd/raft"
+ "github.com/coreos/etcd/raft/raftpb"
+ "github.com/coreos/etcd/snap"
+ "github.com/coreos/etcd/wal"
+ "github.com/coreos/etcd/wal/walpb"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/log"
+ "github.com/docker/swarmkit/manager/state/raft/membership"
+ "github.com/docker/swarmkit/manager/state/store"
+ "golang.org/x/net/context"
+)
+
+func (n *Node) walDir() string {
+ return filepath.Join(n.StateDir, "wal")
+}
+
+func (n *Node) snapDir() string {
+ return filepath.Join(n.StateDir, "snap")
+}
+
+func (n *Node) loadAndStart(ctx context.Context, forceNewCluster bool) error {
+ walDir := n.walDir()
+ snapDir := n.snapDir()
+
+ if err := os.MkdirAll(snapDir, 0700); err != nil {
+ return fmt.Errorf("create snapshot directory error: %v", err)
+ }
+
+ // Create a snapshotter
+ n.snapshotter = snap.New(snapDir)
+
+ if !wal.Exist(walDir) {
+ raftNode := &api.RaftMember{
+ RaftID: n.Config.ID,
+ Addr: n.Address,
+ }
+ metadata, err := raftNode.Marshal()
+ if err != nil {
+ return fmt.Errorf("error marshalling raft node: %v", err)
+ }
+ n.wal, err = wal.Create(walDir, metadata)
+ if err != nil {
+ return fmt.Errorf("create wal error: %v", err)
+ }
+
+ n.cluster.AddMember(&membership.Member{RaftMember: raftNode})
+ n.startNodePeers = []raft.Peer{{ID: n.Config.ID, Context: metadata}}
+
+ return nil
+ }
+
+ // Load snapshot data
+ snapshot, err := n.snapshotter.Load()
+ if err != nil && err != snap.ErrNoSnapshot {
+ return err
+ }
+
+ if snapshot != nil {
+ // Load the snapshot data into the store
+ if err := n.restoreFromSnapshot(snapshot.Data, forceNewCluster); err != nil {
+ return err
+ }
+ }
+
+ // Read logs to fully catch up store
+ if err := n.readWAL(ctx, snapshot, forceNewCluster); err != nil {
+ return err
+ }
+
+ n.Node = raft.RestartNode(n.Config)
+ return nil
+}
+
+func (n *Node) readWAL(ctx context.Context, snapshot *raftpb.Snapshot, forceNewCluster bool) (err error) {
+ var (
+ walsnap walpb.Snapshot
+ metadata []byte
+ st raftpb.HardState
+ ents []raftpb.Entry
+ )
+
+ if snapshot != nil {
+ walsnap.Index = snapshot.Metadata.Index
+ walsnap.Term = snapshot.Metadata.Term
+ }
+
+ repaired := false
+ for {
+ if n.wal, err = wal.Open(n.walDir(), walsnap); err != nil {
+ return fmt.Errorf("open wal error: %v", err)
+ }
+ if metadata, st, ents, err = n.wal.ReadAll(); err != nil {
+ if err := n.wal.Close(); err != nil {
+ return err
+ }
+ // we can only repair ErrUnexpectedEOF and we never repair twice.
+ if repaired || err != io.ErrUnexpectedEOF {
+ return fmt.Errorf("read wal error (%v) and cannot be repaired", err)
+ }
+ if !wal.Repair(n.walDir()) {
+ return fmt.Errorf("WAL error (%v) cannot be repaired", err)
+ }
+ log.G(ctx).Infof("repaired WAL error (%v)", err)
+ repaired = true
+ continue
+ }
+ break
+ }
+
+ defer func() {
+ if err != nil {
+ if walErr := n.wal.Close(); walErr != nil {
+ n.Config.Logger.Errorf("error closing raft WAL: %v", walErr)
+ }
+ }
+ }()
+
+ var raftNode api.RaftMember
+ if err := raftNode.Unmarshal(metadata); err != nil {
+ return fmt.Errorf("error unmarshalling wal metadata: %v", err)
+ }
+ n.Config.ID = raftNode.RaftID
+
+ if forceNewCluster {
+ // discard the previously uncommitted entries
+ for i, ent := range ents {
+ if ent.Index > st.Commit {
+ log.G(context.Background()).Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
+ ents = ents[:i]
+ break
+ }
+ }
+
+ // force append the configuration change entries
+ toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(n.Config.ID), st.Term, st.Commit)
+ ents = append(ents, toAppEnts...)
+
+ // force commit newly appended entries
+ err := n.wal.Save(st, toAppEnts)
+ if err != nil {
+ log.G(context.Background()).Fatalf("%v", err)
+ }
+ if len(toAppEnts) != 0 {
+ st.Commit = toAppEnts[len(toAppEnts)-1].Index
+ }
+ }
+
+ if snapshot != nil {
+ if err := n.raftStore.ApplySnapshot(*snapshot); err != nil {
+ return err
+ }
+ }
+ if err := n.raftStore.SetHardState(st); err != nil {
+ return err
+ }
+ if err := n.raftStore.Append(ents); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (n *Node) saveSnapshot(snapshot raftpb.Snapshot, keepOldSnapshots uint64) error {
+ err := n.wal.SaveSnapshot(walpb.Snapshot{
+ Index: snapshot.Metadata.Index,
+ Term: snapshot.Metadata.Term,
+ })
+ if err != nil {
+ return err
+ }
+ err = n.snapshotter.SaveSnap(snapshot)
+ if err != nil {
+ return err
+ }
+ err = n.wal.ReleaseLockTo(snapshot.Metadata.Index)
+ if err != nil {
+ return err
+ }
+
+ // Delete any older snapshots
+ curSnapshot := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, ".snap")
+
+ dirents, err := ioutil.ReadDir(n.snapDir())
+ if err != nil {
+ return err
+ }
+
+ var snapshots []string
+ for _, dirent := range dirents {
+ if strings.HasSuffix(dirent.Name(), ".snap") {
+ snapshots = append(snapshots, dirent.Name())
+ }
+ }
+
+ // Sort snapshot filenames in reverse lexical order
+ sort.Sort(sort.Reverse(sort.StringSlice(snapshots)))
+
+ // Ignore any snapshots that are older than the current snapshot.
+ // Delete the others. Rather than doing lexical comparisons, we look
+ // at what exists before/after the current snapshot in the slice.
+ // This means that if the current snapshot doesn't appear in the
+ // directory for some strange reason, we won't delete anything, which
+ // is the safe behavior.
+ var (
+ afterCurSnapshot bool
+ removeErr error
+ )
+ for i, snapFile := range snapshots {
+ if afterCurSnapshot {
+ if uint64(len(snapshots)-i) <= keepOldSnapshots {
+ return removeErr
+ }
+ err := os.Remove(filepath.Join(n.snapDir(), snapFile))
+ if err != nil && removeErr == nil {
+ removeErr = err
+ }
+ } else if snapFile == curSnapshot {
+ afterCurSnapshot = true
+ }
+ }
+
+ return removeErr
+}
+
+func (n *Node) doSnapshot(raftConfig *api.RaftConfig) {
+ snapshot := api.Snapshot{Version: api.Snapshot_V0}
+ for _, member := range n.cluster.Members() {
+ snapshot.Membership.Members = append(snapshot.Membership.Members,
+ &api.RaftMember{
+ RaftID: member.RaftID,
+ Addr: member.Addr,
+ })
+ }
+ snapshot.Membership.Removed = n.cluster.Removed()
+
+ viewStarted := make(chan struct{})
+ n.asyncTasks.Add(1)
+ n.snapshotInProgress = make(chan uint64, 1) // buffered in case Shutdown is called during the snapshot
+ go func(appliedIndex, snapshotIndex uint64) {
+ defer func() {
+ n.asyncTasks.Done()
+ n.snapshotInProgress <- snapshotIndex
+ }()
+
+ var err error
+ n.memoryStore.View(func(tx store.ReadTx) {
+ close(viewStarted)
+
+ var storeSnapshot *api.StoreSnapshot
+ storeSnapshot, err = n.memoryStore.Save(tx)
+ snapshot.Store = *storeSnapshot
+ })
+ if err != nil {
+ n.Config.Logger.Error(err)
+ return
+ }
+
+ d, err := snapshot.Marshal()
+ if err != nil {
+ n.Config.Logger.Error(err)
+ return
+ }
+ snap, err := n.raftStore.CreateSnapshot(appliedIndex, &n.confState, d)
+ if err == nil {
+ if err := n.saveSnapshot(snap, raftConfig.KeepOldSnapshots); err != nil {
+ n.Config.Logger.Error(err)
+ return
+ }
+ snapshotIndex = appliedIndex
+
+ if appliedIndex > raftConfig.LogEntriesForSlowFollowers {
+ err := n.raftStore.Compact(appliedIndex - raftConfig.LogEntriesForSlowFollowers)
+ if err != nil && err != raft.ErrCompacted {
+ n.Config.Logger.Error(err)
+ }
+ }
+ } else if err != raft.ErrSnapOutOfDate {
+ n.Config.Logger.Error(err)
+ }
+ }(n.appliedIndex, n.snapshotIndex)
+
+ // Wait for the goroutine to establish a read transaction, to make
+ // sure it sees the state as of this moment.
+ <-viewStarted
+}
+
+func (n *Node) restoreFromSnapshot(data []byte, forceNewCluster bool) error {
+ var snapshot api.Snapshot
+ if err := snapshot.Unmarshal(data); err != nil {
+ return err
+ }
+ if snapshot.Version != api.Snapshot_V0 {
+ return fmt.Errorf("unrecognized snapshot version %d", snapshot.Version)
+ }
+
+ if err := n.memoryStore.Restore(&snapshot.Store); err != nil {
+ return err
+ }
+
+ n.cluster.Clear()
+
+ if !forceNewCluster {
+ for _, member := range snapshot.Membership.Members {
+ if err := n.registerNode(&api.RaftMember{RaftID: member.RaftID, Addr: member.Addr}); err != nil {
+ return err
+ }
+ }
+ for _, removedMember := range snapshot.Membership.Removed {
+ n.cluster.RemoveMember(removedMember)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/raft/util.go b/vendor/src/github.com/docker/swarmkit/manager/state/raft/util.go
new file mode 100644
index 0000000000..ad1b064dd5
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/raft/util.go
@@ -0,0 +1,82 @@
+package raft
+
+import (
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/store"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+)
+
+// dial returns a grpc client connection
+func dial(addr string, protocol string, creds credentials.TransportAuthenticator, timeout time.Duration) (*grpc.ClientConn, error) {
+ grpcOptions := []grpc.DialOption{
+ grpc.WithBackoffMaxDelay(2 * time.Second),
+ grpc.WithTransportCredentials(creds),
+ }
+
+ if timeout != 0 {
+ grpcOptions = append(grpcOptions, grpc.WithTimeout(timeout))
+ }
+
+ return grpc.Dial(addr, grpcOptions...)
+}
+
+// Register registers the node raft server
+func Register(server *grpc.Server, node *Node) {
+ api.RegisterRaftServer(server, node)
+}
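+
+// Illustrative sketch of exposing the raft service (the gRPC server options
+// are assumptions; creds matches the credentials used for outbound dials,
+// and lis is an assumed net.Listener):
+//
+//    server := grpc.NewServer(grpc.Creds(creds))
+//    Register(server, node)
+//    go server.Serve(lis)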
+
+// WaitForLeader waits until the node observes a leader in the cluster. It
+// returns an error if ctx is cancelled before a leader appears.
+func WaitForLeader(ctx context.Context, n *Node) error {
+ l := n.Leader()
+ if l != 0 {
+ return nil
+ }
+ ticker := time.NewTicker(50 * time.Millisecond)
+ defer ticker.Stop()
+ for l == 0 {
+ select {
+ case <-ticker.C:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ l = n.Leader()
+ }
+ return nil
+}
+
+// WaitForCluster waits until the node observes that the cluster-wide config
+// is committed to raft. This ensures that we can see and serve information
+// related to the cluster.
+func WaitForCluster(ctx context.Context, n *Node) (cluster *api.Cluster, err error) {
+ watch, cancel := state.Watch(n.MemoryStore().WatchQueue(), state.EventCreateCluster{})
+ defer cancel()
+
+ var clusters []*api.Cluster
+ n.MemoryStore().View(func(readTx store.ReadTx) {
+ clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ if len(clusters) == 1 {
+ cluster = clusters[0]
+ } else {
+ select {
+ case e := <-watch:
+ cluster = e.(state.EventCreateCluster).Cluster
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+
+ return cluster, nil
+}
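+
+// Illustrative startup sketch: wait for leader election and for the cluster
+// object to be committed before serving cluster-level requests:
+//
+//    waitCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+//    defer cancel()
+//    if err := WaitForLeader(waitCtx, node); err != nil {
+//        return err
+//    }
+//    cluster, err := WaitForCluster(waitCtx, node)
+//    if err != nil {
+//        return err
+//    }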
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/raft/wait.go b/vendor/src/github.com/docker/swarmkit/manager/state/raft/wait.go
new file mode 100644
index 0000000000..297f0cf970
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/raft/wait.go
@@ -0,0 +1,70 @@
+package raft
+
+import (
+ "fmt"
+ "sync"
+)
+
+type waitItem struct {
+ // channel used to wake up the waiter
+ ch chan interface{}
+ // callback which is called synchronously when the wait is triggered
+ cb func()
+}
+
+type wait struct {
+ l sync.Mutex
+ m map[uint64]waitItem
+}
+
+func newWait() *wait {
+ return &wait{m: make(map[uint64]waitItem)}
+}
+
+func (w *wait) register(id uint64, cb func()) <-chan interface{} {
+ w.l.Lock()
+ defer w.l.Unlock()
+ _, ok := w.m[id]
+ if !ok {
+ ch := make(chan interface{}, 1)
+ w.m[id] = waitItem{ch: ch, cb: cb}
+ return ch
+ }
+ panic(fmt.Sprintf("duplicate id %x", id))
+}
+
+func (w *wait) trigger(id uint64, x interface{}) bool {
+ w.l.Lock()
+ waitItem, ok := w.m[id]
+ delete(w.m, id)
+ w.l.Unlock()
+ if ok {
+ if waitItem.cb != nil {
+ waitItem.cb()
+ }
+ waitItem.ch <- x
+ close(waitItem.ch)
+ return true
+ }
+ return false
+}
+
+func (w *wait) cancel(id uint64) {
+ w.l.Lock()
+ waitItem, ok := w.m[id]
+ delete(w.m, id)
+ w.l.Unlock()
+ if ok {
+ close(waitItem.ch)
+ }
+}
+
+func (w *wait) cancelAll() {
+ w.l.Lock()
+ defer w.l.Unlock()
+
+ for id, waitItem := range w.m {
+ delete(w.m, id)
+ close(waitItem.ch)
+ }
+}
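+
+// Illustrative sketch of the register/trigger/cancel contract used by
+// processInternalRaftRequest and configure (w is a *wait, id a request ID):
+//
+//    ch := w.register(id, nil) // reserve a slot; panics on a duplicate id
+//    // ...the proposal travels through raft...
+//    // on apply:   w.trigger(id, result) // runs cb, delivers result on ch
+//    // on failure: w.cancel(id)          // closes ch without a value
+//    res, ok := <-ch
+//    if !ok {
+//        // cancelled: leadership lost or node stopped
+//    }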
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/store/apply.go b/vendor/src/github.com/docker/swarmkit/manager/state/store/apply.go
new file mode 100644
index 0000000000..cafcdc276d
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/store/apply.go
@@ -0,0 +1,48 @@
+package store
+
+import (
+ "errors"
+
+ "github.com/docker/go-events"
+ "github.com/docker/swarmkit/manager/state"
+)
+
+// Apply takes an item from the event stream of one Store and applies it to
+// a second Store.
+func Apply(store *MemoryStore, item events.Event) (err error) {
+ return store.Update(func(tx Tx) error {
+ switch v := item.(type) {
+ case state.EventCreateTask:
+ return CreateTask(tx, v.Task)
+ case state.EventUpdateTask:
+ return UpdateTask(tx, v.Task)
+ case state.EventDeleteTask:
+ return DeleteTask(tx, v.Task.ID)
+
+ case state.EventCreateService:
+ return CreateService(tx, v.Service)
+ case state.EventUpdateService:
+ return UpdateService(tx, v.Service)
+ case state.EventDeleteService:
+ return DeleteService(tx, v.Service.ID)
+
+ case state.EventCreateNetwork:
+ return CreateNetwork(tx, v.Network)
+ case state.EventUpdateNetwork:
+ return UpdateNetwork(tx, v.Network)
+ case state.EventDeleteNetwork:
+ return DeleteNetwork(tx, v.Network.ID)
+
+ case state.EventCreateNode:
+ return CreateNode(tx, v.Node)
+ case state.EventUpdateNode:
+ return UpdateNode(tx, v.Node)
+ case state.EventDeleteNode:
+ return DeleteNode(tx, v.Node.ID)
+
+ case state.EventCommit:
+ return nil
+ }
+ return errors.New("unrecognized event type")
+ })
+}
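+
+// Illustrative sketch: mirroring one store into another by replaying its
+// event stream (src and dst are assumed *MemoryStore values; passing no
+// event specifiers to state.Watch is assumed to select all events):
+//
+//    q := src.WatchQueue()
+//    ch, cancel := state.Watch(q)
+//    defer cancel()
+//    for ev := range ch {
+//        if err := Apply(dst, ev); err != nil {
+//            return err
+//        }
+//    }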
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/store/by.go b/vendor/src/github.com/docker/swarmkit/manager/state/store/by.go
new file mode 100644
index 0000000000..e4bb335229
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/store/by.go
@@ -0,0 +1,113 @@
+package store
+
+import "github.com/docker/swarmkit/api"
+
+// By is an interface type passed to Find methods. Implementations must be
+// defined in this package.
+type By interface {
+ // isBy allows this interface to only be satisfied by certain internal
+ // types.
+ isBy()
+}
+
+type byAll struct{}
+
+func (a byAll) isBy() {
+}
+
+// All is an argument that can be passed to Find to list all items in the
+// set.
+var All byAll
+
+type byIDPrefix string
+
+func (b byIDPrefix) isBy() {
+}
+
+// ByIDPrefix creates an object to pass to Find to select by ID prefix.
+func ByIDPrefix(idPrefix string) By {
+ return byIDPrefix(idPrefix)
+}
+
+type byName string
+
+func (b byName) isBy() {
+}
+
+// ByName creates an object to pass to Find to select by name.
+func ByName(name string) By {
+ return byName(name)
+}
+
+type byCN string
+
+func (b byCN) isBy() {
+}
+
+// ByCN creates an object to pass to Find to select by CN.
+func ByCN(name string) By {
+ return byCN(name)
+}
+
+type byService string
+
+func (b byService) isBy() {
+}
+
+// ByServiceID creates an object to pass to Find to select by service.
+func ByServiceID(serviceID string) By {
+ return byService(serviceID)
+}
+
+type byNode string
+
+func (b byNode) isBy() {
+}
+
+// ByNodeID creates an object to pass to Find to select by node.
+func ByNodeID(nodeID string) By {
+ return byNode(nodeID)
+}
+
+type bySlot struct {
+ serviceID string
+ slot uint64
+}
+
+func (b bySlot) isBy() {
+}
+
+// BySlot creates an object to pass to Find to select by slot.
+func BySlot(serviceID string, slot uint64) By {
+ return bySlot{serviceID: serviceID, slot: slot}
+}
+
+type byDesiredState api.TaskState
+
+func (b byDesiredState) isBy() {
+}
+
+// ByDesiredState creates an object to pass to Find to select by desired state.
+func ByDesiredState(state api.TaskState) By {
+ return byDesiredState(state)
+}
+
+type byRole api.NodeRole
+
+func (b byRole) isBy() {
+}
+
+// ByRole creates an object to pass to Find to select by role.
+func ByRole(role api.NodeRole) By {
+ return byRole(role)
+}
+
+type byMembership api.NodeSpec_Membership
+
+func (b byMembership) isBy() {
+}
+
+// ByMembership creates an object to pass to Find to select by Membership.
+func ByMembership(membership api.NodeSpec_Membership) By {
+ return byMembership(membership)
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/store/clusters.go b/vendor/src/github.com/docker/swarmkit/manager/state/store/clusters.go
new file mode 100644
index 0000000000..f37d58dc2a
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/store/clusters.go
@@ -0,0 +1,227 @@
+package store
+
+import (
+ "strings"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/manager/state"
+ memdb "github.com/hashicorp/go-memdb"
+)
+
+const (
+ tableCluster = "cluster"
+
+ // DefaultClusterName is the default name to use for the cluster
+ // object.
+ DefaultClusterName = "default"
+)
+
+func init() {
+ register(ObjectStoreConfig{
+ Name: tableCluster,
+ Table: &memdb.TableSchema{
+ Name: tableCluster,
+ Indexes: map[string]*memdb.IndexSchema{
+ indexID: {
+ Name: indexID,
+ Unique: true,
+ Indexer: clusterIndexerByID{},
+ },
+ indexName: {
+ Name: indexName,
+ Unique: true,
+ Indexer: clusterIndexerByName{},
+ },
+ },
+ },
+ Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
+ var err error
+ snapshot.Clusters, err = FindClusters(tx, All)
+ return err
+ },
+ Restore: func(tx Tx, snapshot *api.StoreSnapshot) error {
+ clusters, err := FindClusters(tx, All)
+ if err != nil {
+ return err
+ }
+ for _, n := range clusters {
+ if err := DeleteCluster(tx, n.ID); err != nil {
+ return err
+ }
+ }
+ for _, n := range snapshot.Clusters {
+ if err := CreateCluster(tx, n); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ ApplyStoreAction: func(tx Tx, sa *api.StoreAction) error {
+ switch v := sa.Target.(type) {
+ case *api.StoreAction_Cluster:
+ obj := v.Cluster
+ switch sa.Action {
+ case api.StoreActionKindCreate:
+ return CreateCluster(tx, obj)
+ case api.StoreActionKindUpdate:
+ return UpdateCluster(tx, obj)
+ case api.StoreActionKindRemove:
+ return DeleteCluster(tx, obj.ID)
+ }
+ }
+ return errUnknownStoreAction
+ },
+ NewStoreAction: func(c state.Event) (api.StoreAction, error) {
+ var sa api.StoreAction
+ switch v := c.(type) {
+ case state.EventCreateCluster:
+ sa.Action = api.StoreActionKindCreate
+ sa.Target = &api.StoreAction_Cluster{
+ Cluster: v.Cluster,
+ }
+ case state.EventUpdateCluster:
+ sa.Action = api.StoreActionKindUpdate
+ sa.Target = &api.StoreAction_Cluster{
+ Cluster: v.Cluster,
+ }
+ case state.EventDeleteCluster:
+ sa.Action = api.StoreActionKindRemove
+ sa.Target = &api.StoreAction_Cluster{
+ Cluster: v.Cluster,
+ }
+ default:
+ return api.StoreAction{}, errUnknownStoreAction
+ }
+ return sa, nil
+ },
+ })
+}
+
+type clusterEntry struct {
+ *api.Cluster
+}
+
+func (c clusterEntry) ID() string {
+ return c.Cluster.ID
+}
+
+func (c clusterEntry) Meta() api.Meta {
+ return c.Cluster.Meta
+}
+
+func (c clusterEntry) SetMeta(meta api.Meta) {
+ c.Cluster.Meta = meta
+}
+
+func (c clusterEntry) Copy() Object {
+ return clusterEntry{c.Cluster.Copy()}
+}
+
+func (c clusterEntry) EventCreate() state.Event {
+ return state.EventCreateCluster{Cluster: c.Cluster}
+}
+
+func (c clusterEntry) EventUpdate() state.Event {
+ return state.EventUpdateCluster{Cluster: c.Cluster}
+}
+
+func (c clusterEntry) EventDelete() state.Event {
+ return state.EventDeleteCluster{Cluster: c.Cluster}
+}
+
+// CreateCluster adds a new cluster to the store.
+// Returns ErrExist if the ID is already taken.
+func CreateCluster(tx Tx, c *api.Cluster) error {
+ // Ensure the name is not already in use.
+ if tx.lookup(tableCluster, indexName, strings.ToLower(c.Spec.Annotations.Name)) != nil {
+ return ErrNameConflict
+ }
+
+ return tx.create(tableCluster, clusterEntry{c})
+}
+
+// UpdateCluster updates an existing cluster in the store.
+// Returns ErrNotExist if the cluster doesn't exist.
+func UpdateCluster(tx Tx, c *api.Cluster) error {
+ // Ensure the name is either not in use or already used by this same Cluster.
+ if existing := tx.lookup(tableCluster, indexName, strings.ToLower(c.Spec.Annotations.Name)); existing != nil {
+ if existing.ID() != c.ID {
+ return ErrNameConflict
+ }
+ }
+
+ return tx.update(tableCluster, clusterEntry{c})
+}
+
+// DeleteCluster removes a cluster from the store.
+// Returns ErrNotExist if the cluster doesn't exist.
+func DeleteCluster(tx Tx, id string) error {
+ return tx.delete(tableCluster, id)
+}
+
+// GetCluster looks up a cluster by ID.
+// Returns nil if the cluster doesn't exist.
+func GetCluster(tx ReadTx, id string) *api.Cluster {
+ n := tx.get(tableCluster, id)
+ if n == nil {
+ return nil
+ }
+ return n.(clusterEntry).Cluster
+}
+
+// FindClusters selects a set of clusters and returns them.
+func FindClusters(tx ReadTx, by By) ([]*api.Cluster, error) {
+ checkType := func(by By) error {
+ switch by.(type) {
+ case byName, byIDPrefix:
+ return nil
+ default:
+ return ErrInvalidFindBy
+ }
+ }
+
+ clusterList := []*api.Cluster{}
+ appendResult := func(o Object) {
+ clusterList = append(clusterList, o.(clusterEntry).Cluster)
+ }
+
+ err := tx.find(tableCluster, by, checkType, appendResult)
+ return clusterList, err
+}
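+
+// Illustrative sketch: By selectors such as ByName (from by.go) combine
+// with the table-specific Find functions inside a read transaction (s is
+// an assumed *MemoryStore):
+//
+//    s.View(func(tx ReadTx) {
+//        clusters, err := FindClusters(tx, ByName(DefaultClusterName))
+//        // err is ErrInvalidFindBy for selectors this table doesn't support
+//        _ = clusters
+//    })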
+
+type clusterIndexerByID struct{}
+
+func (ci clusterIndexerByID) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ci clusterIndexerByID) FromObject(obj interface{}) (bool, []byte, error) {
+ c, ok := obj.(clusterEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ val := c.Cluster.ID + "\x00"
+ return true, []byte(val), nil
+}
+
+func (ci clusterIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ return prefixFromArgs(args...)
+}
+
+type clusterIndexerByName struct{}
+
+func (ci clusterIndexerByName) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ci clusterIndexerByName) FromObject(obj interface{}) (bool, []byte, error) {
+ c, ok := obj.(clusterEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ return true, []byte(strings.ToLower(c.Spec.Annotations.Name) + "\x00"), nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/store/combinators.go b/vendor/src/github.com/docker/swarmkit/manager/state/store/combinators.go
new file mode 100644
index 0000000000..7cea6b4370
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/store/combinators.go
@@ -0,0 +1,14 @@
+package store
+
+type orCombinator struct {
+ bys []By
+}
+
+func (b orCombinator) isBy() {
+}
+
+// Or returns a combinator that applies OR logic on all the supplied By
+// arguments.
+func Or(bys ...By) By {
+ return orCombinator{bys: bys}
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/store/memory.go b/vendor/src/github.com/docker/swarmkit/manager/state/store/memory.go
new file mode 100644
index 0000000000..ec3b4e6fe6
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/store/memory.go
@@ -0,0 +1,731 @@
+package store
+
+import (
+ "errors"
+ "fmt"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/docker/go-events"
+ "github.com/docker/swarmkit/api"
+ pb "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/manager/state"
+ "github.com/docker/swarmkit/manager/state/watch"
+ "github.com/docker/swarmkit/protobuf/ptypes"
+ memdb "github.com/hashicorp/go-memdb"
+ "golang.org/x/net/context"
+)
+
+const (
+ indexID = "id"
+ indexName = "name"
+ indexServiceID = "serviceid"
+ indexServiceMode = "servicemode"
+ indexNodeID = "nodeid"
+ indexSlot = "slot"
+ indexCN = "cn"
+ indexDesiredState = "desiredstate"
+ indexRole = "role"
+ indexMembership = "membership"
+
+ prefix = "_prefix"
+
+ // MaxChangesPerTransaction is the number of changes after which a new
+ // transaction should be started within Batch.
+ MaxChangesPerTransaction = 200
+
+ // MaxTransactionBytes is the maximum serialized transaction size.
+ MaxTransactionBytes = 1.5 * 1024 * 1024
+)
+
+var (
+ // ErrExist is returned by create operations if the provided ID is already
+ // taken.
+ ErrExist = errors.New("object already exists")
+
+ // ErrNotExist is returned by altering operations (update, delete) if the
+ // provided ID is not found.
+ ErrNotExist = errors.New("object does not exist")
+
+ // ErrNameConflict is returned by create/update if the object name is
+ // already in use by another object.
+ ErrNameConflict = errors.New("name conflicts with an existing object")
+
+ // ErrInvalidFindBy is returned if an unrecognized type is passed to Find.
+ ErrInvalidFindBy = errors.New("invalid find argument type")
+
+ // ErrSequenceConflict is returned when trying to update an object
+ // whose sequence information does not match the version in the store.
+ ErrSequenceConflict = errors.New("update out of sequence")
+
+ objectStorers []ObjectStoreConfig
+ schema = &memdb.DBSchema{
+ Tables: map[string]*memdb.TableSchema{},
+ }
+ errUnknownStoreAction = errors.New("unknown store action")
+)
+
+func register(os ObjectStoreConfig) {
+ objectStorers = append(objectStorers, os)
+ schema.Tables[os.Name] = os.Table
+}
+
+// MemoryStore is a concurrency-safe, in-memory implementation of the Store
+// interface.
+type MemoryStore struct {
+ // updateLock must be held during an update transaction.
+ updateLock sync.Mutex
+
+ memDB *memdb.MemDB
+ queue *watch.Queue
+
+ proposer state.Proposer
+}
+
+// NewMemoryStore returns an in-memory store. The argument is an optional
+// Proposer which will be used to propagate changes to other members in a
+// cluster.
+func NewMemoryStore(proposer state.Proposer) *MemoryStore {
+ memDB, err := memdb.NewMemDB(schema)
+ if err != nil {
+ // This shouldn't fail
+ panic(err)
+ }
+
+ return &MemoryStore{
+ memDB: memDB,
+ queue: watch.NewQueue(0),
+ proposer: proposer,
+ }
+}
+
+func fromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ arg, ok := args[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+ }
+ // Add the null character as a terminator
+ arg += "\x00"
+ return []byte(arg), nil
+}
+
+func prefixFromArgs(args ...interface{}) ([]byte, error) {
+ val, err := fromArgs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Strip the null terminator, the rest is a prefix
+ n := len(val)
+ if n > 0 {
+ return val[:n-1], nil
+ }
+ return val, nil
+}
+
+// ReadTx is a read transaction. Note that a transaction does not imply
+// any internal batching. It only means that the transaction presents a
+// consistent view of the data that cannot be affected by other
+// transactions.
+type ReadTx interface {
+ lookup(table, index, id string) Object
+ get(table, id string) Object
+ find(table string, by By, checkType func(By) error, appendResult func(Object)) error
+}
+
+type readTx struct {
+ memDBTx *memdb.Txn
+}
+
+// View executes a read transaction.
+func (s *MemoryStore) View(cb func(ReadTx)) {
+ memDBTx := s.memDB.Txn(false)
+
+ readTx := readTx{
+ memDBTx: memDBTx,
+ }
+ cb(readTx)
+ memDBTx.Commit()
+}
+
+// Tx is a read/write transaction. Note that a transaction does not imply
+// any internal batching. The purpose of this transaction is to give the
+// user a guarantee that their changes won't be visible to other transactions
+// until the transaction is over.
+type Tx interface {
+ ReadTx
+ create(table string, o Object) error
+ update(table string, o Object) error
+ delete(table, id string) error
+}
+
+type tx struct {
+ readTx
+ curVersion *api.Version
+ changelist []state.Event
+}
+
+// ApplyStoreActions updates a store based on StoreAction messages.
+func (s *MemoryStore) ApplyStoreActions(actions []*api.StoreAction) error {
+ s.updateLock.Lock()
+ memDBTx := s.memDB.Txn(true)
+
+ tx := tx{
+ readTx: readTx{
+ memDBTx: memDBTx,
+ },
+ }
+
+ for _, sa := range actions {
+ if err := applyStoreAction(&tx, sa); err != nil {
+ memDBTx.Abort()
+ s.updateLock.Unlock()
+ return err
+ }
+ }
+
+ memDBTx.Commit()
+
+ for _, c := range tx.changelist {
+ s.queue.Publish(c)
+ }
+ if len(tx.changelist) != 0 {
+ s.queue.Publish(state.EventCommit{})
+ }
+ s.updateLock.Unlock()
+ return nil
+}
+
+func applyStoreAction(tx Tx, sa *api.StoreAction) error {
+ for _, os := range objectStorers {
+ err := os.ApplyStoreAction(tx, sa)
+ if err != errUnknownStoreAction {
+ return err
+ }
+ }
+
+ return errors.New("unrecognized action type")
+}
+
+func (s *MemoryStore) update(proposer state.Proposer, cb func(Tx) error) error {
+ s.updateLock.Lock()
+ memDBTx := s.memDB.Txn(true)
+
+ var curVersion *api.Version
+
+ if proposer != nil {
+ curVersion = proposer.GetVersion()
+ }
+
+ var tx tx
+ tx.init(memDBTx, curVersion)
+
+ err := cb(&tx)
+
+ if err == nil {
+ if proposer == nil {
+ memDBTx.Commit()
+ } else {
+ var sa []*api.StoreAction
+ sa, err = tx.changelistStoreActions()
+
+ if err == nil {
+ if sa != nil {
+ err = proposer.ProposeValue(context.Background(), sa, func() {
+ memDBTx.Commit()
+ })
+ } else {
+ memDBTx.Commit()
+ }
+ }
+ }
+ }
+
+ if err == nil {
+ for _, c := range tx.changelist {
+ s.queue.Publish(c)
+ }
+ if len(tx.changelist) != 0 {
+ s.queue.Publish(state.EventCommit{})
+ }
+ } else {
+ memDBTx.Abort()
+ }
+ s.updateLock.Unlock()
+ return err
+}
+
+func (s *MemoryStore) updateLocal(cb func(Tx) error) error {
+ return s.update(nil, cb)
+}
+
+// Update executes a read/write transaction.
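+//
+// A minimal sketch (the node value is illustrative):
+//
+//	err := s.Update(func(tx Tx) error {
+//		return CreateNode(tx, &api.Node{ID: "id1"})
+//	})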
+func (s *MemoryStore) Update(cb func(Tx) error) error {
+ return s.update(s.proposer, cb)
+}
+
+// Batch provides a mechanism to batch updates to a store.
+type Batch struct {
+ tx tx
+ store *MemoryStore
+ // applied counts the times Update has run successfully
+ applied int
+ // committed is the number of times Update had run successfully as of
+ // the time pending changes were committed.
+ committed int
+ // transactionSizeEstimate is the running count of the size of the
+ // current transaction.
+ transactionSizeEstimate int
+ // changelistLen is the last known length of the transaction's
+ // changelist.
+ changelistLen int
+ err error
+}
+
+// Update adds a single change to a batch. Each call to Update is atomic, but
+// different calls to Update may be spread across multiple transactions to
+// keep each transaction below the size limit.
+func (batch *Batch) Update(cb func(Tx) error) error {
+ if batch.err != nil {
+ return batch.err
+ }
+
+ if err := cb(&batch.tx); err != nil {
+ return err
+ }
+
+ batch.applied++
+
+ for batch.changelistLen < len(batch.tx.changelist) {
+ sa, err := newStoreAction(batch.tx.changelist[batch.changelistLen])
+ if err != nil {
+ return err
+ }
+ batch.transactionSizeEstimate += sa.Size()
+ batch.changelistLen++
+ }
+
+ if batch.changelistLen >= MaxChangesPerTransaction || batch.transactionSizeEstimate >= (MaxTransactionBytes*3)/4 {
+ if err := batch.commit(); err != nil {
+ return err
+ }
+
+ // Yield the update lock
+ batch.store.updateLock.Unlock()
+ runtime.Gosched()
+ batch.store.updateLock.Lock()
+
+ batch.newTx()
+ }
+
+ return nil
+}
+
+func (batch *Batch) newTx() {
+ var curVersion *api.Version
+
+ if batch.store.proposer != nil {
+ curVersion = batch.store.proposer.GetVersion()
+ }
+
+ batch.tx.init(batch.store.memDB.Txn(true), curVersion)
+ batch.transactionSizeEstimate = 0
+ batch.changelistLen = 0
+}
+
+func (batch *Batch) commit() error {
+ if batch.store.proposer != nil {
+ var sa []*api.StoreAction
+ sa, batch.err = batch.tx.changelistStoreActions()
+
+ if batch.err == nil {
+ if sa != nil {
+ batch.err = batch.store.proposer.ProposeValue(context.Background(), sa, func() {
+ batch.tx.memDBTx.Commit()
+ })
+ } else {
+ batch.tx.memDBTx.Commit()
+ }
+ }
+ } else {
+ batch.tx.memDBTx.Commit()
+ }
+
+ if batch.err != nil {
+ batch.tx.memDBTx.Abort()
+ return batch.err
+ }
+
+ batch.committed = batch.applied
+
+ for _, c := range batch.tx.changelist {
+ batch.store.queue.Publish(c)
+ }
+ if len(batch.tx.changelist) != 0 {
+ batch.store.queue.Publish(state.EventCommit{})
+ }
+
+ return nil
+}
+
+// Batch performs one or more transactions that allow reads and writes.
+// It invokes a callback that is passed a Batch object. The callback may
+// call batch.Update for each change it wants to make as part of the
+// batch. The changes in the batch may be split over multiple
+// transactions if necessary to keep transactions below the size limit.
+// Batch holds a lock over the state, but will yield this lock every
+// time it creates a new transaction to allow other writers to proceed.
+// Thus, unrelated changes to the state may occur between calls to
+// batch.Update.
+//
+// This method allows the caller to iterate over a data set and apply
+// changes in sequence without holding the store write lock for an
+// excessive time, or producing a transaction that exceeds the maximum
+// size.
+//
+// Batch returns the number of calls to batch.Update whose changes were
+// successfully committed to the store.
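+//
+// A sketch of batching deletions; taskIDs is an assumed slice of task IDs:
+//
+//	committed, err := s.Batch(func(batch *Batch) error {
+//		for _, id := range taskIDs {
+//			err := batch.Update(func(tx Tx) error {
+//				return DeleteTask(tx, id)
+//			})
+//			if err != nil {
+//				return err
+//			}
+//		}
+//		return nil
+//	})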
+func (s *MemoryStore) Batch(cb func(*Batch) error) (int, error) {
+ s.updateLock.Lock()
+
+ batch := Batch{
+ store: s,
+ }
+ batch.newTx()
+
+ if err := cb(&batch); err != nil {
+ batch.tx.memDBTx.Abort()
+ s.updateLock.Unlock()
+ return batch.committed, err
+ }
+
+ err := batch.commit()
+ s.updateLock.Unlock()
+ return batch.committed, err
+}
+
+func (tx *tx) init(memDBTx *memdb.Txn, curVersion *api.Version) {
+ tx.memDBTx = memDBTx
+ tx.curVersion = curVersion
+ tx.changelist = nil
+}
+
+func newStoreAction(c state.Event) (*api.StoreAction, error) {
+ for _, os := range objectStorers {
+ sa, err := os.NewStoreAction(c)
+ if err == nil {
+ return &sa, nil
+ } else if err != errUnknownStoreAction {
+ return nil, err
+ }
+ }
+
+ return nil, errors.New("unrecognized event type")
+}
+
+func (tx tx) changelistStoreActions() ([]*api.StoreAction, error) {
+ var actions []*api.StoreAction
+
+ for _, c := range tx.changelist {
+ sa, err := newStoreAction(c)
+ if err != nil {
+ return nil, err
+ }
+ actions = append(actions, sa)
+ }
+
+ return actions, nil
+}
+
+// lookup is an internal typed wrapper around memdb.
+func (tx readTx) lookup(table, index, id string) Object {
+ j, err := tx.memDBTx.First(table, index, id)
+ if err != nil {
+ return nil
+ }
+ if j != nil {
+ return j.(Object)
+ }
+ return nil
+}
+
+// create adds a new object to the store.
+// Returns ErrExist if the ID is already taken.
+func (tx *tx) create(table string, o Object) error {
+ if tx.lookup(table, indexID, o.ID()) != nil {
+ return ErrExist
+ }
+
+ copy := o.Copy()
+ meta := copy.Meta()
+ if err := touchMeta(&meta, tx.curVersion); err != nil {
+ return err
+ }
+ copy.SetMeta(meta)
+
+ err := tx.memDBTx.Insert(table, copy)
+ if err == nil {
+ tx.changelist = append(tx.changelist, copy.EventCreate())
+ o.SetMeta(meta)
+ }
+ return err
+}
+
+// update updates an existing object in the store.
+// Returns ErrNotExist if the object doesn't exist.
+func (tx *tx) update(table string, o Object) error {
+ oldN := tx.lookup(table, indexID, o.ID())
+ if oldN == nil {
+ return ErrNotExist
+ }
+
+ if tx.curVersion != nil {
+		if oldN.Meta().Version != o.Meta().Version {
+ return ErrSequenceConflict
+ }
+ }
+
+ copy := o.Copy()
+ meta := copy.Meta()
+ if err := touchMeta(&meta, tx.curVersion); err != nil {
+ return err
+ }
+ copy.SetMeta(meta)
+
+ err := tx.memDBTx.Insert(table, copy)
+ if err == nil {
+ tx.changelist = append(tx.changelist, copy.EventUpdate())
+ o.SetMeta(meta)
+ }
+ return err
+}
+
+// delete removes an object from the store.
+// Returns ErrNotExist if the object doesn't exist.
+func (tx *tx) delete(table, id string) error {
+ n := tx.lookup(table, indexID, id)
+ if n == nil {
+ return ErrNotExist
+ }
+
+ err := tx.memDBTx.Delete(table, n)
+ if err == nil {
+ tx.changelist = append(tx.changelist, n.EventDelete())
+ }
+ return err
+}
+
+// get looks up an object by ID.
+// Returns nil if the object doesn't exist.
+func (tx readTx) get(table, id string) Object {
+ o := tx.lookup(table, indexID, id)
+ if o == nil {
+ return nil
+ }
+ return o.Copy()
+}
+
+// findIterators returns a slice of iterators. The union of items from these
+// iterators provides the result of the query.
+func (tx readTx) findIterators(table string, by By, checkType func(By) error) ([]memdb.ResultIterator, error) {
+ switch by.(type) {
+ case byAll, orCombinator: // generic types
+ default: // all other types
+ if err := checkType(by); err != nil {
+ return nil, err
+ }
+ }
+
+ switch v := by.(type) {
+ case byAll:
+ it, err := tx.memDBTx.Get(table, indexID)
+ if err != nil {
+ return nil, err
+ }
+ return []memdb.ResultIterator{it}, nil
+ case orCombinator:
+ var iters []memdb.ResultIterator
+ for _, subBy := range v.bys {
+ it, err := tx.findIterators(table, subBy, checkType)
+ if err != nil {
+ return nil, err
+ }
+ iters = append(iters, it...)
+ }
+ return iters, nil
+ case byName:
+ it, err := tx.memDBTx.Get(table, indexName, strings.ToLower(string(v)))
+ if err != nil {
+ return nil, err
+ }
+ return []memdb.ResultIterator{it}, nil
+ case byCN:
+ it, err := tx.memDBTx.Get(table, indexCN, string(v))
+ if err != nil {
+ return nil, err
+ }
+ return []memdb.ResultIterator{it}, nil
+ case byIDPrefix:
+ it, err := tx.memDBTx.Get(table, indexID+prefix, string(v))
+ if err != nil {
+ return nil, err
+ }
+ return []memdb.ResultIterator{it}, nil
+ case byNode:
+ it, err := tx.memDBTx.Get(table, indexNodeID, string(v))
+ if err != nil {
+ return nil, err
+ }
+ return []memdb.ResultIterator{it}, nil
+ case byService:
+ it, err := tx.memDBTx.Get(table, indexServiceID, string(v))
+ if err != nil {
+ return nil, err
+ }
+ return []memdb.ResultIterator{it}, nil
+ case bySlot:
+ it, err := tx.memDBTx.Get(table, indexSlot, v.serviceID+"\x00"+strconv.FormatUint(uint64(v.slot), 10))
+ if err != nil {
+ return nil, err
+ }
+ return []memdb.ResultIterator{it}, nil
+ case byDesiredState:
+ it, err := tx.memDBTx.Get(table, indexDesiredState, strconv.FormatInt(int64(v), 10))
+ if err != nil {
+ return nil, err
+ }
+ return []memdb.ResultIterator{it}, nil
+ case byRole:
+ it, err := tx.memDBTx.Get(table, indexRole, strconv.FormatInt(int64(v), 10))
+ if err != nil {
+ return nil, err
+ }
+ return []memdb.ResultIterator{it}, nil
+ case byMembership:
+ it, err := tx.memDBTx.Get(table, indexMembership, strconv.FormatInt(int64(v), 10))
+ if err != nil {
+ return nil, err
+ }
+ return []memdb.ResultIterator{it}, nil
+ default:
+ return nil, ErrInvalidFindBy
+ }
+}
+
+// find selects a set of objects and calls a callback for each matching object.
+func (tx readTx) find(table string, by By, checkType func(By) error, appendResult func(Object)) error {
+ fromResultIterators := func(its ...memdb.ResultIterator) {
+ ids := make(map[string]struct{})
+ for _, it := range its {
+ for {
+ obj := it.Next()
+ if obj == nil {
+ break
+ }
+ o := obj.(Object)
+ id := o.ID()
+ if _, exists := ids[id]; !exists {
+ appendResult(o.Copy())
+ ids[id] = struct{}{}
+ }
+ }
+ }
+ }
+
+ iters, err := tx.findIterators(table, by, checkType)
+ if err != nil {
+ return err
+ }
+
+ fromResultIterators(iters...)
+
+ return nil
+}
+
+// Save serializes the data in the store.
+func (s *MemoryStore) Save(tx ReadTx) (*pb.StoreSnapshot, error) {
+ var snapshot pb.StoreSnapshot
+ for _, os := range objectStorers {
+ if err := os.Save(tx, &snapshot); err != nil {
+ return nil, err
+ }
+ }
+
+ return &snapshot, nil
+}
+
+// Restore sets the contents of the store to the serialized data in the
+// argument.
+func (s *MemoryStore) Restore(snapshot *pb.StoreSnapshot) error {
+ return s.updateLocal(func(tx Tx) error {
+ for _, os := range objectStorers {
+ if err := os.Restore(tx, snapshot); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
+
+// WatchQueue returns the publish/subscribe queue.
+func (s *MemoryStore) WatchQueue() *watch.Queue {
+ return s.queue
+}
+
+// ViewAndWatch calls a callback which can observe the state of this
+// MemoryStore. It also returns a channel that will return further events from
+// this point so the snapshot can be kept up to date. The watch channel must be
+// released by calling the returned cancel function when it is no longer
+// needed. The channel is guaranteed to get all events after the moment of the
+// snapshot, and only those events.
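+//
+// A minimal sketch that snapshots all tasks, then follows task creations:
+//
+//	var tasks []*api.Task
+//	watchq, cancel, err := ViewAndWatch(s, func(tx ReadTx) error {
+//		var err error
+//		tasks, err = FindTasks(tx, All)
+//		return err
+//	}, state.EventCreateTask{})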
+func ViewAndWatch(store *MemoryStore, cb func(ReadTx) error, specifiers ...state.Event) (watch chan events.Event, cancel func(), err error) {
+	// Using Update to lock the store and guarantee consistency between
+	// the watcher and the state seen by the callback. The Tx is passed to
+	// the callback as a ReadTx so the callback can't modify the state.
+ err = store.Update(func(tx Tx) error {
+ if err := cb(tx); err != nil {
+ return err
+ }
+ watch, cancel = state.Watch(store.WatchQueue(), specifiers...)
+ return nil
+ })
+ if watch != nil && err != nil {
+ cancel()
+ cancel = nil
+ watch = nil
+ }
+ return
+}
+
+// touchMeta updates an object's timestamps when necessary and sets the
+// version if one is provided.
+func touchMeta(meta *api.Meta, version *api.Version) error {
+ // Skip meta update if version is not defined as it means we're applying
+ // from raft or restoring from a snapshot.
+ if version == nil {
+ return nil
+ }
+
+ now, err := ptypes.TimestampProto(time.Now())
+ if err != nil {
+ return err
+ }
+
+ meta.Version = *version
+
+	// Update CreatedAt if not defined
+ if meta.CreatedAt == nil {
+ meta.CreatedAt = now
+ }
+
+ meta.UpdatedAt = now
+
+ return nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/store/networks.go b/vendor/src/github.com/docker/swarmkit/manager/state/store/networks.go
new file mode 100644
index 0000000000..4afea236e1
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/store/networks.go
@@ -0,0 +1,221 @@
+package store
+
+import (
+ "strings"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/manager/state"
+ memdb "github.com/hashicorp/go-memdb"
+)
+
+const tableNetwork = "network"
+
+func init() {
+ register(ObjectStoreConfig{
+ Name: tableNetwork,
+ Table: &memdb.TableSchema{
+ Name: tableNetwork,
+ Indexes: map[string]*memdb.IndexSchema{
+ indexID: {
+ Name: indexID,
+ Unique: true,
+ Indexer: networkIndexerByID{},
+ },
+ indexName: {
+ Name: indexName,
+ Unique: true,
+ Indexer: networkIndexerByName{},
+ },
+ },
+ },
+ Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
+ var err error
+ snapshot.Networks, err = FindNetworks(tx, All)
+ return err
+ },
+ Restore: func(tx Tx, snapshot *api.StoreSnapshot) error {
+ networks, err := FindNetworks(tx, All)
+ if err != nil {
+ return err
+ }
+ for _, n := range networks {
+ if err := DeleteNetwork(tx, n.ID); err != nil {
+ return err
+ }
+ }
+ for _, n := range snapshot.Networks {
+ if err := CreateNetwork(tx, n); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ ApplyStoreAction: func(tx Tx, sa *api.StoreAction) error {
+ switch v := sa.Target.(type) {
+ case *api.StoreAction_Network:
+ obj := v.Network
+ switch sa.Action {
+ case api.StoreActionKindCreate:
+ return CreateNetwork(tx, obj)
+ case api.StoreActionKindUpdate:
+ return UpdateNetwork(tx, obj)
+ case api.StoreActionKindRemove:
+ return DeleteNetwork(tx, obj.ID)
+ }
+ }
+ return errUnknownStoreAction
+ },
+ NewStoreAction: func(c state.Event) (api.StoreAction, error) {
+ var sa api.StoreAction
+ switch v := c.(type) {
+ case state.EventCreateNetwork:
+ sa.Action = api.StoreActionKindCreate
+ sa.Target = &api.StoreAction_Network{
+ Network: v.Network,
+ }
+ case state.EventUpdateNetwork:
+ sa.Action = api.StoreActionKindUpdate
+ sa.Target = &api.StoreAction_Network{
+ Network: v.Network,
+ }
+ case state.EventDeleteNetwork:
+ sa.Action = api.StoreActionKindRemove
+ sa.Target = &api.StoreAction_Network{
+ Network: v.Network,
+ }
+ default:
+ return api.StoreAction{}, errUnknownStoreAction
+ }
+ return sa, nil
+ },
+ })
+}
+
+type networkEntry struct {
+ *api.Network
+}
+
+func (n networkEntry) ID() string {
+ return n.Network.ID
+}
+
+func (n networkEntry) Meta() api.Meta {
+ return n.Network.Meta
+}
+
+func (n networkEntry) SetMeta(meta api.Meta) {
+ n.Network.Meta = meta
+}
+
+func (n networkEntry) Copy() Object {
+ return networkEntry{n.Network.Copy()}
+}
+
+func (n networkEntry) EventCreate() state.Event {
+ return state.EventCreateNetwork{Network: n.Network}
+}
+
+func (n networkEntry) EventUpdate() state.Event {
+ return state.EventUpdateNetwork{Network: n.Network}
+}
+
+func (n networkEntry) EventDelete() state.Event {
+ return state.EventDeleteNetwork{Network: n.Network}
+}
+
+// CreateNetwork adds a new network to the store.
+// Returns ErrExist if the ID is already taken.
+func CreateNetwork(tx Tx, n *api.Network) error {
+ // Ensure the name is not already in use.
+ if tx.lookup(tableNetwork, indexName, strings.ToLower(n.Spec.Annotations.Name)) != nil {
+ return ErrNameConflict
+ }
+
+ return tx.create(tableNetwork, networkEntry{n})
+}
+
+// UpdateNetwork updates an existing network in the store.
+// Returns ErrNotExist if the network doesn't exist.
+func UpdateNetwork(tx Tx, n *api.Network) error {
+ // Ensure the name is either not in use or already used by this same Network.
+ if existing := tx.lookup(tableNetwork, indexName, strings.ToLower(n.Spec.Annotations.Name)); existing != nil {
+ if existing.ID() != n.ID {
+ return ErrNameConflict
+ }
+ }
+
+ return tx.update(tableNetwork, networkEntry{n})
+}
+
+// DeleteNetwork removes a network from the store.
+// Returns ErrNotExist if the network doesn't exist.
+func DeleteNetwork(tx Tx, id string) error {
+ return tx.delete(tableNetwork, id)
+}
+
+// GetNetwork looks up a network by ID.
+// Returns nil if the network doesn't exist.
+func GetNetwork(tx ReadTx, id string) *api.Network {
+ n := tx.get(tableNetwork, id)
+ if n == nil {
+ return nil
+ }
+ return n.(networkEntry).Network
+}
+
+// FindNetworks selects a set of networks and returns them.
+func FindNetworks(tx ReadTx, by By) ([]*api.Network, error) {
+ checkType := func(by By) error {
+ switch by.(type) {
+ case byName, byIDPrefix:
+ return nil
+ default:
+ return ErrInvalidFindBy
+ }
+ }
+
+ networkList := []*api.Network{}
+ appendResult := func(o Object) {
+ networkList = append(networkList, o.(networkEntry).Network)
+ }
+
+ err := tx.find(tableNetwork, by, checkType, appendResult)
+ return networkList, err
+}
+
+type networkIndexerByID struct{}
+
+func (ni networkIndexerByID) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ni networkIndexerByID) FromObject(obj interface{}) (bool, []byte, error) {
+ n, ok := obj.(networkEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ val := n.Network.ID + "\x00"
+ return true, []byte(val), nil
+}
+
+func (ni networkIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ return prefixFromArgs(args...)
+}
+
+type networkIndexerByName struct{}
+
+func (ni networkIndexerByName) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ni networkIndexerByName) FromObject(obj interface{}) (bool, []byte, error) {
+ n, ok := obj.(networkEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ return true, []byte(strings.ToLower(n.Spec.Annotations.Name) + "\x00"), nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/store/nodes.go b/vendor/src/github.com/docker/swarmkit/manager/state/store/nodes.go
new file mode 100644
index 0000000000..9b571def13
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/store/nodes.go
@@ -0,0 +1,254 @@
+package store
+
+import (
+ "strconv"
+ "strings"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/manager/state"
+ memdb "github.com/hashicorp/go-memdb"
+)
+
+const tableNode = "node"
+
+func init() {
+ register(ObjectStoreConfig{
+ Name: tableNode,
+ Table: &memdb.TableSchema{
+ Name: tableNode,
+ Indexes: map[string]*memdb.IndexSchema{
+ indexID: {
+ Name: indexID,
+ Unique: true,
+ Indexer: nodeIndexerByID{},
+ },
+ // TODO(aluzzardi): Use `indexHostname` instead.
+ indexName: {
+ Name: indexName,
+ AllowMissing: true,
+ Indexer: nodeIndexerByHostname{},
+ },
+ indexRole: {
+ Name: indexRole,
+ Indexer: nodeIndexerByRole{},
+ },
+ indexMembership: {
+ Name: indexMembership,
+ Indexer: nodeIndexerByMembership{},
+ },
+ },
+ },
+ Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
+ var err error
+ snapshot.Nodes, err = FindNodes(tx, All)
+ return err
+ },
+ Restore: func(tx Tx, snapshot *api.StoreSnapshot) error {
+ nodes, err := FindNodes(tx, All)
+ if err != nil {
+ return err
+ }
+ for _, n := range nodes {
+ if err := DeleteNode(tx, n.ID); err != nil {
+ return err
+ }
+ }
+ for _, n := range snapshot.Nodes {
+ if err := CreateNode(tx, n); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ ApplyStoreAction: func(tx Tx, sa *api.StoreAction) error {
+ switch v := sa.Target.(type) {
+ case *api.StoreAction_Node:
+ obj := v.Node
+ switch sa.Action {
+ case api.StoreActionKindCreate:
+ return CreateNode(tx, obj)
+ case api.StoreActionKindUpdate:
+ return UpdateNode(tx, obj)
+ case api.StoreActionKindRemove:
+ return DeleteNode(tx, obj.ID)
+ }
+ }
+ return errUnknownStoreAction
+ },
+ NewStoreAction: func(c state.Event) (api.StoreAction, error) {
+ var sa api.StoreAction
+ switch v := c.(type) {
+ case state.EventCreateNode:
+ sa.Action = api.StoreActionKindCreate
+ sa.Target = &api.StoreAction_Node{
+ Node: v.Node,
+ }
+ case state.EventUpdateNode:
+ sa.Action = api.StoreActionKindUpdate
+ sa.Target = &api.StoreAction_Node{
+ Node: v.Node,
+ }
+ case state.EventDeleteNode:
+ sa.Action = api.StoreActionKindRemove
+ sa.Target = &api.StoreAction_Node{
+ Node: v.Node,
+ }
+ default:
+ return api.StoreAction{}, errUnknownStoreAction
+ }
+ return sa, nil
+ },
+ })
+}
+
+type nodeEntry struct {
+ *api.Node
+}
+
+func (n nodeEntry) ID() string {
+ return n.Node.ID
+}
+
+func (n nodeEntry) Meta() api.Meta {
+ return n.Node.Meta
+}
+
+func (n nodeEntry) SetMeta(meta api.Meta) {
+ n.Node.Meta = meta
+}
+
+func (n nodeEntry) Copy() Object {
+ return nodeEntry{n.Node.Copy()}
+}
+
+func (n nodeEntry) EventCreate() state.Event {
+ return state.EventCreateNode{Node: n.Node}
+}
+
+func (n nodeEntry) EventUpdate() state.Event {
+ return state.EventUpdateNode{Node: n.Node}
+}
+
+func (n nodeEntry) EventDelete() state.Event {
+ return state.EventDeleteNode{Node: n.Node}
+}
+
+// CreateNode adds a new node to the store.
+// Returns ErrExist if the ID is already taken.
+func CreateNode(tx Tx, n *api.Node) error {
+ return tx.create(tableNode, nodeEntry{n})
+}
+
+// UpdateNode updates an existing node in the store.
+// Returns ErrNotExist if the node doesn't exist.
+func UpdateNode(tx Tx, n *api.Node) error {
+ return tx.update(tableNode, nodeEntry{n})
+}
+
+// DeleteNode removes a node from the store.
+// Returns ErrNotExist if the node doesn't exist.
+func DeleteNode(tx Tx, id string) error {
+ return tx.delete(tableNode, id)
+}
+
+// GetNode looks up a node by ID.
+// Returns nil if the node doesn't exist.
+func GetNode(tx ReadTx, id string) *api.Node {
+ n := tx.get(tableNode, id)
+ if n == nil {
+ return nil
+ }
+ return n.(nodeEntry).Node
+}
+
+// FindNodes selects a set of nodes and returns them.
+func FindNodes(tx ReadTx, by By) ([]*api.Node, error) {
+ checkType := func(by By) error {
+ switch by.(type) {
+ case byName, byIDPrefix, byRole, byMembership:
+ return nil
+ default:
+ return ErrInvalidFindBy
+ }
+ }
+
+ nodeList := []*api.Node{}
+ appendResult := func(o Object) {
+ nodeList = append(nodeList, o.(nodeEntry).Node)
+ }
+
+ err := tx.find(tableNode, by, checkType, appendResult)
+ return nodeList, err
+}
+
+type nodeIndexerByID struct{}
+
+func (ni nodeIndexerByID) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ni nodeIndexerByID) FromObject(obj interface{}) (bool, []byte, error) {
+ n, ok := obj.(nodeEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ val := n.Node.ID + "\x00"
+ return true, []byte(val), nil
+}
+
+func (ni nodeIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ return prefixFromArgs(args...)
+}
+
+type nodeIndexerByHostname struct{}
+
+func (ni nodeIndexerByHostname) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ni nodeIndexerByHostname) FromObject(obj interface{}) (bool, []byte, error) {
+ n, ok := obj.(nodeEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ if n.Description == nil {
+ return false, nil, nil
+ }
+ // Add the null character as a terminator
+ return true, []byte(strings.ToLower(n.Description.Hostname) + "\x00"), nil
+}
+
+type nodeIndexerByRole struct{}
+
+func (ni nodeIndexerByRole) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ni nodeIndexerByRole) FromObject(obj interface{}) (bool, []byte, error) {
+ n, ok := obj.(nodeEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ return true, []byte(strconv.FormatInt(int64(n.Spec.Role), 10) + "\x00"), nil
+}
+
+type nodeIndexerByMembership struct{}
+
+func (ni nodeIndexerByMembership) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ni nodeIndexerByMembership) FromObject(obj interface{}) (bool, []byte, error) {
+ n, ok := obj.(nodeEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ return true, []byte(strconv.FormatInt(int64(n.Spec.Membership), 10) + "\x00"), nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/store/object.go b/vendor/src/github.com/docker/swarmkit/manager/state/store/object.go
new file mode 100644
index 0000000000..e86e9a6ca2
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/store/object.go
@@ -0,0 +1,29 @@
+package store
+
+import (
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/manager/state"
+ memdb "github.com/hashicorp/go-memdb"
+)
+
+// Object is a generic object that can be handled by the store.
+type Object interface {
+ ID() string // Get ID
+ Meta() api.Meta // Retrieve metadata
+ SetMeta(api.Meta) // Set metadata
+ Copy() Object // Return a copy of this object
+ EventCreate() state.Event // Return a creation event
+ EventUpdate() state.Event // Return an update event
+ EventDelete() state.Event // Return a deletion event
+}
+
+// ObjectStoreConfig provides the necessary methods to store a particular object
+// type inside MemoryStore.
+type ObjectStoreConfig struct {
+ Name string
+ Table *memdb.TableSchema
+ Save func(ReadTx, *api.StoreSnapshot) error
+ Restore func(Tx, *api.StoreSnapshot) error
+ ApplyStoreAction func(Tx, *api.StoreAction) error
+ NewStoreAction func(state.Event) (api.StoreAction, error)
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/store/services.go b/vendor/src/github.com/docker/swarmkit/manager/state/store/services.go
new file mode 100644
index 0000000000..2933253a22
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/store/services.go
@@ -0,0 +1,221 @@
+package store
+
+import (
+ "strings"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/manager/state"
+ memdb "github.com/hashicorp/go-memdb"
+)
+
+const tableService = "service"
+
+func init() {
+ register(ObjectStoreConfig{
+ Name: tableService,
+ Table: &memdb.TableSchema{
+ Name: tableService,
+ Indexes: map[string]*memdb.IndexSchema{
+ indexID: {
+ Name: indexID,
+ Unique: true,
+ Indexer: serviceIndexerByID{},
+ },
+ indexName: {
+ Name: indexName,
+ Unique: true,
+ Indexer: serviceIndexerByName{},
+ },
+ },
+ },
+ Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
+ var err error
+ snapshot.Services, err = FindServices(tx, All)
+ return err
+ },
+ Restore: func(tx Tx, snapshot *api.StoreSnapshot) error {
+ services, err := FindServices(tx, All)
+ if err != nil {
+ return err
+ }
+ for _, s := range services {
+ if err := DeleteService(tx, s.ID); err != nil {
+ return err
+ }
+ }
+ for _, s := range snapshot.Services {
+ if err := CreateService(tx, s); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ ApplyStoreAction: func(tx Tx, sa *api.StoreAction) error {
+ switch v := sa.Target.(type) {
+ case *api.StoreAction_Service:
+ obj := v.Service
+ switch sa.Action {
+ case api.StoreActionKindCreate:
+ return CreateService(tx, obj)
+ case api.StoreActionKindUpdate:
+ return UpdateService(tx, obj)
+ case api.StoreActionKindRemove:
+ return DeleteService(tx, obj.ID)
+ }
+ }
+ return errUnknownStoreAction
+ },
+ NewStoreAction: func(c state.Event) (api.StoreAction, error) {
+ var sa api.StoreAction
+ switch v := c.(type) {
+ case state.EventCreateService:
+ sa.Action = api.StoreActionKindCreate
+ sa.Target = &api.StoreAction_Service{
+ Service: v.Service,
+ }
+ case state.EventUpdateService:
+ sa.Action = api.StoreActionKindUpdate
+ sa.Target = &api.StoreAction_Service{
+ Service: v.Service,
+ }
+ case state.EventDeleteService:
+ sa.Action = api.StoreActionKindRemove
+ sa.Target = &api.StoreAction_Service{
+ Service: v.Service,
+ }
+ default:
+ return api.StoreAction{}, errUnknownStoreAction
+ }
+ return sa, nil
+ },
+ })
+}
+
+type serviceEntry struct {
+ *api.Service
+}
+
+func (s serviceEntry) ID() string {
+ return s.Service.ID
+}
+
+func (s serviceEntry) Meta() api.Meta {
+ return s.Service.Meta
+}
+
+func (s serviceEntry) SetMeta(meta api.Meta) {
+ s.Service.Meta = meta
+}
+
+func (s serviceEntry) Copy() Object {
+ return serviceEntry{s.Service.Copy()}
+}
+
+func (s serviceEntry) EventCreate() state.Event {
+ return state.EventCreateService{Service: s.Service}
+}
+
+func (s serviceEntry) EventUpdate() state.Event {
+ return state.EventUpdateService{Service: s.Service}
+}
+
+func (s serviceEntry) EventDelete() state.Event {
+ return state.EventDeleteService{Service: s.Service}
+}
+
+// CreateService adds a new service to the store.
+// Returns ErrExist if the ID is already taken.
+func CreateService(tx Tx, s *api.Service) error {
+ // Ensure the name is not already in use.
+ if tx.lookup(tableService, indexName, strings.ToLower(s.Spec.Annotations.Name)) != nil {
+ return ErrNameConflict
+ }
+
+ return tx.create(tableService, serviceEntry{s})
+}
+
+// UpdateService updates an existing service in the store.
+// Returns ErrNotExist if the service doesn't exist.
+func UpdateService(tx Tx, s *api.Service) error {
+ // Ensure the name is either not in use or already used by this same Service.
+ if existing := tx.lookup(tableService, indexName, strings.ToLower(s.Spec.Annotations.Name)); existing != nil {
+ if existing.ID() != s.ID {
+ return ErrNameConflict
+ }
+ }
+
+ return tx.update(tableService, serviceEntry{s})
+}
+
+// DeleteService removes a service from the store.
+// Returns ErrNotExist if the service doesn't exist.
+func DeleteService(tx Tx, id string) error {
+ return tx.delete(tableService, id)
+}
+
+// GetService looks up a service by ID.
+// Returns nil if the service doesn't exist.
+func GetService(tx ReadTx, id string) *api.Service {
+ s := tx.get(tableService, id)
+ if s == nil {
+ return nil
+ }
+ return s.(serviceEntry).Service
+}
+
+// FindServices selects a set of services and returns them.
+func FindServices(tx ReadTx, by By) ([]*api.Service, error) {
+ checkType := func(by By) error {
+ switch by.(type) {
+ case byName, byIDPrefix:
+ return nil
+ default:
+ return ErrInvalidFindBy
+ }
+ }
+
+ serviceList := []*api.Service{}
+ appendResult := func(o Object) {
+ serviceList = append(serviceList, o.(serviceEntry).Service)
+ }
+
+ err := tx.find(tableService, by, checkType, appendResult)
+ return serviceList, err
+}
+
+type serviceIndexerByID struct{}
+
+func (si serviceIndexerByID) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (si serviceIndexerByID) FromObject(obj interface{}) (bool, []byte, error) {
+ s, ok := obj.(serviceEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ val := s.Service.ID + "\x00"
+ return true, []byte(val), nil
+}
+
+func (si serviceIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ return prefixFromArgs(args...)
+}
+
+type serviceIndexerByName struct{}
+
+func (si serviceIndexerByName) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (si serviceIndexerByName) FromObject(obj interface{}) (bool, []byte, error) {
+ s, ok := obj.(serviceEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ return true, []byte(strings.ToLower(s.Spec.Annotations.Name) + "\x00"), nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/store/tasks.go b/vendor/src/github.com/docker/swarmkit/manager/state/store/tasks.go
new file mode 100644
index 0000000000..c8f3ea336c
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/store/tasks.go
@@ -0,0 +1,296 @@
+package store
+
+import (
+ "strconv"
+ "strings"
+
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/manager/state"
+ memdb "github.com/hashicorp/go-memdb"
+)
+
+const tableTask = "task"
+
+func init() {
+ register(ObjectStoreConfig{
+ Name: tableTask,
+ Table: &memdb.TableSchema{
+ Name: tableTask,
+ Indexes: map[string]*memdb.IndexSchema{
+ indexID: {
+ Name: indexID,
+ Unique: true,
+ Indexer: taskIndexerByID{},
+ },
+ indexName: {
+ Name: indexName,
+ AllowMissing: true,
+ Indexer: taskIndexerByName{},
+ },
+ indexServiceID: {
+ Name: indexServiceID,
+ AllowMissing: true,
+ Indexer: taskIndexerByServiceID{},
+ },
+ indexNodeID: {
+ Name: indexNodeID,
+ AllowMissing: true,
+ Indexer: taskIndexerByNodeID{},
+ },
+ indexSlot: {
+ Name: indexSlot,
+ AllowMissing: true,
+ Indexer: taskIndexerBySlot{},
+ },
+ indexDesiredState: {
+ Name: indexDesiredState,
+ Indexer: taskIndexerByDesiredState{},
+ },
+ },
+ },
+ Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
+ var err error
+ snapshot.Tasks, err = FindTasks(tx, All)
+ return err
+ },
+ Restore: func(tx Tx, snapshot *api.StoreSnapshot) error {
+ tasks, err := FindTasks(tx, All)
+ if err != nil {
+ return err
+ }
+ for _, t := range tasks {
+ if err := DeleteTask(tx, t.ID); err != nil {
+ return err
+ }
+ }
+ for _, t := range snapshot.Tasks {
+ if err := CreateTask(tx, t); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ ApplyStoreAction: func(tx Tx, sa *api.StoreAction) error {
+ switch v := sa.Target.(type) {
+ case *api.StoreAction_Task:
+ obj := v.Task
+ switch sa.Action {
+ case api.StoreActionKindCreate:
+ return CreateTask(tx, obj)
+ case api.StoreActionKindUpdate:
+ return UpdateTask(tx, obj)
+ case api.StoreActionKindRemove:
+ return DeleteTask(tx, obj.ID)
+ }
+ }
+ return errUnknownStoreAction
+ },
+ NewStoreAction: func(c state.Event) (api.StoreAction, error) {
+ var sa api.StoreAction
+ switch v := c.(type) {
+ case state.EventCreateTask:
+ sa.Action = api.StoreActionKindCreate
+ sa.Target = &api.StoreAction_Task{
+ Task: v.Task,
+ }
+ case state.EventUpdateTask:
+ sa.Action = api.StoreActionKindUpdate
+ sa.Target = &api.StoreAction_Task{
+ Task: v.Task,
+ }
+ case state.EventDeleteTask:
+ sa.Action = api.StoreActionKindRemove
+ sa.Target = &api.StoreAction_Task{
+ Task: v.Task,
+ }
+ default:
+ return api.StoreAction{}, errUnknownStoreAction
+ }
+ return sa, nil
+ },
+ })
+}
+
+type taskEntry struct {
+ *api.Task
+}
+
+func (t taskEntry) ID() string {
+ return t.Task.ID
+}
+
+func (t taskEntry) Meta() api.Meta {
+ return t.Task.Meta
+}
+
+func (t taskEntry) SetMeta(meta api.Meta) {
+ t.Task.Meta = meta
+}
+
+func (t taskEntry) Copy() Object {
+ return taskEntry{t.Task.Copy()}
+}
+
+func (t taskEntry) EventCreate() state.Event {
+ return state.EventCreateTask{Task: t.Task}
+}
+
+func (t taskEntry) EventUpdate() state.Event {
+ return state.EventUpdateTask{Task: t.Task}
+}
+
+func (t taskEntry) EventDelete() state.Event {
+ return state.EventDeleteTask{Task: t.Task}
+}
+
+// CreateTask adds a new task to the store.
+// Returns ErrExist if the ID is already taken.
+func CreateTask(tx Tx, t *api.Task) error {
+ return tx.create(tableTask, taskEntry{t})
+}
+
+// UpdateTask updates an existing task in the store.
+// Returns ErrNotExist if the task doesn't exist.
+func UpdateTask(tx Tx, t *api.Task) error {
+ return tx.update(tableTask, taskEntry{t})
+}
+
+// DeleteTask removes a task from the store.
+// Returns ErrNotExist if the task doesn't exist.
+func DeleteTask(tx Tx, id string) error {
+ return tx.delete(tableTask, id)
+}
+
+// GetTask looks up a task by ID.
+// Returns nil if the task doesn't exist.
+func GetTask(tx ReadTx, id string) *api.Task {
+ t := tx.get(tableTask, id)
+ if t == nil {
+ return nil
+ }
+ return t.(taskEntry).Task
+}
+
+// FindTasks selects a set of tasks and returns them.
+func FindTasks(tx ReadTx, by By) ([]*api.Task, error) {
+ checkType := func(by By) error {
+ switch by.(type) {
+ case byName, byIDPrefix, byDesiredState, byNode, byService, bySlot:
+ return nil
+ default:
+ return ErrInvalidFindBy
+ }
+ }
+
+ taskList := []*api.Task{}
+ appendResult := func(o Object) {
+ taskList = append(taskList, o.(taskEntry).Task)
+ }
+
+ err := tx.find(tableTask, by, checkType, appendResult)
+ return taskList, err
+}
+
+type taskIndexerByID struct{}
+
+func (ti taskIndexerByID) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ti taskIndexerByID) FromObject(obj interface{}) (bool, []byte, error) {
+ t, ok := obj.(taskEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ val := t.Task.ID + "\x00"
+ return true, []byte(val), nil
+}
+
+func (ti taskIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ return prefixFromArgs(args...)
+}
+
+type taskIndexerByName struct{}
+
+func (ti taskIndexerByName) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ti taskIndexerByName) FromObject(obj interface{}) (bool, []byte, error) {
+ t, ok := obj.(taskEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ return true, []byte(strings.ToLower(t.ServiceAnnotations.Name) + "\x00"), nil
+}
+
+type taskIndexerByServiceID struct{}
+
+func (ti taskIndexerByServiceID) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ti taskIndexerByServiceID) FromObject(obj interface{}) (bool, []byte, error) {
+ t, ok := obj.(taskEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ val := t.ServiceID + "\x00"
+ return true, []byte(val), nil
+}
+
+type taskIndexerByNodeID struct{}
+
+func (ti taskIndexerByNodeID) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ti taskIndexerByNodeID) FromObject(obj interface{}) (bool, []byte, error) {
+ t, ok := obj.(taskEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ val := t.NodeID + "\x00"
+ return true, []byte(val), nil
+}
+
+type taskIndexerBySlot struct{}
+
+func (ti taskIndexerBySlot) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ti taskIndexerBySlot) FromObject(obj interface{}) (bool, []byte, error) {
+ t, ok := obj.(taskEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ val := t.ServiceID + "\x00" + strconv.FormatUint(t.Slot, 10) + "\x00"
+ return true, []byte(val), nil
+}
+
+type taskIndexerByDesiredState struct{}
+
+func (ni taskIndexerByDesiredState) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromArgs(args...)
+}
+
+func (ni taskIndexerByDesiredState) FromObject(obj interface{}) (bool, []byte, error) {
+ n, ok := obj.(taskEntry)
+ if !ok {
+ panic("unexpected type passed to FromObject")
+ }
+
+ // Add the null character as a terminator
+ return true, []byte(strconv.FormatInt(int64(n.DesiredState), 10) + "\x00"), nil
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/watch.go b/vendor/src/github.com/docker/swarmkit/manager/state/watch.go
new file mode 100644
index 0000000000..099d7a69d6
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/watch.go
@@ -0,0 +1,488 @@
+package state
+
+import (
+ "github.com/docker/go-events"
+ "github.com/docker/swarmkit/api"
+ "github.com/docker/swarmkit/manager/state/watch"
+)
+
+// Event is the type used for events passed over watcher channels, and also
+// the type used to specify filtering in calls to Watch.
+type Event interface {
+ // TODO(stevvooe): Consider whether it makes sense to squish both the
+ // matcher type and the primary type into the same type. It might be better
+ // to build a matcher from an event prototype.
+
+ // matches checks if this item in a watch queue matches the event
+ // description.
+ matches(events.Event) bool
+}
+
+// EventCommit delineates a transaction boundary.
+type EventCommit struct{}
+
+func (e EventCommit) matches(watchEvent events.Event) bool {
+ _, ok := watchEvent.(EventCommit)
+ return ok
+}
+
+// TaskCheckFunc is the type of function used to perform filtering checks on
+// api.Task structures.
+type TaskCheckFunc func(t1, t2 *api.Task) bool
+
+// TaskCheckID is a TaskCheckFunc for matching task IDs.
+func TaskCheckID(t1, t2 *api.Task) bool {
+ return t1.ID == t2.ID
+}
+
+// TaskCheckNodeID is a TaskCheckFunc for matching node IDs.
+func TaskCheckNodeID(t1, t2 *api.Task) bool {
+ return t1.NodeID == t2.NodeID
+}
+
+// TaskCheckStateGreaterThan is a TaskCheckFunc that matches when the observed
+// task's state is greater than the state of the task in the specifier.
+func TaskCheckStateGreaterThan(t1, t2 *api.Task) bool {
+ return t2.Status.State > t1.Status.State
+}
+
+// EventCreateTask is the type used to put CreateTask events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventCreateTask struct {
+ Task *api.Task
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []TaskCheckFunc
+}
+
+func (e EventCreateTask) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventCreateTask)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Task, typedEvent.Task) {
+ return false
+ }
+ }
+ return true
+}
+
+// EventUpdateTask is the type used to put UpdateTask events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventUpdateTask struct {
+ Task *api.Task
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []TaskCheckFunc
+}
+
+func (e EventUpdateTask) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventUpdateTask)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Task, typedEvent.Task) {
+ return false
+ }
+ }
+ return true
+}
+
+// EventDeleteTask is the type used to put DeleteTask events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventDeleteTask struct {
+ Task *api.Task
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []TaskCheckFunc
+}
+
+func (e EventDeleteTask) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventDeleteTask)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Task, typedEvent.Task) {
+ return false
+ }
+ }
+ return true
+}
+
+// ServiceCheckFunc is the type of function used to perform filtering checks on
+// api.Service structures.
+type ServiceCheckFunc func(j1, j2 *api.Service) bool
+
+// ServiceCheckID is a ServiceCheckFunc for matching service IDs.
+func ServiceCheckID(j1, j2 *api.Service) bool {
+ return j1.ID == j2.ID
+}
+
+// EventCreateService is the type used to put CreateService events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventCreateService struct {
+ Service *api.Service
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []ServiceCheckFunc
+}
+
+func (e EventCreateService) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventCreateService)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Service, typedEvent.Service) {
+ return false
+ }
+ }
+ return true
+}
+
+// EventUpdateService is the type used to put UpdateService events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventUpdateService struct {
+ Service *api.Service
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []ServiceCheckFunc
+}
+
+func (e EventUpdateService) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventUpdateService)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Service, typedEvent.Service) {
+ return false
+ }
+ }
+ return true
+}
+
+// EventDeleteService is the type used to put DeleteService events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventDeleteService struct {
+ Service *api.Service
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []ServiceCheckFunc
+}
+
+func (e EventDeleteService) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventDeleteService)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Service, typedEvent.Service) {
+ return false
+ }
+ }
+ return true
+}
+
+// NetworkCheckFunc is the type of function used to perform filtering checks on
+// api.Network structures.
+type NetworkCheckFunc func(n1, n2 *api.Network) bool
+
+// NetworkCheckID is a NetworkCheckFunc for matching network IDs.
+func NetworkCheckID(n1, n2 *api.Network) bool {
+ return n1.ID == n2.ID
+}
+
+// EventCreateNetwork is the type used to put CreateNetwork events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventCreateNetwork struct {
+ Network *api.Network
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []NetworkCheckFunc
+}
+
+func (e EventCreateNetwork) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventCreateNetwork)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Network, typedEvent.Network) {
+ return false
+ }
+ }
+ return true
+}
+
+// EventUpdateNetwork is the type used to put UpdateNetwork events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventUpdateNetwork struct {
+ Network *api.Network
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []NetworkCheckFunc
+}
+
+func (e EventUpdateNetwork) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventUpdateNetwork)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Network, typedEvent.Network) {
+ return false
+ }
+ }
+ return true
+}
+
+// EventDeleteNetwork is the type used to put DeleteNetwork events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventDeleteNetwork struct {
+ Network *api.Network
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []NetworkCheckFunc
+}
+
+func (e EventDeleteNetwork) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventDeleteNetwork)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Network, typedEvent.Network) {
+ return false
+ }
+ }
+ return true
+}
+
+// NodeCheckFunc is the type of function used to perform filtering checks on
+// api.Node structures.
+type NodeCheckFunc func(n1, n2 *api.Node) bool
+
+// NodeCheckID is a NodeCheckFunc for matching node IDs.
+func NodeCheckID(n1, n2 *api.Node) bool {
+ return n1.ID == n2.ID
+}
+
+// NodeCheckState is a NodeCheckFunc for matching node state.
+func NodeCheckState(n1, n2 *api.Node) bool {
+ return n1.Status.State == n2.Status.State
+}
+
+// EventCreateNode is the type used to put CreateNode events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventCreateNode struct {
+ Node *api.Node
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []NodeCheckFunc
+}
+
+func (e EventCreateNode) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventCreateNode)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Node, typedEvent.Node) {
+ return false
+ }
+ }
+ return true
+}
+
+// EventUpdateNode is the type used to put UpdateNode events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventUpdateNode struct {
+ Node *api.Node
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []NodeCheckFunc
+}
+
+func (e EventUpdateNode) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventUpdateNode)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Node, typedEvent.Node) {
+ return false
+ }
+ }
+ return true
+}
+
+// EventDeleteNode is the type used to put DeleteNode events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventDeleteNode struct {
+ Node *api.Node
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []NodeCheckFunc
+}
+
+func (e EventDeleteNode) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventDeleteNode)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Node, typedEvent.Node) {
+ return false
+ }
+ }
+ return true
+}
+
+// ClusterCheckFunc is the type of function used to perform filtering checks on
+// api.Cluster structures.
+type ClusterCheckFunc func(v1, v2 *api.Cluster) bool
+
+// ClusterCheckID is a ClusterCheckFunc for matching cluster IDs.
+func ClusterCheckID(v1, v2 *api.Cluster) bool {
+ return v1.ID == v2.ID
+}
+
+// EventCreateCluster is the type used to put CreateCluster events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventCreateCluster struct {
+ Cluster *api.Cluster
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []ClusterCheckFunc
+}
+
+func (e EventCreateCluster) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventCreateCluster)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Cluster, typedEvent.Cluster) {
+ return false
+ }
+ }
+ return true
+}
+
+// EventUpdateCluster is the type used to put UpdateCluster events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventUpdateCluster struct {
+ Cluster *api.Cluster
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []ClusterCheckFunc
+}
+
+func (e EventUpdateCluster) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventUpdateCluster)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Cluster, typedEvent.Cluster) {
+ return false
+ }
+ }
+ return true
+}
+
+// EventDeleteCluster is the type used to put DeleteCluster events on the
+// publish/subscribe queue and filter these events in calls to Watch.
+type EventDeleteCluster struct {
+ Cluster *api.Cluster
+ // Checks is a list of functions to call to filter events for a watch
+ // stream. They are applied with AND logic. They are only applicable for
+ // calls to Watch.
+ Checks []ClusterCheckFunc
+}
+
+func (e EventDeleteCluster) matches(watchEvent events.Event) bool {
+ typedEvent, ok := watchEvent.(EventDeleteCluster)
+ if !ok {
+ return false
+ }
+
+ for _, check := range e.Checks {
+ if !check(e.Cluster, typedEvent.Cluster) {
+ return false
+ }
+ }
+ return true
+}
+
+// Watch takes a variable number of events to match against. The subscriber
+// will receive events that match any of the arguments passed to Watch.
+//
+// Examples:
+//
+// // subscribe to all events
+// Watch(q)
+//
+// // subscribe to all UpdateTask events
+// Watch(q, EventUpdateTask{})
+//
+// // subscribe to all task-related events
+// Watch(q, EventUpdateTask{}, EventCreateTask{}, EventDeleteTask{})
+//
+// // subscribe to UpdateTask for node 123
+// Watch(q, EventUpdateTask{Task: &api.Task{NodeID: "123"},
+// Checks: []TaskCheckFunc{TaskCheckNodeID}})
+//
+// // subscribe to UpdateTask for node 123, as well as CreateTask
+// // for node 123 that also has ServiceID set to "abc"
+// Watch(q, EventUpdateTask{Task: &api.Task{NodeID: "123"},
+// Checks: []TaskCheckFunc{TaskCheckNodeID}},
+// EventCreateTask{Task: &api.Task{NodeID: "123", ServiceID: "abc"},
+// Checks: []TaskCheckFunc{TaskCheckNodeID,
+// func(t1, t2 *api.Task) bool {
+// return t1.ServiceID == t2.ServiceID
+// }}})
+func Watch(queue *watch.Queue, specifiers ...Event) (eventq chan events.Event, cancel func()) {
+ if len(specifiers) == 0 {
+ return queue.Watch()
+ }
+ return queue.CallbackWatch(events.MatcherFunc(func(event events.Event) bool {
+ for _, s := range specifiers {
+ if s.matches(event) {
+ return true
+ }
+ }
+ return false
+ }))
+}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/watch/watch.go b/vendor/src/github.com/docker/swarmkit/manager/state/watch/watch.go
new file mode 100644
index 0000000000..c26e6684cf
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/watch/watch.go
@@ -0,0 +1,48 @@
+package watch
+
+import "github.com/docker/go-events"
+
+// Queue is the structure used to publish events and watch for them.
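+//
+// A minimal publish/subscribe sketch:
+//
+//	q := NewQueue(0)
+//	eventq, cancel := q.Watch()
+//	defer cancel()
+//	q.Publish("hello") // any value satisfies events.Event
+//	fmt.Println(<-eventq)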
+type Queue struct {
+ broadcast *events.Broadcaster
+}
+
+// NewQueue creates a new publish/subscribe queue which supports watchers.
+// The buffer argument is currently unused; channels created for
+// subscriptions are unbuffered.
+func NewQueue(buffer int) *Queue {
+ return &Queue{
+ broadcast: events.NewBroadcaster(),
+ }
+}
+
+// Watch returns a channel which will receive all items published to the
+// queue from this point, until cancel is called.
+func (q *Queue) Watch() (eventq chan events.Event, cancel func()) {
+ return q.CallbackWatch(nil)
+}
+
+// CallbackWatch returns a channel which will receive all events published to
+// the queue from this point that pass the check in the provided callback
+// function. The returned cancel function will stop the flow of events and
+// close the channel.
+func (q *Queue) CallbackWatch(matcher events.Matcher) (eventq chan events.Event, cancel func()) {
+ ch := events.NewChannel(0)
+ sink := events.Sink(events.NewQueue(ch))
+
+ if matcher != nil {
+ sink = events.NewFilter(sink, matcher)
+ }
+
+ q.broadcast.Add(sink)
+ return ch.C, func() {
+ q.broadcast.Remove(sink)
+ ch.Close()
+ sink.Close()
+ }
+}
+
+// Publish adds an item to the queue.
+func (q *Queue) Publish(item events.Event) {
+ q.broadcast.Write(item)
+}
diff --git a/vendor/src/github.com/docker/swarmkit/picker/picker.go b/vendor/src/github.com/docker/swarmkit/picker/picker.go
new file mode 100644
index 0000000000..7f0393800e
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/picker/picker.go
@@ -0,0 +1,330 @@
+package picker
+
+import (
+ "fmt"
+ "math"
+ "math/rand"
+ "sort"
+ "sync"
+
+ "github.com/docker/swarmkit/api"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/transport"
+)
+
+var errRemotesUnavailable = fmt.Errorf("no remote hosts provided")
+
+// Remotes keeps track of remote addresses by weight, informed by
+// observations.
+type Remotes interface {
+	// Weights returns the remotes with their current weights.
+ Weights() map[api.Peer]int
+
+	// Select a remote from the set of available remotes, optionally
+	// excluding IDs or addresses.
+ Select(...string) (api.Peer, error)
+
+ // Observe records an experience with a particular remote. A positive weight
+ // indicates a good experience and a negative weight a bad experience.
+ //
+ // The observation will be used to calculate a moving weight, which is
+ // implementation dependent. This method will be called such that repeated
+ // observations of the same master in each session request are favored.
+ Observe(peer api.Peer, weight int)
+
+	// ObserveIfExists records an experience with a particular remote, but
+	// only if the remote already exists.
+ ObserveIfExists(peer api.Peer, weight int)
+
+	// Remove the given remotes from the list completely.
+ Remove(addrs ...api.Peer)
+}
+
+// NewRemotes returns a Remotes instance with the provided set of addresses.
+// Entries provided are heavily weighted initially.
+func NewRemotes(peers ...api.Peer) Remotes {
+ mwr := &remotesWeightedRandom{
+ remotes: make(map[api.Peer]int),
+ }
+
+ for _, peer := range peers {
+ mwr.Observe(peer, 1)
+ }
+
+ return mwr
+}
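+
+// A minimal usage sketch (illustrative, not part of the vendored source):
+//
+//	r := NewRemotes(api.Peer{Addr: "10.0.0.1:4242"}, api.Peer{Addr: "10.0.0.2:4242"})
+//	peer, err := r.Select() // weighted random choice
+//	if err == nil {
+//		r.Observe(peer, -1) // record a bad experience, lowering its weight
+//	}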
+
+type remotesWeightedRandom struct {
+ remotes map[api.Peer]int
+ mu sync.Mutex
+
+ // workspace to avoid reallocation. these get lazily allocated when
+ // selecting values.
+ cdf []float64
+ peers []api.Peer
+}
+
+func (mwr *remotesWeightedRandom) Weights() map[api.Peer]int {
+ mwr.mu.Lock()
+ defer mwr.mu.Unlock()
+
+ ms := make(map[api.Peer]int, len(mwr.remotes))
+ for addr, weight := range mwr.remotes {
+ ms[addr] = weight
+ }
+
+ return ms
+}
+
+func (mwr *remotesWeightedRandom) Select(excludes ...string) (api.Peer, error) {
+ mwr.mu.Lock()
+ defer mwr.mu.Unlock()
+
+ // NOTE(stevvooe): We then use a weighted random selection algorithm
+ // (http://stackoverflow.com/questions/4463561/weighted-random-selection-from-array)
+ // to choose the master to connect to.
+ //
+ // It is possible that this is insufficient. The following may inform a
+ // better solution:
+	//
+	// https://github.com/LK4D4/sample
+	//
+	// The link above applies exponential distribution weight choice
+	// reservoir sampling. This may be relevant if we view the master
+	// selection as a distributed reservoir sampling problem.
+
+	// bias so that zero-weighted remotes have the same selection probability;
+	// otherwise, we would always select the first entry when all weights are
+	// zero.
+ const bias = 0.1
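+
+	// Illustrative example (not in the original source): with remotes
+	// {a: 1, b: 3}, the loop below builds cdf = [1.1, 4.2] and
+	// peers = [a, b]. A uniform draw r in [0, 4.2) falls in b's interval
+	// with probability 3.1/4.2, and sort.SearchFloat64s returns the index
+	// of the first cdf entry >= r.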
+
+ // clear out workspace
+ mwr.cdf = mwr.cdf[:0]
+ mwr.peers = mwr.peers[:0]
+
+ cum := 0.0
+ // calculate CDF over weights
+ for peer, weight := range mwr.remotes {
+		excluded := false
+		for _, exclude := range excludes {
+			if peer.NodeID == exclude || peer.Addr == exclude {
+				excluded = true
+				break
+			}
+		}
+		if excluded {
+			continue
+		}
+ if weight < 0 {
+			// treat these as zero, to keep their selection unlikely.
+ weight = 0
+ }
+
+ cum += float64(weight) + bias
+ mwr.cdf = append(mwr.cdf, cum)
+ mwr.peers = append(mwr.peers, peer)
+ }
+
+ if len(mwr.peers) == 0 {
+ return api.Peer{}, errRemotesUnavailable
+ }
+
+ r := mwr.cdf[len(mwr.cdf)-1] * rand.Float64()
+ i := sort.SearchFloat64s(mwr.cdf, r)
+ return mwr.peers[i], nil
+}
+
+func (mwr *remotesWeightedRandom) Observe(peer api.Peer, weight int) {
+ mwr.mu.Lock()
+ defer mwr.mu.Unlock()
+
+ mwr.observe(peer, float64(weight))
+}
+
+func (mwr *remotesWeightedRandom) ObserveIfExists(peer api.Peer, weight int) {
+ mwr.mu.Lock()
+ defer mwr.mu.Unlock()
+
+ if _, ok := mwr.remotes[peer]; !ok {
+ return
+ }
+
+ mwr.observe(peer, float64(weight))
+}
+
+func (mwr *remotesWeightedRandom) Remove(addrs ...api.Peer) {
+ mwr.mu.Lock()
+ defer mwr.mu.Unlock()
+
+ for _, addr := range addrs {
+ delete(mwr.remotes, addr)
+ }
+}
+
+const (
+	// remoteWeightSmoothingFactor for exponential smoothing. This adjusts
+	// how much of the observation and the old value we use to calculate the
+	// new value. See
+ // https://en.wikipedia.org/wiki/Exponential_smoothing#Basic_exponential_smoothing
+ // for details.
+ remoteWeightSmoothingFactor = 0.7
+ remoteWeightMax = 1 << 8
+)
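+
+// For example (an illustrative calculation, not part of the vendored
+// source): with α = 0.7, a previous weight w0 = 0 and an observation
+// w1 = 128, the smoothed weight is 0.7*128 + 0.3*0 = 89.6, which observe
+// rounds up to 90.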
+
+func clip(x float64) float64 {
+ if math.IsNaN(x) {
+		// treat garbage as such; returning zero acts like a no-op for us.
+ return 0
+ }
+ return math.Max(math.Min(remoteWeightMax, x), -remoteWeightMax)
+}
+
+func (mwr *remotesWeightedRandom) observe(peer api.Peer, weight float64) {
+
+ // While we have a decent, ad-hoc approach here to weight subsequent
+	// observations, we may want to look into applying forward decay:
+ //
+ // http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf
+ //
+ // We need to get better data from behavior in a cluster.
+
+ // makes the math easier to read below
+ var (
+ w0 = float64(mwr.remotes[peer])
+ w1 = clip(weight)
+ )
+ const α = remoteWeightSmoothingFactor
+
+	// Combine the new observation with the old value using exponential
+	// smoothing.
+ wn := clip(α*w1 + (1-α)*w0)
+
+ mwr.remotes[peer] = int(math.Ceil(wn))
+}
+
+// Picker implements the grpc.Picker interface.
+type Picker struct {
+ r Remotes
+ peer api.Peer // currently selected remote peer
+ conn *grpc.Conn
+ mu sync.Mutex
+}
+
+var _ grpc.Picker = &Picker{}
+
+// NewPicker returns a Picker
+func NewPicker(r Remotes, initial ...string) *Picker {
+ var peer api.Peer
+ if len(initial) == 0 {
+ peer, _ = r.Select() // empty in case of error
+ } else {
+ peer = api.Peer{Addr: initial[0]}
+ }
+ return &Picker{r: r, peer: peer}
+}
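+
+// A hedged usage sketch (illustrative only; it assumes the grpc.WithPicker
+// dial option from grpc-go versions contemporary with this code):
+//
+//	remotes := NewRemotes(api.Peer{Addr: "10.0.0.1:4242"})
+//	p := NewPicker(remotes, "10.0.0.1:4242")
+//	conn, err := grpc.Dial("10.0.0.1:4242", grpc.WithPicker(p))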
+
+// Init does initial processing for the Picker, e.g., initiating some connections.
+func (p *Picker) Init(cc *grpc.ClientConn) error {
+ p.mu.Lock()
+ peer := p.peer
+ p.mu.Unlock()
+
+ p.r.ObserveIfExists(peer, 1)
+ c, err := grpc.NewConn(cc)
+ if err != nil {
+ return err
+ }
+
+ p.mu.Lock()
+ p.conn = c
+ p.mu.Unlock()
+ return nil
+}
+
+// Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC
+// or some error happens.
+func (p *Picker) Pick(ctx context.Context) (transport.ClientTransport, error) {
+ p.mu.Lock()
+ peer := p.peer
+ p.mu.Unlock()
+ transport, err := p.conn.Wait(ctx)
+ if err != nil {
+ p.r.ObserveIfExists(peer, -1)
+ }
+
+ return transport, err
+}
+
+// PickAddr picks a peer address for connecting. This will be called
+// repeatedly for connecting/reconnecting.
+func (p *Picker) PickAddr() (string, error) {
+ p.mu.Lock()
+ peer := p.peer
+ p.mu.Unlock()
+
+ p.r.ObserveIfExists(peer, -1) // downweight the current addr
+
+ var err error
+ peer, err = p.r.Select("")
+ if err != nil {
+ return "", err
+ }
+
+ p.mu.Lock()
+ p.peer = peer
+ p.mu.Unlock()
+ return p.peer.Addr, err
+}
+
+// State returns the connectivity state of the underlying connections.
+func (p *Picker) State() (grpc.ConnectivityState, error) {
+ return p.conn.State(), nil
+}
+
+// WaitForStateChange blocks until the state changes to something other than
+// the sourceState. It returns the new state or an error.
+func (p *Picker) WaitForStateChange(ctx context.Context, sourceState grpc.ConnectivityState) (grpc.ConnectivityState, error) {
+ p.mu.Lock()
+ conn := p.conn
+ peer := p.peer
+ p.mu.Unlock()
+
+ state, err := conn.WaitForStateChange(ctx, sourceState)
+ if err != nil {
+ return state, err
+ }
+
+ // TODO(stevvooe): We may want to actually score the transition by checking
+ // sourceState.
+
+ // TODO(stevvooe): This is questionable, but we'll see how it works.
+ switch state {
+ case grpc.Idle:
+ p.r.ObserveIfExists(peer, 1)
+ case grpc.Connecting:
+ p.r.ObserveIfExists(peer, 1)
+ case grpc.Ready:
+ p.r.ObserveIfExists(peer, 1)
+ case grpc.TransientFailure:
+ p.r.ObserveIfExists(peer, -1)
+ case grpc.Shutdown:
+ p.r.ObserveIfExists(peer, -1)
+ }
+
+ return state, err
+}
+
+// Reset the current connection and force a reconnect to another address.
+func (p *Picker) Reset() error {
+ p.mu.Lock()
+ conn := p.conn
+ p.mu.Unlock()
+
+ conn.NotifyReset()
+ return nil
+}
+
+// Close closes all the Conns owned by this Picker.
+func (p *Picker) Close() error {
+ p.mu.Lock()
+ conn := p.conn
+ p.mu.Unlock()
+
+ return conn.Close()
+}
diff --git a/vendor/src/github.com/docker/swarmkit/protobuf/plugin/gen.go b/vendor/src/github.com/docker/swarmkit/protobuf/plugin/gen.go
new file mode 100644
index 0000000000..b68b83798a
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/protobuf/plugin/gen.go
@@ -0,0 +1,3 @@
+package plugin
+
+//go:generate protoc -I.:/usr/local --gogoswarm_out=import_path=github.com/docker/swarmkit/protobuf/plugin,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. plugin.proto
diff --git a/vendor/src/github.com/docker/swarmkit/protobuf/plugin/helpers.go b/vendor/src/github.com/docker/swarmkit/protobuf/plugin/helpers.go
new file mode 100644
index 0000000000..daea795b36
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/protobuf/plugin/helpers.go
@@ -0,0 +1,11 @@
+package plugin
+
+import (
+ "github.com/gogo/protobuf/proto"
+ google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+)
+
+// DeepcopyEnabled returns true if deepcopy is enabled for the descriptor.
+func DeepcopyEnabled(options *google_protobuf.MessageOptions) bool {
+ return proto.GetBoolExtension(options, E_Deepcopy, true)
+}
diff --git a/vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go b/vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go
new file mode 100644
index 0000000000..927e03e77e
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go
@@ -0,0 +1,464 @@
+// Code generated by protoc-gen-gogo.
+// source: plugin.proto
+// DO NOT EDIT!
+
+/*
+ Package plugin is a generated protocol buffer package.
+
+ It is generated from these files:
+ plugin.proto
+
+ It has these top-level messages:
+ TLSAuthorization
+*/
+package plugin
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+const _ = proto.GoGoProtoPackageIsVersion1
+
+type TLSAuthorization struct {
+ // Roles contains the acceptable TLS OU roles for the handler.
+ Roles []string `protobuf:"bytes,1,rep,name=roles" json:"roles,omitempty"`
+ // Insecure is set to true if this method does not require
+ // authorization. NOTE: Specifying both "insecure" and a nonempty
+ // list of roles is invalid. This would fail at codegen time.
+ Insecure *bool `protobuf:"varint,2,opt,name=insecure" json:"insecure,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TLSAuthorization) Reset() { *m = TLSAuthorization{} }
+func (*TLSAuthorization) ProtoMessage() {}
+func (*TLSAuthorization) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} }
+
+var E_Deepcopy = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 70000,
+ Name: "docker.protobuf.plugin.deepcopy",
+ Tag: "varint,70000,opt,name=deepcopy,def=1",
+}
+
+var E_TlsAuthorization = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MethodOptions)(nil),
+ ExtensionType: (*TLSAuthorization)(nil),
+ Field: 73626345,
+ Name: "docker.protobuf.plugin.tls_authorization",
+ Tag: "bytes,73626345,opt,name=tls_authorization,json=tlsAuthorization",
+}
+
+func init() {
+ proto.RegisterType((*TLSAuthorization)(nil), "docker.protobuf.plugin.TLSAuthorization")
+ proto.RegisterExtension(E_Deepcopy)
+ proto.RegisterExtension(E_TlsAuthorization)
+}
+func (this *TLSAuthorization) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&plugin.TLSAuthorization{")
+ if this.Roles != nil {
+ s = append(s, "Roles: "+fmt.Sprintf("%#v", this.Roles)+",\n")
+ }
+ if this.Insecure != nil {
+ s = append(s, "Insecure: "+valueToGoStringPlugin(this.Insecure, "bool")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringPlugin(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringPlugin(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
+func (m *TLSAuthorization) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *TLSAuthorization) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if m.Insecure != nil {
+ data[i] = 0x10
+ i++
+ if *m.Insecure {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(data[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func encodeFixed64Plugin(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Plugin(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintPlugin(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *TLSAuthorization) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ l = len(s)
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ }
+ if m.Insecure != nil {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovPlugin(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozPlugin(x uint64) (n int) {
+ return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *TLSAuthorization) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TLSAuthorization{`,
+ `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`,
+ `Insecure:` + valueToStringPlugin(this.Insecure) + `,`,
+ `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringPlugin(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *TLSAuthorization) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TLSAuthorization: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TLSAuthorization: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Roles = append(m.Roles, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Insecure = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPlugin(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipPlugin(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthPlugin
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipPlugin(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow")
+)
+
+var fileDescriptorPlugin = []byte{
+ // 249 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
+ 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4b, 0xc9, 0x4f, 0xce, 0x4e, 0x2d,
+ 0x82, 0xf0, 0x92, 0x4a, 0xd3, 0xf4, 0x20, 0xb2, 0x52, 0x0a, 0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9,
+ 0xfa, 0x30, 0x71, 0xfd, 0x94, 0xd4, 0xe2, 0xe4, 0xa2, 0xcc, 0x82, 0x92, 0x7c, 0xa8, 0x5a, 0x25,
+ 0x17, 0x2e, 0x81, 0x10, 0x9f, 0x60, 0xc7, 0xd2, 0x92, 0x8c, 0xfc, 0xa2, 0xcc, 0xaa, 0xc4, 0x92,
+ 0xcc, 0xfc, 0x3c, 0x21, 0x11, 0x2e, 0xd6, 0xa2, 0xfc, 0x9c, 0xd4, 0x62, 0x09, 0x46, 0x05, 0x66,
+ 0x0d, 0xce, 0x20, 0x08, 0x47, 0x48, 0x8a, 0x8b, 0x23, 0x33, 0xaf, 0x38, 0x35, 0xb9, 0xb4, 0x28,
+ 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x23, 0x08, 0xce, 0xb7, 0x72, 0xe6, 0xe2, 0x48, 0x49, 0x4d,
+ 0x2d, 0x48, 0xce, 0x2f, 0xa8, 0x14, 0x92, 0xd7, 0x83, 0x58, 0x8a, 0x70, 0x8c, 0x6f, 0x6a, 0x71,
+ 0x71, 0x62, 0x7a, 0xaa, 0x7f, 0x01, 0xc8, 0xf4, 0x62, 0x89, 0x0f, 0x8b, 0x58, 0x40, 0xda, 0xad,
+ 0x58, 0x4a, 0x8a, 0x4a, 0x53, 0x83, 0xe0, 0x1a, 0xad, 0x2a, 0xb8, 0x04, 0x4b, 0x72, 0x8a, 0xe3,
+ 0x13, 0x51, 0xdc, 0x22, 0x87, 0xc5, 0x34, 0xa0, 0x7c, 0x0a, 0xcc, 0xb0, 0x97, 0x4f, 0x7b, 0x95,
+ 0x81, 0xa6, 0x71, 0x1b, 0x69, 0xe8, 0x61, 0x0f, 0x03, 0x3d, 0x74, 0xef, 0x05, 0x09, 0x00, 0x6d,
+ 0x41, 0x11, 0x71, 0x92, 0x39, 0xf1, 0x50, 0x8e, 0xe1, 0x06, 0x10, 0x7f, 0x78, 0x28, 0xc7, 0xd8,
+ 0xf0, 0x48, 0x8e, 0xf1, 0x04, 0x10, 0x5f, 0x00, 0xe2, 0x07, 0x40, 0x0c, 0x08, 0x00, 0x00, 0xff,
+ 0xff, 0x04, 0x4e, 0xf8, 0x38, 0x6b, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.proto b/vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.proto
new file mode 100644
index 0000000000..53602cb84c
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.proto
@@ -0,0 +1,25 @@
+syntax = "proto2";
+
+package docker.protobuf.plugin;
+
+import "google/protobuf/descriptor.proto";
+
+extend google.protobuf.MessageOptions {
+ optional bool deepcopy = 70000 [default=true];
+}
+
+message TLSAuthorization {
+ // Roles contains the acceptable TLS OU roles for the handler.
+ repeated string roles = 1;
+
+ // Insecure is set to true if this method does not require
+ // authorization. NOTE: Specifying both "insecure" and a nonempty
+ // list of roles is invalid. This would fail at codegen time.
+ optional bool insecure = 2;
+}
+
+extend google.protobuf.MethodOptions {
+ // TLSAuthorization contains the authorization parameters for this
+ // method.
+ optional TLSAuthorization tls_authorization = 73626345;
+}
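+
+// A hypothetical example of attaching the method option above (illustrative
+// only; the service and role names are invented):
+//
+//   service Example {
+//     rpc DoSomething(Request) returns (Response) {
+//       option (docker.protobuf.plugin.tls_authorization) = {
+//         roles: "swarm-manager"
+//       };
+//     }
+//   }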
diff --git a/vendor/src/github.com/docker/swarmkit/protobuf/ptypes/doc.go b/vendor/src/github.com/docker/swarmkit/protobuf/ptypes/doc.go
new file mode 100644
index 0000000000..8a77dad4dc
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/protobuf/ptypes/doc.go
@@ -0,0 +1,9 @@
+// Package ptypes is a copy of the golang/protobuf/ptypes that we'll need to
+// use with our regenerated ptypes until google gets their act together and
+// makes their "Well Known Types" actually usable by other parties.
+//
+// It is more likely that this issue will be resolved by gogo.
+//
+// Note that this is not a vendoring of the package. We have to change the
+// types to match the generated types.
+package ptypes
diff --git a/vendor/src/github.com/docker/swarmkit/protobuf/ptypes/duration.go b/vendor/src/github.com/docker/swarmkit/protobuf/ptypes/duration.go
new file mode 100644
index 0000000000..9625e4fc14
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/protobuf/ptypes/duration.go
@@ -0,0 +1,102 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ durpb "github.com/docker/swarmkit/api/duration"
+)
+
+const (
+ // Range of a durpb.Duration in seconds, as specified in
+ // google/protobuf/duration.proto. This is about 10,000 years in seconds.
+ maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+ minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the durpb.Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid durpb.Duration
+// may still be too large to fit into a time.Duration (the range of durpb.Duration
+// is about 10,000 years, while the range of time.Duration is about 290 years).
+func validateDuration(d *durpb.Duration) error {
+ if d == nil {
+ return errors.New("duration: nil Duration")
+ }
+ if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+ return fmt.Errorf("duration: %v: seconds out of range", d)
+ }
+ if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+ return fmt.Errorf("duration: %v: nanos out of range", d)
+ }
+ // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+ if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+ return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
+ }
+ return nil
+}
+
+// Duration converts a durpb.Duration to a time.Duration. Duration
+// returns an error if the durpb.Duration is invalid or is too large to be
+// represented in a time.Duration.
+func Duration(p *durpb.Duration) (time.Duration, error) {
+ if err := validateDuration(p); err != nil {
+ return 0, err
+ }
+ d := time.Duration(p.Seconds) * time.Second
+ if int64(d/time.Second) != p.Seconds {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ }
+ if p.Nanos != 0 {
+ d += time.Duration(p.Nanos)
+ if (d < 0) != (p.Nanos < 0) {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ }
+ }
+ return d, nil
+}
+
+// DurationProto converts a time.Duration to a durpb.Duration.
+func DurationProto(d time.Duration) *durpb.Duration {
+ nanos := d.Nanoseconds()
+ secs := nanos / 1e9
+ nanos -= secs * 1e9
+ return &durpb.Duration{
+ Seconds: secs,
+ Nanos: int32(nanos),
+ }
+}
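+
+// An illustrative round trip (not part of the vendored source):
+//
+//	p := DurationProto(-1500 * time.Millisecond)
+//	// p == &durpb.Duration{Seconds: -1, Nanos: -500000000}
+//	d, err := Duration(p) // d == -1500ms, err == nil
+//
+// Seconds and Nanos carry the same sign, as validateDuration requires.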
diff --git a/vendor/src/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go b/vendor/src/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000000..00516fd0c7
--- /dev/null
+++ b/vendor/src/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go
@@ -0,0 +1,135 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ tspb "github.com/docker/swarmkit/api/timestamp"
+)
+
+const (
+ // Seconds field of the earliest valid Timestamp.
+ // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ minValidSeconds = -62135596800
+ // Seconds field just after the latest valid Timestamp.
+ // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+ seconds := t.Unix()
+ nanos := int32(t.Sub(time.Unix(seconds, 0)))
+ ts := &tspb.Timestamp{
+ Seconds: seconds,
+ Nanos: nanos,
+ }
+ if err := validateTimestamp(ts); err != nil {
+ return nil, err
+ }
+ return ts, nil
+}
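+
+// An illustrative round trip (not part of the vendored source):
+//
+//	t := time.Unix(1, 500000000).UTC() // 1970-01-01T00:00:01.5Z
+//	ts, _ := TimestampProto(t)         // &tspb.Timestamp{Seconds: 1, Nanos: 500000000}
+//	t2, _ := Timestamp(ts)             // t2.Equal(t) is true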
+
+// MustTimestampProto converts time.Time to a google.protobuf.Timestamp proto.
+// It panics if input timestamp is invalid.
+func MustTimestampProto(t time.Time) *tspb.Timestamp {
+ ts, err := TimestampProto(t)
+ if err != nil {
+ panic(err.Error())
+ }
+ return ts
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *tspb.Timestamp) string {
+ t, err := Timestamp(ts)
+ if err != nil {
+ return fmt.Sprintf("(%v)", err)
+ }
+ return t.Format(time.RFC3339Nano)
+}
diff --git a/vendor/src/github.com/gogo/protobuf/LICENSE b/vendor/src/github.com/gogo/protobuf/LICENSE
new file mode 100644
index 0000000000..335e38e19b
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/LICENSE
@@ -0,0 +1,36 @@
+Extensions for Protocol Buffers to create more go like structures.
+
+Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+http://github.com/gogo/protobuf/gogoproto
+
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/src/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/src/github.com/gogo/protobuf/gogoproto/Makefile
new file mode 100644
index 0000000000..557f294933
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/gogoproto/Makefile
@@ -0,0 +1,36 @@
+# Extensions for Protocol Buffers to create more go like structures.
+#
+# Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+# http://github.com/gogo/protobuf/gogoproto
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+regenerate:
+ protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. --proto_path=../../../../:../protobuf/:. *.proto
+
+restore:
+ cp gogo.pb.golden gogo.pb.go
+
+preserve:
+ cp gogo.pb.go gogo.pb.golden
diff --git a/vendor/src/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/src/github.com/gogo/protobuf/gogoproto/doc.go
new file mode 100644
index 0000000000..f0424d4f8a
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/gogoproto/doc.go
@@ -0,0 +1,168 @@
+// Extensions for Protocol Buffers to create more go like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package gogoproto provides extensions for protocol buffers to achieve:
+
+ - fast marshalling and unmarshalling.
+ - peace of mind by optionally generating test and benchmark code.
+ - more canonical Go structures.
+ - less typing by optionally generating extra helper code.
+ - goprotobuf compatibility
+
+More Canonical Go Structures
+
+A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs.
+You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct.
+Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions.
+
+ - nullable, if false, a field is generated without a pointer (see warning below).
+ - embed, if true, the field is generated as an embedded field.
+  - customtype, it works with the Marshal and Unmarshal methods to allow you to have your own types in your struct that marshal to bytes. For example, custom.Uuid or custom.Fixed128
+ - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames.
+ - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums.
+ - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
+ - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
+
+Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset.
+
+Let us look at:
+
+ github.com/gogo/protobuf/test/example/example.proto
+
+for a quicker overview.
+
+The following message:
+
+ package test;
+
+ import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+ message A {
+ optional string Description = 1 [(gogoproto.nullable) = false];
+ optional int64 Number = 2 [(gogoproto.nullable) = false];
+ optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+ }
+
+Will generate a go struct which looks a lot like this:
+
+ type A struct {
+ Description string
+ Number int64
+ Id github_com_gogo_protobuf_test_custom.Uuid
+ }
+
+You will see there are no pointers, since all fields are non-nullable.
+You will also see a custom type which marshals to a string.
+Be warned it is your responsibility to test your custom types thoroughly.
+You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.
+
+Next we will embed the message A in message B.
+
+ message B {
+ optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+ repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+ }
+
+See below that A is embedded in B.
+
+ type B struct {
+ A
+ G []github_com_gogo_protobuf_test_custom.Uint128
+ }
+
+Also see the repeated custom type.
+
+ type Uint128 [2]uint64
+
+Next we will create a custom name for one of our fields.
+
+ message C {
+ optional int64 size = 1 [(gogoproto.customname) = "MySize"];
+ }
+
+See below that the field's name is MySize and not Size.
+
+ type C struct {
+ MySize *int64
+ }
+
+This is useful when you have a protocol buffer message with a field name which conflicts with a generated method.
+As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error.
+Using customname you can fix this error without changing the field name.
+This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available.
+
+Gogoprotobuf also has some more subtle changes, these could be changed back:
+
+  - the generated package names for imports do not have the extra /filename.pb,
+ but are actually the imports specified in the .proto file.
+
+Gogoprotobuf also has lost some features which should be brought back with time:
+
+ - Marshalling and unmarshalling with reflect and without the unsafe package,
+ this requires work in pointer_reflect.go
+
+Why does nullable break protocol buffer specifications:
+
+The protocol buffer specification states, somewhere, that you should be able to tell whether a
+field is set or unset. With the option nullable=false this feature is lost,
+since your non-nullable fields will always be set. It can be seen as a layer on top of
+protocol buffers, where before and after marshalling all non-nullable fields are set
+and they cannot be unset.
+
+Goprotobuf Compatibility:
+
+Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
+Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
+The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:
+
+ - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
+ - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix
+  - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful if you would rather use enum_stringer or write your own string method.
+  - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather use face
+  - goproto_stringer, if false, the message is generated without the default string method, this is useful if you would rather use stringer or write your own string method.
+ - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
+ - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
+
+Less Typing and Peace of Mind is explained in their specific plugin folders godoc:
+
+ - github.com/gogo/protobuf/plugin/<extension_name>
+
+If you do not use any of these extensions, the generated code
+will be the same as if goprotobuf had generated it.
+
+The most complete way to see examples is to look at
+
+ github.com/gogo/protobuf/test/thetest.proto
+
+Gogoprototest is a separate project,
+because we want to keep gogoprotobuf independent of goprotobuf,
+but we still want to test it thoroughly.
+
+*/
+package gogoproto
diff --git a/vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go
new file mode 100644
index 0000000000..f97c2338e7
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go
@@ -0,0 +1,661 @@
+// Code generated by protoc-gen-gogo.
+// source: gogo.proto
+// DO NOT EDIT!
+
+/*
+Package gogoproto is a generated protocol buffer package.
+
+It is generated from these files:
+ gogo.proto
+
+It has these top-level messages:
+*/
+package gogoproto
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+const _ = proto.GoGoProtoPackageIsVersion1
+
+var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.EnumOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 62001,
+ Name: "gogoproto.goproto_enum_prefix",
+ Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix",
+}
+
+var E_GoprotoEnumStringer = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.EnumOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 62021,
+ Name: "gogoproto.goproto_enum_stringer",
+ Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer",
+}
+
+var E_EnumStringer = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.EnumOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 62022,
+ Name: "gogoproto.enum_stringer",
+ Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer",
+}
+
+var E_EnumCustomname = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.EnumOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 62023,
+ Name: "gogoproto.enum_customname",
+ Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname",
+}
+
+var E_EnumvalueCustomname = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.EnumValueOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 66001,
+ Name: "gogoproto.enumvalue_customname",
+ Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname",
+}
+
+var E_GoprotoGettersAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63001,
+ Name: "gogoproto.goproto_getters_all",
+ Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll",
+}
+
+var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63002,
+ Name: "gogoproto.goproto_enum_prefix_all",
+ Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll",
+}
+
+var E_GoprotoStringerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63003,
+ Name: "gogoproto.goproto_stringer_all",
+ Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll",
+}
+
+var E_VerboseEqualAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63004,
+ Name: "gogoproto.verbose_equal_all",
+ Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll",
+}
+
+var E_FaceAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63005,
+ Name: "gogoproto.face_all",
+ Tag: "varint,63005,opt,name=face_all,json=faceAll",
+}
+
+var E_GostringAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63006,
+ Name: "gogoproto.gostring_all",
+ Tag: "varint,63006,opt,name=gostring_all,json=gostringAll",
+}
+
+var E_PopulateAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63007,
+ Name: "gogoproto.populate_all",
+ Tag: "varint,63007,opt,name=populate_all,json=populateAll",
+}
+
+var E_StringerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63008,
+ Name: "gogoproto.stringer_all",
+ Tag: "varint,63008,opt,name=stringer_all,json=stringerAll",
+}
+
+var E_OnlyoneAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63009,
+ Name: "gogoproto.onlyone_all",
+ Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll",
+}
+
+var E_EqualAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63013,
+ Name: "gogoproto.equal_all",
+ Tag: "varint,63013,opt,name=equal_all,json=equalAll",
+}
+
+var E_DescriptionAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63014,
+ Name: "gogoproto.description_all",
+ Tag: "varint,63014,opt,name=description_all,json=descriptionAll",
+}
+
+var E_TestgenAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63015,
+ Name: "gogoproto.testgen_all",
+ Tag: "varint,63015,opt,name=testgen_all,json=testgenAll",
+}
+
+var E_BenchgenAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63016,
+ Name: "gogoproto.benchgen_all",
+ Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll",
+}
+
+var E_MarshalerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63017,
+ Name: "gogoproto.marshaler_all",
+ Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll",
+}
+
+var E_UnmarshalerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63018,
+ Name: "gogoproto.unmarshaler_all",
+ Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll",
+}
+
+var E_StableMarshalerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63019,
+ Name: "gogoproto.stable_marshaler_all",
+ Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll",
+}
+
+var E_SizerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63020,
+ Name: "gogoproto.sizer_all",
+ Tag: "varint,63020,opt,name=sizer_all,json=sizerAll",
+}
+
+var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63021,
+ Name: "gogoproto.goproto_enum_stringer_all",
+ Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll",
+}
+
+var E_EnumStringerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63022,
+ Name: "gogoproto.enum_stringer_all",
+ Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll",
+}
+
+var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63023,
+ Name: "gogoproto.unsafe_marshaler_all",
+ Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll",
+}
+
+var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63024,
+ Name: "gogoproto.unsafe_unmarshaler_all",
+ Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll",
+}
+
+var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63025,
+ Name: "gogoproto.goproto_extensions_map_all",
+ Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll",
+}
+
+var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63026,
+ Name: "gogoproto.goproto_unrecognized_all",
+ Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll",
+}
+
+var E_GogoprotoImport = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63027,
+ Name: "gogoproto.gogoproto_import",
+ Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport",
+}
+
+var E_ProtosizerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63028,
+ Name: "gogoproto.protosizer_all",
+ Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll",
+}
+
+var E_CompareAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63029,
+ Name: "gogoproto.compare_all",
+ Tag: "varint,63029,opt,name=compare_all,json=compareAll",
+}
+
+var E_GoprotoGetters = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64001,
+ Name: "gogoproto.goproto_getters",
+ Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters",
+}
+
+var E_GoprotoStringer = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64003,
+ Name: "gogoproto.goproto_stringer",
+ Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer",
+}
+
+var E_VerboseEqual = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64004,
+ Name: "gogoproto.verbose_equal",
+ Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual",
+}
+
+var E_Face = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64005,
+ Name: "gogoproto.face",
+ Tag: "varint,64005,opt,name=face",
+}
+
+var E_Gostring = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64006,
+ Name: "gogoproto.gostring",
+ Tag: "varint,64006,opt,name=gostring",
+}
+
+var E_Populate = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64007,
+ Name: "gogoproto.populate",
+ Tag: "varint,64007,opt,name=populate",
+}
+
+var E_Stringer = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 67008,
+ Name: "gogoproto.stringer",
+ Tag: "varint,67008,opt,name=stringer",
+}
+
+var E_Onlyone = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64009,
+ Name: "gogoproto.onlyone",
+ Tag: "varint,64009,opt,name=onlyone",
+}
+
+var E_Equal = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64013,
+ Name: "gogoproto.equal",
+ Tag: "varint,64013,opt,name=equal",
+}
+
+var E_Description = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64014,
+ Name: "gogoproto.description",
+ Tag: "varint,64014,opt,name=description",
+}
+
+var E_Testgen = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64015,
+ Name: "gogoproto.testgen",
+ Tag: "varint,64015,opt,name=testgen",
+}
+
+var E_Benchgen = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64016,
+ Name: "gogoproto.benchgen",
+ Tag: "varint,64016,opt,name=benchgen",
+}
+
+var E_Marshaler = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64017,
+ Name: "gogoproto.marshaler",
+ Tag: "varint,64017,opt,name=marshaler",
+}
+
+var E_Unmarshaler = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64018,
+ Name: "gogoproto.unmarshaler",
+ Tag: "varint,64018,opt,name=unmarshaler",
+}
+
+var E_StableMarshaler = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64019,
+ Name: "gogoproto.stable_marshaler",
+ Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler",
+}
+
+var E_Sizer = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64020,
+ Name: "gogoproto.sizer",
+ Tag: "varint,64020,opt,name=sizer",
+}
+
+var E_UnsafeMarshaler = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64023,
+ Name: "gogoproto.unsafe_marshaler",
+ Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler",
+}
+
+var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64024,
+ Name: "gogoproto.unsafe_unmarshaler",
+ Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler",
+}
+
+var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64025,
+ Name: "gogoproto.goproto_extensions_map",
+ Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap",
+}
+
+var E_GoprotoUnrecognized = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64026,
+ Name: "gogoproto.goproto_unrecognized",
+ Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized",
+}
+
+var E_Protosizer = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64028,
+ Name: "gogoproto.protosizer",
+ Tag: "varint,64028,opt,name=protosizer",
+}
+
+var E_Compare = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64029,
+ Name: "gogoproto.compare",
+ Tag: "varint,64029,opt,name=compare",
+}
+
+var E_Nullable = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 65001,
+ Name: "gogoproto.nullable",
+ Tag: "varint,65001,opt,name=nullable",
+}
+
+var E_Embed = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 65002,
+ Name: "gogoproto.embed",
+ Tag: "varint,65002,opt,name=embed",
+}
+
+var E_Customtype = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65003,
+ Name: "gogoproto.customtype",
+ Tag: "bytes,65003,opt,name=customtype",
+}
+
+var E_Customname = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65004,
+ Name: "gogoproto.customname",
+ Tag: "bytes,65004,opt,name=customname",
+}
+
+var E_Jsontag = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65005,
+ Name: "gogoproto.jsontag",
+ Tag: "bytes,65005,opt,name=jsontag",
+}
+
+var E_Moretags = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65006,
+ Name: "gogoproto.moretags",
+ Tag: "bytes,65006,opt,name=moretags",
+}
+
+var E_Casttype = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65007,
+ Name: "gogoproto.casttype",
+ Tag: "bytes,65007,opt,name=casttype",
+}
+
+var E_Castkey = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65008,
+ Name: "gogoproto.castkey",
+ Tag: "bytes,65008,opt,name=castkey",
+}
+
+var E_Castvalue = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65009,
+ Name: "gogoproto.castvalue",
+ Tag: "bytes,65009,opt,name=castvalue",
+}
+
+func init() {
+ proto.RegisterExtension(E_GoprotoEnumPrefix)
+ proto.RegisterExtension(E_GoprotoEnumStringer)
+ proto.RegisterExtension(E_EnumStringer)
+ proto.RegisterExtension(E_EnumCustomname)
+ proto.RegisterExtension(E_EnumvalueCustomname)
+ proto.RegisterExtension(E_GoprotoGettersAll)
+ proto.RegisterExtension(E_GoprotoEnumPrefixAll)
+ proto.RegisterExtension(E_GoprotoStringerAll)
+ proto.RegisterExtension(E_VerboseEqualAll)
+ proto.RegisterExtension(E_FaceAll)
+ proto.RegisterExtension(E_GostringAll)
+ proto.RegisterExtension(E_PopulateAll)
+ proto.RegisterExtension(E_StringerAll)
+ proto.RegisterExtension(E_OnlyoneAll)
+ proto.RegisterExtension(E_EqualAll)
+ proto.RegisterExtension(E_DescriptionAll)
+ proto.RegisterExtension(E_TestgenAll)
+ proto.RegisterExtension(E_BenchgenAll)
+ proto.RegisterExtension(E_MarshalerAll)
+ proto.RegisterExtension(E_UnmarshalerAll)
+ proto.RegisterExtension(E_StableMarshalerAll)
+ proto.RegisterExtension(E_SizerAll)
+ proto.RegisterExtension(E_GoprotoEnumStringerAll)
+ proto.RegisterExtension(E_EnumStringerAll)
+ proto.RegisterExtension(E_UnsafeMarshalerAll)
+ proto.RegisterExtension(E_UnsafeUnmarshalerAll)
+ proto.RegisterExtension(E_GoprotoExtensionsMapAll)
+ proto.RegisterExtension(E_GoprotoUnrecognizedAll)
+ proto.RegisterExtension(E_GogoprotoImport)
+ proto.RegisterExtension(E_ProtosizerAll)
+ proto.RegisterExtension(E_CompareAll)
+ proto.RegisterExtension(E_GoprotoGetters)
+ proto.RegisterExtension(E_GoprotoStringer)
+ proto.RegisterExtension(E_VerboseEqual)
+ proto.RegisterExtension(E_Face)
+ proto.RegisterExtension(E_Gostring)
+ proto.RegisterExtension(E_Populate)
+ proto.RegisterExtension(E_Stringer)
+ proto.RegisterExtension(E_Onlyone)
+ proto.RegisterExtension(E_Equal)
+ proto.RegisterExtension(E_Description)
+ proto.RegisterExtension(E_Testgen)
+ proto.RegisterExtension(E_Benchgen)
+ proto.RegisterExtension(E_Marshaler)
+ proto.RegisterExtension(E_Unmarshaler)
+ proto.RegisterExtension(E_StableMarshaler)
+ proto.RegisterExtension(E_Sizer)
+ proto.RegisterExtension(E_UnsafeMarshaler)
+ proto.RegisterExtension(E_UnsafeUnmarshaler)
+ proto.RegisterExtension(E_GoprotoExtensionsMap)
+ proto.RegisterExtension(E_GoprotoUnrecognized)
+ proto.RegisterExtension(E_Protosizer)
+ proto.RegisterExtension(E_Compare)
+ proto.RegisterExtension(E_Nullable)
+ proto.RegisterExtension(E_Embed)
+ proto.RegisterExtension(E_Customtype)
+ proto.RegisterExtension(E_Customname)
+ proto.RegisterExtension(E_Jsontag)
+ proto.RegisterExtension(E_Moretags)
+ proto.RegisterExtension(E_Casttype)
+ proto.RegisterExtension(E_Castkey)
+ proto.RegisterExtension(E_Castvalue)
+}
+
+var fileDescriptorGogo = []byte{
+ // 1096 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xcb, 0x6f, 0xdc, 0x54,
+ 0x14, 0x87, 0x85, 0x48, 0x95, 0x99, 0x93, 0x17, 0x99, 0x84, 0x50, 0x2a, 0x10, 0xed, 0x8e, 0x55,
+ 0xba, 0x42, 0xa8, 0xae, 0x10, 0x6a, 0xab, 0x34, 0x2a, 0x22, 0x10, 0x05, 0x52, 0x40, 0x2c, 0x46,
+ 0x9e, 0xc9, 0x8d, 0x3b, 0xe0, 0xf1, 0x35, 0xbe, 0x76, 0xd5, 0xb0, 0x43, 0xe5, 0x21, 0x84, 0x78,
+ 0x23, 0x41, 0x4b, 0xcb, 0x63, 0xc1, 0xfb, 0x59, 0x1e, 0x7b, 0x36, 0xc0, 0x9a, 0xff, 0x81, 0x0d,
+ 0x10, 0x5e, 0x52, 0x76, 0xd9, 0xf4, 0x1e, 0xfb, 0x1c, 0xcf, 0xb5, 0x67, 0xa4, 0x7b, 0x67, 0xe7,
+ 0x64, 0xee, 0xf7, 0xcd, 0xf5, 0x39, 0xbe, 0xe7, 0x37, 0x06, 0x08, 0x64, 0x20, 0x97, 0xe3, 0x44,
+ 0xa6, 0xb2, 0xd5, 0xc4, 0xeb, 0xfc, 0xf2, 0xd0, 0xe1, 0x40, 0xca, 0x20, 0x14, 0x47, 0xf3, 0xbf,
+ 0x3a, 0xd9, 0xf6, 0xd1, 0x2d, 0xa1, 0xba, 0x49, 0x2f, 0x4e, 0x65, 0x52, 0x2c, 0xf6, 0x1e, 0x80,
+ 0x05, 0x5a, 0xdc, 0x16, 0x51, 0xd6, 0x6f, 0xc7, 0x89, 0xd8, 0xee, 0x5d, 0x68, 0xdd, 0xb6, 0x5c,
+ 0x90, 0xcb, 0x4c, 0x2e, 0xaf, 0xe8, 0x4f, 0x1f, 0x8c, 0xd3, 0x9e, 0x8c, 0xd4, 0xc1, 0x6b, 0xbf,
+ 0xdf, 0x78, 0xf8, 0x86, 0x3b, 0x1b, 0x1b, 0xf3, 0x84, 0xe2, 0x67, 0xeb, 0x39, 0xe8, 0x6d, 0xc0,
+ 0xcd, 0x15, 0x9f, 0x4a, 0x93, 0x5e, 0x14, 0x88, 0xc4, 0x62, 0xfc, 0x99, 0x8c, 0x0b, 0x86, 0xf1,
+ 0x21, 0x42, 0xbd, 0x53, 0x30, 0x33, 0x8e, 0xeb, 0x17, 0x72, 0x4d, 0x0b, 0x53, 0xb2, 0x0a, 0x73,
+ 0xb9, 0xa4, 0x9b, 0xa9, 0x54, 0xf6, 0x23, 0xbf, 0x2f, 0x2c, 0x9a, 0x5f, 0x73, 0x4d, 0x73, 0x63,
+ 0x16, 0xb1, 0x53, 0x25, 0xe5, 0x9d, 0x85, 0x45, 0xfc, 0xcf, 0x79, 0x3f, 0xcc, 0x84, 0x69, 0x3b,
+ 0x32, 0xd2, 0x76, 0x16, 0x97, 0xb1, 0xf2, 0xb7, 0x8b, 0x13, 0xb9, 0x72, 0xa1, 0x14, 0x18, 0x5e,
+ 0xa3, 0x13, 0x81, 0x48, 0x53, 0x91, 0xa8, 0xb6, 0x1f, 0x86, 0x23, 0x36, 0x79, 0xba, 0x17, 0x96,
+ 0xc6, 0x4b, 0xbb, 0xd5, 0x4e, 0xac, 0x16, 0xe4, 0x89, 0x30, 0xf4, 0x36, 0xe1, 0x96, 0x11, 0x9d,
+ 0x75, 0x70, 0x5e, 0x26, 0xe7, 0xe2, 0x50, 0x77, 0x51, 0xbb, 0x0e, 0xfc, 0xff, 0xb2, 0x1f, 0x0e,
+ 0xce, 0x77, 0xc9, 0xd9, 0x22, 0x96, 0xdb, 0x82, 0xc6, 0xfb, 0x60, 0xfe, 0xbc, 0x48, 0x3a, 0x52,
+ 0x89, 0xb6, 0x78, 0x2a, 0xf3, 0x43, 0x07, 0xdd, 0x15, 0xd2, 0xcd, 0x11, 0xb8, 0x82, 0x1c, 0xba,
+ 0x8e, 0x41, 0x63, 0xdb, 0xef, 0x0a, 0x07, 0xc5, 0x55, 0x52, 0x4c, 0xe2, 0x7a, 0x44, 0x4f, 0xc0,
+ 0x74, 0x20, 0x8b, 0x5b, 0x72, 0xc0, 0xdf, 0x23, 0x7c, 0x8a, 0x19, 0x52, 0xc4, 0x32, 0xce, 0x42,
+ 0x3f, 0x75, 0xd9, 0xc1, 0xfb, 0xac, 0x60, 0x86, 0x14, 0x63, 0x94, 0xf5, 0x03, 0x56, 0x28, 0xa3,
+ 0x9e, 0xf7, 0xc2, 0x94, 0x8c, 0xc2, 0x1d, 0x19, 0xb9, 0x6c, 0xe2, 0x43, 0x32, 0x00, 0x21, 0x28,
+ 0x38, 0x0e, 0x4d, 0xd7, 0x46, 0x7c, 0x44, 0x78, 0x43, 0x70, 0x07, 0xf4, 0x39, 0xe3, 0x21, 0xa3,
+ 0x57, 0x38, 0x28, 0x3e, 0x26, 0xc5, 0xac, 0x81, 0xd1, 0x6d, 0xa4, 0x42, 0xa5, 0x81, 0x70, 0x91,
+ 0x7c, 0xc2, 0xb7, 0x41, 0x08, 0x95, 0xb2, 0x23, 0xa2, 0xee, 0x39, 0x37, 0xc3, 0xa7, 0x5c, 0x4a,
+ 0x66, 0x50, 0xa1, 0x27, 0x4f, 0xdf, 0x4f, 0xd4, 0x39, 0x3f, 0x74, 0x6a, 0xc7, 0x67, 0xe4, 0x98,
+ 0x2e, 0x21, 0xaa, 0x48, 0x16, 0x8d, 0xa3, 0xf9, 0x9c, 0x2b, 0x62, 0x60, 0x74, 0xf4, 0x54, 0xea,
+ 0x77, 0x42, 0xd1, 0x1e, 0xc7, 0xf6, 0x05, 0x1f, 0xbd, 0x82, 0x5d, 0x33, 0x8d, 0xba, 0xd3, 0xaa,
+ 0xf7, 0xb4, 0x93, 0xe6, 0x4b, 0xee, 0x74, 0x0e, 0x20, 0xfc, 0x18, 0xdc, 0x3a, 0x72, 0xd4, 0x3b,
+ 0xc8, 0xbe, 0x22, 0xd9, 0xd2, 0x88, 0x71, 0x4f, 0x23, 0x61, 0x5c, 0xe5, 0xd7, 0x3c, 0x12, 0x44,
+ 0xcd, 0xa5, 0xab, 0x96, 0x45, 0xca, 0xdf, 0x1e, 0xaf, 0x6a, 0xdf, 0x70, 0xd5, 0x0a, 0xb6, 0x52,
+ 0xb5, 0x87, 0x61, 0x89, 0x8c, 0xe3, 0xf5, 0xf5, 0x5b, 0x1e, 0xac, 0x05, 0xbd, 0x59, 0xed, 0xee,
+ 0xe3, 0x70, 0xa8, 0x2c, 0xe7, 0x85, 0x54, 0x44, 0x0a, 0x19, 0xbd, 0xe7, 0xd8, 0xc1, 0x7c, 0x8d,
+ 0xcc, 0x3c, 0xf1, 0x57, 0x4a, 0xc1, 0x9a, 0x1f, 0xa3, 0xfc, 0x51, 0x38, 0xc8, 0xf2, 0x2c, 0x4a,
+ 0x44, 0x57, 0x06, 0x91, 0x6e, 0xe3, 0x96, 0x83, 0xfa, 0xbb, 0x5a, 0xab, 0x36, 0x0d, 0x1c, 0xcd,
+ 0x67, 0xe0, 0xa6, 0xf2, 0xf7, 0x46, 0xbb, 0xd7, 0x8f, 0x65, 0x92, 0x5a, 0x8c, 0xdf, 0x73, 0xa7,
+ 0x4a, 0xee, 0x4c, 0x8e, 0x79, 0x2b, 0x30, 0x9b, 0xff, 0xe9, 0xfa, 0x48, 0xfe, 0x40, 0xa2, 0x99,
+ 0x01, 0x45, 0x83, 0xa3, 0x2b, 0xfb, 0xb1, 0x9f, 0xb8, 0xcc, 0xbf, 0x1f, 0x79, 0x70, 0x10, 0x52,
+ 0x3c, 0x7d, 0x73, 0xb5, 0x24, 0x6e, 0xdd, 0x31, 0x24, 0x59, 0x13, 0x4a, 0xf9, 0x41, 0xe9, 0x79,
+ 0x66, 0x8f, 0xce, 0x6c, 0x35, 0x88, 0xbd, 0xfb, 0xb1, 0x3c, 0xd5, 0xb8, 0xb4, 0xcb, 0x2e, 0xee,
+ 0x95, 0x15, 0xaa, 0xa4, 0xa5, 0x77, 0x1a, 0x66, 0x2a, 0x51, 0x69, 0x57, 0x3d, 0x4b, 0xaa, 0x69,
+ 0x33, 0x29, 0xbd, 0xbb, 0x60, 0x02, 0x63, 0xcf, 0x8e, 0x3f, 0x47, 0x78, 0xbe, 0xdc, 0xbb, 0x07,
+ 0x1a, 0x1c, 0x77, 0x76, 0xf4, 0x79, 0x42, 0x4b, 0x04, 0x71, 0x8e, 0x3a, 0x3b, 0xfe, 0x02, 0xe3,
+ 0x8c, 0x20, 0xee, 0x5e, 0xc2, 0x9f, 0x5e, 0x9a, 0xa0, 0x71, 0xc5, 0xb5, 0x3b, 0x0e, 0x93, 0x94,
+ 0x71, 0x76, 0xfa, 0x45, 0xfa, 0x72, 0x26, 0xbc, 0xbb, 0xe1, 0x80, 0x63, 0xc1, 0x5f, 0x26, 0xb4,
+ 0x58, 0xaf, 0x13, 0x64, 0xca, 0xc8, 0x35, 0x3b, 0xfe, 0x0a, 0xe1, 0x26, 0x85, 0x5b, 0xa7, 0x5c,
+ 0xb3, 0x0b, 0x5e, 0xe5, 0xad, 0x13, 0x81, 0x65, 0xe3, 0x48, 0xb3, 0xd3, 0xaf, 0x71, 0xd5, 0x19,
+ 0xd1, 0xa7, 0xa9, 0x59, 0x8e, 0x29, 0x3b, 0xff, 0x3a, 0xf1, 0x03, 0x06, 0x2b, 0x60, 0x8c, 0x49,
+ 0xbb, 0xe2, 0x0d, 0xae, 0x80, 0x41, 0xe1, 0x31, 0xaa, 0x47, 0x9f, 0xdd, 0xf4, 0x26, 0x1f, 0xa3,
+ 0x5a, 0xf2, 0x61, 0x37, 0xf3, 0x69, 0x61, 0x57, 0xbc, 0xc5, 0xdd, 0xcc, 0xd7, 0xe3, 0x36, 0xea,
+ 0x59, 0x62, 0x77, 0xbc, 0xcd, 0xdb, 0xa8, 0x45, 0x89, 0x4e, 0xa6, 0xd6, 0x70, 0x8e, 0xd8, 0x7d,
+ 0xef, 0x90, 0x6f, 0x7e, 0x28, 0x46, 0xbc, 0x47, 0x60, 0x69, 0x74, 0x86, 0xd8, 0xad, 0x97, 0xf6,
+ 0x6a, 0xbf, 0xfa, 0xcd, 0x08, 0xd1, 0x91, 0xb7, 0x38, 0x2a, 0x3f, 0xec, 0xda, 0xcb, 0x7b, 0xd5,
+ 0x17, 0x3b, 0x33, 0x3e, 0xf4, 0x2f, 0x34, 0x18, 0x8c, 0x6e, 0xbb, 0xeb, 0x0a, 0xb9, 0x0c, 0x08,
+ 0x8f, 0x06, 0x4d, 0x6e, 0x3b, 0x7f, 0x95, 0x8f, 0x06, 0x11, 0x1a, 0x6e, 0x44, 0x59, 0x18, 0xe2,
+ 0xc3, 0xd1, 0xba, 0x7d, 0x44, 0x4c, 0x88, 0x70, 0x8b, 0xd9, 0x3f, 0xf6, 0xe9, 0x60, 0x30, 0xa0,
+ 0x67, 0xe8, 0x01, 0xd1, 0xef, 0xe8, 0x1a, 0x58, 0xc8, 0x3f, 0xf7, 0x79, 0x20, 0xe0, 0x6a, 0x7d,
+ 0x9e, 0xa0, 0x78, 0x69, 0x4c, 0x77, 0x62, 0xeb, 0xb7, 0xfe, 0xb5, 0x5f, 0xbc, 0x83, 0x1a, 0xc8,
+ 0x40, 0x90, 0xbf, 0x75, 0x5a, 0x04, 0xbb, 0x55, 0x41, 0xfe, 0xa2, 0x79, 0x0c, 0x26, 0x9f, 0x50,
+ 0x32, 0x4a, 0xfd, 0xc0, 0x46, 0xff, 0x4d, 0x34, 0xaf, 0xc7, 0x82, 0xf5, 0x65, 0x22, 0xf4, 0xa5,
+ 0xb2, 0xb1, 0xff, 0x10, 0x5b, 0x02, 0x08, 0x77, 0x7d, 0x95, 0xba, 0xdc, 0xf7, 0xbf, 0x0c, 0x33,
+ 0x80, 0x9b, 0xc6, 0xeb, 0x27, 0xc5, 0x8e, 0x8d, 0xfd, 0x8f, 0x37, 0x4d, 0xeb, 0xf5, 0x00, 0x6c,
+ 0xe2, 0x65, 0xfe, 0xbe, 0x6d, 0x83, 0xff, 0x27, 0x78, 0x40, 0x9c, 0x3c, 0x02, 0x0b, 0xfa, 0x79,
+ 0xa9, 0x63, 0x27, 0x61, 0x55, 0xae, 0xca, 0xf5, 0xfc, 0x41, 0xbc, 0x1e, 0x00, 0x00, 0xff, 0xff,
+ 0x87, 0x5c, 0xee, 0x2b, 0x7e, 0x11, 0x00, 0x00,
+}
diff --git a/vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.golden
new file mode 100644
index 0000000000..f6502e4b90
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.golden
@@ -0,0 +1,45 @@
+// Code generated by protoc-gen-go.
+// source: gogo.proto
+// DO NOT EDIT!
+
+package gogoproto
+
+import proto "github.com/gogo/protobuf/proto"
+import json "encoding/json"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+
+// Reference proto, json, and math imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = &json.SyntaxError{}
+var _ = math.Inf
+
+var E_Nullable = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 51235,
+ Name: "gogoproto.nullable",
+ Tag: "varint,51235,opt,name=nullable",
+}
+
+var E_Embed = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 51236,
+ Name: "gogoproto.embed",
+ Tag: "varint,51236,opt,name=embed",
+}
+
+var E_Customtype = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 51237,
+ Name: "gogoproto.customtype",
+ Tag: "bytes,51237,opt,name=customtype",
+}
+
+func init() {
+ proto.RegisterExtension(E_Nullable)
+ proto.RegisterExtension(E_Embed)
+ proto.RegisterExtension(E_Customtype)
+}
diff --git a/vendor/src/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/src/github.com/gogo/protobuf/gogoproto/gogo.proto
new file mode 100644
index 0000000000..e8e3edb61f
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/gogoproto/gogo.proto
@@ -0,0 +1,120 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+package gogoproto;
+
+import "google/protobuf/descriptor.proto";
+
+option java_package = "com.google.protobuf";
+option java_outer_classname = "GoGoProtos";
+
+extend google.protobuf.EnumOptions {
+ optional bool goproto_enum_prefix = 62001;
+ optional bool goproto_enum_stringer = 62021;
+ optional bool enum_stringer = 62022;
+ optional string enum_customname = 62023;
+}
+
+extend google.protobuf.EnumValueOptions {
+ optional string enumvalue_customname = 66001;
+}
+
+extend google.protobuf.FileOptions {
+ optional bool goproto_getters_all = 63001;
+ optional bool goproto_enum_prefix_all = 63002;
+ optional bool goproto_stringer_all = 63003;
+ optional bool verbose_equal_all = 63004;
+ optional bool face_all = 63005;
+ optional bool gostring_all = 63006;
+ optional bool populate_all = 63007;
+ optional bool stringer_all = 63008;
+ optional bool onlyone_all = 63009;
+
+ optional bool equal_all = 63013;
+ optional bool description_all = 63014;
+ optional bool testgen_all = 63015;
+ optional bool benchgen_all = 63016;
+ optional bool marshaler_all = 63017;
+ optional bool unmarshaler_all = 63018;
+ optional bool stable_marshaler_all = 63019;
+
+ optional bool sizer_all = 63020;
+
+ optional bool goproto_enum_stringer_all = 63021;
+ optional bool enum_stringer_all = 63022;
+
+ optional bool unsafe_marshaler_all = 63023;
+ optional bool unsafe_unmarshaler_all = 63024;
+
+ optional bool goproto_extensions_map_all = 63025;
+ optional bool goproto_unrecognized_all = 63026;
+ optional bool gogoproto_import = 63027;
+ optional bool protosizer_all = 63028;
+ optional bool compare_all = 63029;
+}
+
+extend google.protobuf.MessageOptions {
+ optional bool goproto_getters = 64001;
+ optional bool goproto_stringer = 64003;
+ optional bool verbose_equal = 64004;
+ optional bool face = 64005;
+ optional bool gostring = 64006;
+ optional bool populate = 64007;
+ optional bool stringer = 67008;
+ optional bool onlyone = 64009;
+
+ optional bool equal = 64013;
+ optional bool description = 64014;
+ optional bool testgen = 64015;
+ optional bool benchgen = 64016;
+ optional bool marshaler = 64017;
+ optional bool unmarshaler = 64018;
+ optional bool stable_marshaler = 64019;
+
+ optional bool sizer = 64020;
+
+ optional bool unsafe_marshaler = 64023;
+ optional bool unsafe_unmarshaler = 64024;
+
+ optional bool goproto_extensions_map = 64025;
+ optional bool goproto_unrecognized = 64026;
+
+ optional bool protosizer = 64028;
+ optional bool compare = 64029;
+}
+
+extend google.protobuf.FieldOptions {
+ optional bool nullable = 65001;
+ optional bool embed = 65002;
+ optional string customtype = 65003;
+ optional string customname = 65004;
+ optional string jsontag = 65005;
+ optional string moretags = 65006;
+ optional string casttype = 65007;
+ optional string castkey = 65008;
+ optional string castvalue = 65009;
+}
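+
+// Illustrative usage (MyMessage and its field are hypothetical, not part of
+// this file): a message applies these options inline on its fields, e.g.
+//
+// message MyMessage {
+// optional string my_id = 1 [(gogoproto.customname) = "MyID"];
+// }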
diff --git a/vendor/src/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/src/github.com/gogo/protobuf/gogoproto/helper.go
new file mode 100644
index 0000000000..8c29dbc0e1
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/gogoproto/helper.go
@@ -0,0 +1,308 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gogoproto
+
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+import proto "github.com/gogo/protobuf/proto"
+
+func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool {
+ return proto.GetBoolExtension(field.Options, E_Embed, false)
+}
+
+func IsNullable(field *google_protobuf.FieldDescriptorProto) bool {
+ return proto.GetBoolExtension(field.Options, E_Nullable, true)
+}
+
+func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
+ nullable := IsNullable(field)
+ if field.IsMessage() || IsCustomType(field) {
+ return nullable
+ }
+ if proto3 {
+ return false
+ }
+ return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES
+}
+
+func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool {
+ return len(GetCustomType(field)) > 0
+}
+
+func IsCastType(field *google_protobuf.FieldDescriptorProto) bool {
+ return len(GetCastType(field)) > 0
+}
+
+func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool {
+ return len(GetCastKey(field)) > 0
+}
+
+func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool {
+ return len(GetCastValue(field)) > 0
+}
+
+func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Customtype)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func GetCastType(field *google_protobuf.FieldDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Casttype)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func GetCastKey(field *google_protobuf.FieldDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Castkey)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func GetCastValue(field *google_protobuf.FieldDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Castvalue)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool {
+ return len(GetCustomName(field)) > 0
+}
+
+func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool {
+ return len(GetEnumCustomName(field)) > 0
+}
+
+func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool {
+ return len(GetEnumValueCustomName(field)) > 0
+}
+
+func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Customname)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_EnumCustomname)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Jsontag)
+ if err == nil && v.(*string) != nil {
+ return (v.(*string))
+ }
+ }
+ return nil
+}
+
+func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Moretags)
+ if err == nil && v.(*string) != nil {
+ return (v.(*string))
+ }
+ }
+ return nil
+}
+
+type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool
+
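+// Each helper below resolves an option in two steps: the per-message (or
+// per-enum) extension wins if set; otherwise the file-wide *_all extension
+// is consulted, and the trailing boolean literal is the final default.
+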
+func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+ return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true))
+}
+
+func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true))
+}
+
+func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true))
+}
+
+func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false))
+}
+
+func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false))
+}
+
+func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false))
+}
+
+func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false))
+}
+
+func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false))
+}
+
+func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false))
+}
+
+func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false))
+}
+
+func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false))
+}
+
+func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false))
+}
+
+func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false))
+}
+
+func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false))
+}
+
+func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false))
+}
+
+func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false))
+}
+
+func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false))
+}
+
+func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false))
+}
+
+func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+ return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true))
+}
+
+func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+ return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false))
+}
+
+func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false))
+}
+
+func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false))
+}
+
+func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true))
+}
+
+func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ if IsProto3(file) {
+ return false
+ }
+ return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true))
+}
+
+func IsProto3(file *google_protobuf.FileDescriptorProto) bool {
+ return file.GetSyntax() == "proto3"
+}
+
+func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool {
+ return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true)
+}
+
+func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false))
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/Makefile b/vendor/src/github.com/gogo/protobuf/proto/Makefile
new file mode 100644
index 0000000000..23a6b17344
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc-min-version --version="3.0.0" --proto_path=.:../../../../ --gogo_out=. proto3_proto/proto3.proto
+ make
diff --git a/vendor/src/github.com/gogo/protobuf/proto/clone.go b/vendor/src/github.com/gogo/protobuf/proto/clone.go
new file mode 100644
index 0000000000..79edb86119
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/clone.go
@@ -0,0 +1,228 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
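+
+// Minimal usage sketch (Example stands in for a generated message type and
+// is not defined in this package):
+//
+// dup := proto.Clone(orig).(*Example)
+// // dup shares no mutable memory with orig and can be modified freely.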
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
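+
+// Sketch of the semantics above, with Example standing in for a generated
+// type that has a scalar field A and a repeated field B:
+//
+// dst := &Example{A: proto.Int32(1), B: []int32{1}}
+// src := &Example{A: proto.Int32(2), B: []int32{2}}
+// proto.Merge(dst, src)
+// // dst.A == 2 (set scalars overwrite); dst.B == []int32{1, 2} (repeated append).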
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := in.Addr().Interface().(extensionsMap); ok {
+ emOut := out.Addr().Interface().(extensionsMap)
+ mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+ } else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
+ emOut := out.Addr().Interface().(extensionsBytes)
+ bIn := emIn.GetExtensions()
+ bOut := emOut.GetExtensions()
+ *bOut = append(*bOut, *bIn...)
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/decode.go b/vendor/src/github.com/gogo/protobuf/proto/decode.go
new file mode 100644
index 0000000000..cb5b213f9b
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/decode.go
@@ -0,0 +1,872 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero for both if the slice does not contain a complete varint.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
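+
+// Worked example: 150 is encoded as the two bytes 0x96 0x01 (0x96 carries
+// the low seven bits with the continuation bit set, 0x01 the next seven):
+//
+// x, n := DecodeVarint([]byte{0x96, 0x01}) // x == 150, n == 2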
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
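+
+// The encoding is little-endian: the first byte is least significant, so
+// {0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} decodes to 0x0201 == 513.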
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
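+
+// Zigzag decoding maps small encoded values back to small signed values:
+// 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2, and so on, which is what keeps
+// sint64 compact for negative numbers on the wire.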
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // TODO: check whether more call sites can use alloc=false.
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Append the skipped field's bytes to the struct's XXX_unrecognized field.
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
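+
+// Round-trip sketch (Example is a placeholder for a generated message type):
+//
+// data, err := proto.Marshal(in) // in is an *Example
+// if err != nil { /* handle */ }
+// out := new(Example)
+// err = proto.Unmarshal(data, out)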
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ if ee, eok := e.(extensionsMap); eok {
+ ext := ee.ExtensionMap()[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ ee.ExtensionMap()[int32(tag)] = ext
+ } else if ee, eok := e.(extensionsBytes); eok {
+ ext := ee.GetExtensions()
+ *ext = append(*ext, o.buf[oi:o.index]...)
+ }
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // tagcode for key and value properties are always a single byte
+ // because they have tags 1 and 2.
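+ // For example, a string key has tagcode (1<<3)|WireBytes = 0x0a and
+ // a varint value has tagcode (2<<3)|WireVarint = 0x10.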
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() || !valelem.IsValid() {
+ // We did not decode the key or the value in the map entry.
+ // Either way, it's an invalid map entry.
+ return fmt.Errorf("proto: bad map data: missing key/val")
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/src/github.com/gogo/protobuf/proto/decode_gogo.go
new file mode 100644
index 0000000000..603dabec3f
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/decode_gogo.go
@@ -0,0 +1,169 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "reflect"
+)
+
+// Decode a reference to a struct pointer.
+func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ panic("not supported, since this is a pointer receiver")
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ bas := structPointer_FieldPointer(base, p.field)
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of references to struct pointers ([]struct).
+func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error {
+ newBas := appendStructPointer(base, p.field, p.sstype)
+
+ if is_group {
+ panic("not supported, maybe in future, if requested.")
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ panic("not supported, since this is not a pointer receiver.")
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, newBas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of references to struct pointers.
+func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_ref_struct(p, false, base)
+}
+
+func setPtrCustomType(base structPointer, f field, v interface{}) {
+ if v == nil {
+ return
+ }
+ structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer()))
+}
+
+func setCustomType(base structPointer, f field, value interface{}) {
+ if value == nil {
+ return
+ }
+ v := reflect.ValueOf(value).Elem()
+ t := reflect.TypeOf(value).Elem()
+ kind := t.Kind()
+ switch kind {
+ case reflect.Slice:
+ slice := reflect.MakeSlice(t, v.Len(), v.Cap())
+ reflect.Copy(slice, v)
+ oldHeader := structPointer_GetSliceHeader(base, f)
+ oldHeader.Data = slice.Pointer()
+ oldHeader.Len = v.Len()
+ oldHeader.Cap = v.Cap()
+ default:
+ size := t.Size()
+ structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size))
+ }
+}
+
+func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ i := reflect.New(p.ctype.Elem()).Interface()
+ custom := (i).(Unmarshaler)
+ if err := custom.Unmarshal(b); err != nil {
+ return err
+ }
+ setPtrCustomType(base, p.field, custom)
+ return nil
+}
+
+func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ i := reflect.New(p.ctype).Interface()
+ custom := (i).(Unmarshaler)
+ if err := custom.Unmarshal(b); err != nil {
+ return err
+ }
+ if custom != nil {
+ setCustomType(base, p.field, custom)
+ }
+ return nil
+}
+
+// Decode a slice of bytes ([]byte) into a slice of custom types.
+func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ i := reflect.New(p.ctype.Elem()).Interface()
+ custom := (i).(Unmarshaler)
+ if err := custom.Unmarshal(b); err != nil {
+ return err
+ }
+ newBas := appendStructPointer(base, p.field, p.ctype)
+
+ setCustomType(newBas, 0, custom)
+
+ return nil
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/encode.go b/vendor/src/github.com/gogo/protobuf/proto/encode.go
new file mode 100644
index 0000000000..231b07401a
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/encode.go
@@ -0,0 +1,1325 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
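+// A varint stores 7 payload bits per byte, so a 64-bit value needs at
+// most ceil(64/7) = 10 bytes.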
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
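+// For example, EncodeVarint(300) returns []byte{0xac, 0x02}:
+// 300 = 0b100101100; the low group 0101100 is emitted first with the
+// continuation bit set (0xac), followed by the high group 0b10 (0x02).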
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
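+// ZigZag interleaves signed values so that small magnitudes yield small
+// varints: 0 encodes as 0, -1 as 1, 1 as 2, -2 as 3, and so on.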
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
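+// For example, a 7-byte payload is written as the single length byte
+// 0x07 followed by the 7 payload bytes; the field's tag bytes are
+// appended by the caller beforehand.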
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
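+// Typical use from client code (illustrative):
+//
+//    data, err := proto.Marshal(msg)
+//    if err != nil { /* handle the error */ }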
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) {
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...)
+ return nil
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++
+ }
+
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but be careful: only
+// interface, map, pointer, and slice kinds can actually be nil, and
+// isNil reports false for everything else.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ v := *structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionMap(v); err != nil {
+ return err
+ }
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := *structPointer_ExtMap(base, p.field)
+ return sizeExtensionMap(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ // The only illegal map entry values are nil message pointers.
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return errors.New("proto: map has nil element")
+ }
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns a new reflect.Value matching the map's value type,
+// and a structPointer suitable for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
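+ // For example, for a map[string]int32 field, keybase ends up as a
+ // **string and valbase as a **int32.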
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) // point the *K placeholder at the addressable K
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) // point the *V placeholder at the addressable V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
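+// Four bytes are reserved for the length up front; if the varint length
+// of the encoded message differs, the message is shifted. For example, a
+// 5-byte message needs a 1-byte length, so it is moved 3 bytes left.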
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/src/github.com/gogo/protobuf/proto/encode_gogo.go
new file mode 100644
index 0000000000..f77cfb1eea
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/encode_gogo.go
@@ -0,0 +1,354 @@
+// Extensions for Protocol Buffers to create more Go-like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// http://github.com/golang/protobuf/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "reflect"
+)
+
+func NewRequiredNotSetError(field string) *RequiredNotSetError {
+ return &RequiredNotSetError{field}
+}
+
+type Sizer interface {
+ Size() int
+}
+
+func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, s...)
+ return nil
+}
+
+func size_ext_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return 0
+ }
+ n += len(s)
+ return
+}
+
+// Encode a reference to bool pointer.
+func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ x := 0
+ if v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_ref_bool(p *Properties, base structPointer) int {
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode a reference to int32 pointer.
+func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v))
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_ref_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v))
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_ref_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a reference to an int64 pointer.
+func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_ref_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a reference to a string pointer.
+func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_ref_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// Encode a reference to a message struct.
+func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetRefStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+// TODO: this is copied from size_struct_message; refactor to share the implementation.
+func size_ref_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetRefStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a slice of references to message struct pointers ([]struct).
+func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ ss := structPointer_GetStructPointer(base, p.field)
+ ss1 := structPointer_GetRefStructPointer(ss, field(0))
+ size := p.stype.Size()
+ l := structPointer_Len(base, p.field)
+ for i := 0; i < l; i++ {
+ structp := structPointer_Add(ss1, field(uintptr(i)*size))
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ }
+ return state.err
+}
+
+// TODO: this is copied from size_slice_struct_message; refactor to share the implementation.
+func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) {
+ ss := structPointer_GetStructPointer(base, p.field)
+ ss1 := structPointer_GetRefStructPointer(ss, field(0))
+ size := p.stype.Size()
+ l := structPointer_Len(base, p.field)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := structPointer_Add(ss1, field(uintptr(i)*size))
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error {
+ i := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if i == nil {
+ return ErrNil
+ }
+ custom := i.(Marshaler)
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if data == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+}
+
+func size_custom_bytes(p *Properties, base structPointer) (n int) {
+ n += len(p.tagcode)
+ i := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if i == nil {
+ return 0
+ }
+ custom := i.(Marshaler)
+ data, _ := custom.Marshal()
+ n += sizeRawBytes(data)
+ return
+}
+
+func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error {
+ custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler)
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if data == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+}
+
+func size_custom_ref_bytes(p *Properties, base structPointer) (n int) {
+ n += len(p.tagcode)
+ i := structPointer_InterfaceAt(base, p.field, p.ctype)
+ if i == nil {
+ return 0
+ }
+ custom := i.(Marshaler)
+ data, _ := custom.Marshal()
+ n += sizeRawBytes(data)
+ return
+}
+
+func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error {
+ inter := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if inter == nil {
+ return ErrNil
+ }
+ slice := reflect.ValueOf(inter)
+ l := slice.Len()
+ for i := 0; i < l; i++ {
+ v := slice.Index(i)
+ custom := v.Interface().(Marshaler)
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ }
+ return nil
+}
+
+func size_custom_slice_bytes(p *Properties, base structPointer) (n int) {
+ inter := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if inter == nil {
+ return 0
+ }
+ slice := reflect.ValueOf(inter)
+ l := slice.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ v := slice.Index(i)
+ custom := v.Interface().(Marshaler)
+ data, _ := custom.Marshal()
+ n += sizeRawBytes(data)
+ }
+ return
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/equal.go b/vendor/src/github.com/gogo/protobuf/proto/equal.go
new file mode 100644
index 0000000000..f5db1def3c
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/equal.go
@@ -0,0 +1,276 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field).
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
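+
+// Example (editor's illustrative sketch; pb.Test is a hypothetical generated
+// message type with a Label *string field):
+//
+//	a := &pb.Test{Label: proto.String("x")}
+//	b := &pb.Test{Label: proto.String("x")}
+//	proto.Equal(a, b) // true: same type, corresponding fields equal
+//	b.Label = proto.String("y")
+//	proto.Equal(a, b) // false: a set field differs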
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// em1 and em2 are extension maps.
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/extensions.go b/vendor/src/github.com/gogo/protobuf/proto/extensions.go
new file mode 100644
index 0000000000..6180347e39
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/extensions.go
@@ -0,0 +1,518 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+}
+
+type extensionsMap interface {
+ extendableProto
+ ExtensionMap() map[int32]Extension
+}
+
+type extensionsBytes interface {
+ extendableProto
+ GetExtensions() *[]byte
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base extendableProto, id int32, b []byte) {
+ if ebase, ok := base.(extensionsMap); ok {
+ ebase.ExtensionMap()[id] = Extension{enc: b}
+ } else if ebase, ok := base.(extensionsBytes); ok {
+ clearExtension(base, id)
+ ext := ebase.GetExtensions()
+ *ext = append(*ext, b...)
+ } else {
+ panic("unreachable")
+ }
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ // Check the extended type.
+ if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionMap(m map[int32]Extension) error {
+ for k, e := range m {
+ err := encodeExtension(&e)
+ if err != nil {
+ return err
+ }
+ m[k] = e
+ }
+ return nil
+}
+
+func encodeExtension(e *Extension) error {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ return nil
+ }
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ return nil
+}
+
+func sizeExtensionMap(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ if epb, doki := pb.(extensionsMap); doki {
+ _, ok := epb.ExtensionMap()[extension.Field]
+ return ok
+ } else if epb, doki := pb.(extensionsBytes); doki {
+ ext := epb.GetExtensions()
+ buf := *ext
+ o := 0
+ for o < len(buf) {
+ tag, n := DecodeVarint(buf[o:])
+ fieldNum := int32(tag >> 3)
+ if fieldNum == extension.Field {
+ return true
+ }
+ wireType := int(tag & 0x7)
+ o += n
+ l, err := size(buf[o:], wireType)
+ if err != nil {
+ return false
+ }
+ o += l
+ }
+ return false
+ }
+ panic("unreachable")
+}
+
+func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
+ ext := pb.GetExtensions()
+ for offset < len(*ext) {
+ tag, n1 := DecodeVarint((*ext)[offset:])
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ n2, err := size((*ext)[offset+n1:], wireType)
+ if err != nil {
+ panic(err)
+ }
+ newOffset := offset + n1 + n2
+ if fieldNum == theFieldNum {
+ *ext = append((*ext)[:offset], (*ext)[newOffset:]...)
+ return offset
+ }
+ offset = newOffset
+ }
+ return -1
+}
+
+func clearExtension(pb extendableProto, fieldNum int32) {
+ if epb, doki := pb.(extensionsMap); doki {
+ delete(epb.ExtensionMap(), fieldNum)
+ } else if epb, doki := pb.(extensionsBytes); doki {
+ offset := 0
+ for offset != -1 {
+ offset = deleteExtension(epb, fieldNum, offset)
+ }
+ } else {
+ panic("unreachable")
+ }
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+ // TODO: Check types, field numbers, etc.?
+ clearExtension(pb, extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present it returns ErrMissingExtension.
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return nil, err
+ }
+
+ if epb, doki := pb.(extensionsMap); doki {
+ emap := epb.ExtensionMap()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+ } else if epb, doki := pb.(extensionsBytes); doki {
+ ext := epb.GetExtensions()
+ o := 0
+ for o < len(*ext) {
+ tag, n := DecodeVarint((*ext)[o:])
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ l, err := size((*ext)[o+n:], wireType)
+ if err != nil {
+ return nil, err
+ }
+ if fieldNum == extension.Field {
+ v, err := decodeExtension((*ext)[o:o+n+l], extension)
+ if err != nil {
+ return nil, err
+ }
+ return v, nil
+ }
+ o += n + l
+ }
+ return defaultExtensionValue(extension)
+ }
+ panic("unreachable")
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non int32 reflect.value directly
+ // set it as a int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, a pointer to a basic type, or a slice.
+ // Allocate a "field" to hold the pointer/slice value itself, and pass
+ // the address of that field to props.dec, which interprets it as a
+ // *struct{ x t } with a single zero field.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := pb.(extendableProto)
+ if !ok {
+ err = errors.New("proto: not an extendable proto")
+ return
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+ return setExtension(pb, extension, value)
+}
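+
+// Example (editor's illustrative sketch; pb.MyMessage and the descriptor
+// pb.E_Tag are hypothetical generated identifiers for an optional string
+// extension):
+//
+//	msg := &pb.MyMessage{}
+//	if err := proto.SetExtension(msg, pb.E_Tag, proto.String("urgent")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if proto.HasExtension(msg, pb.E_Tag) {
+//		v, _ := proto.GetExtension(msg, pb.E_Tag)
+//		fmt.Println(*(v.(*string))) // "urgent"
+//	}
+//	proto.ClearExtension(msg, pb.E_Tag)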
+
+func setExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if epb, doki := pb.(extensionsMap); doki {
+ epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+ } else if epb, doki := pb.(extensionsBytes); doki {
+ ClearExtension(pb, extension)
+ ext := epb.GetExtensions()
+ et := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+ p := NewBuffer(nil)
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ *ext = append(*ext, p.buf...)
+ }
+ return nil
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/src/github.com/gogo/protobuf/proto/extensions_gogo.go
new file mode 100644
index 0000000000..86b1fa2344
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/extensions_gogo.go
@@ -0,0 +1,236 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool {
+ if reflect.ValueOf(pb).IsNil() {
+ return ifnotset
+ }
+ value, err := GetExtension(pb, extension)
+ if err != nil {
+ return ifnotset
+ }
+ if value == nil {
+ return ifnotset
+ }
+ if value.(*bool) == nil {
+ return ifnotset
+ }
+ return *(value.(*bool))
+}
+
+func (this *Extension) Equal(that *Extension) bool {
+ return bytes.Equal(this.enc, that.enc)
+}
+
+func (this *Extension) Compare(that *Extension) int {
+ return bytes.Compare(this.enc, that.enc)
+}
+
+func SizeOfExtensionMap(m map[int32]Extension) (n int) {
+ return sizeExtensionMap(m)
+}
+
+type sortableMapElem struct {
+ field int32
+ ext Extension
+}
+
+func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
+ s := make(sortableExtensions, 0, len(m))
+ for k, v := range m {
+ s = append(s, &sortableMapElem{field: k, ext: v})
+ }
+ return s
+}
+
+type sortableExtensions []*sortableMapElem
+
+func (this sortableExtensions) Len() int { return len(this) }
+
+func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
+
+func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
+
+func (this sortableExtensions) String() string {
+ sort.Sort(this)
+ ss := make([]string, len(this))
+ for i := range this {
+ ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
+ }
+ return "map[" + strings.Join(ss, ",") + "]"
+}
+
+func StringFromExtensionsMap(m map[int32]Extension) string {
+ return newSortableExtensionsFromMap(m).String()
+}
+
+func StringFromExtensionsBytes(ext []byte) string {
+ m, err := BytesToExtensionsMap(ext)
+ if err != nil {
+ panic(err)
+ }
+ return StringFromExtensionsMap(m)
+}
+
+func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return 0, err
+ }
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ for _, k := range keys {
+ n += copy(data[n:], m[int32(k)].enc)
+ }
+ return n, nil
+}
+
+func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
+ if m[id].value == nil || m[id].desc == nil {
+ return m[id].enc, nil
+ }
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+ return m[id].enc, nil
+}
+
+func size(buf []byte, wire int) (int, error) {
+ switch wire {
+ case WireVarint:
+ _, n := DecodeVarint(buf)
+ return n, nil
+ case WireFixed64:
+ return 8, nil
+ case WireBytes:
+ v, n := DecodeVarint(buf)
+ return int(v) + n, nil
+ case WireFixed32:
+ return 4, nil
+ case WireStartGroup:
+ offset := 0
+ for {
+ u, n := DecodeVarint(buf[offset:])
+ fwire := int(u & 0x7)
+ offset += n
+ if fwire == WireEndGroup {
+ return offset, nil
+ }
+ s, err := size(buf[offset:], fwire) // skip the nested field using its own wire type
+ if err != nil {
+ return 0, err
+ }
+ offset += s
+ }
+ }
+ return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
+}
+
+func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
+ m := make(map[int32]Extension)
+ i := 0
+ for i < len(buf) {
+ tag, n := DecodeVarint(buf[i:])
+ if n <= 0 {
+ return nil, fmt.Errorf("unable to decode varint")
+ }
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ l, err := size(buf[i+n:], wireType)
+ if err != nil {
+ return nil, err
+ }
+ end := i + l + n
+ m[fieldNum] = Extension{enc: buf[i:end]}
+ i = end
+ }
+ return m, nil
+}
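+
+// Example (editor's illustrative sketch): two varint fields, numbers 100 and
+// 101, are split into one Extension per field:
+//
+//	buf := []byte{0xa0, 0x06, 0x01, 0xa8, 0x06, 0x02} // field 100 = 1, field 101 = 2
+//	m, _ := proto.BytesToExtensionsMap(buf)
+//	// m[100].enc == buf[0:3]; m[101].enc == buf[3:6]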
+
+func NewExtension(e []byte) Extension {
+ ee := Extension{enc: make([]byte, len(e))}
+ copy(ee.enc, e)
+ return ee
+}
+
+func AppendExtension(e extendableProto, tag int32, buf []byte) {
+ if ee, eok := e.(extensionsMap); eok {
+ ext := ee.ExtensionMap()[tag] // may be missing
+ ext.enc = append(ext.enc, buf...)
+ ee.ExtensionMap()[tag] = ext
+ } else if ee, eok := e.(extensionsBytes); eok {
+ ext := ee.GetExtensions()
+ *ext = append(*ext, buf...)
+ }
+}
+
+func (this Extension) GoString() string {
+ if this.enc == nil {
+ if err := encodeExtension(&this); err != nil {
+ panic(err)
+ }
+ }
+ return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
+}
+
+func SetUnsafeExtension(pb extendableProto, fieldNum int32, value interface{}) error {
+ typ := reflect.TypeOf(pb).Elem()
+ ext, ok := extensionMaps[typ]
+ if !ok {
+ return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+ }
+ desc, ok := ext[fieldNum]
+ if !ok {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return setExtension(pb, desc, value)
+}
+
+func GetUnsafeExtension(pb extendableProto, fieldNum int32) (interface{}, error) {
+ typ := reflect.TypeOf(pb).Elem()
+ ext, ok := extensionMaps[typ]
+ if !ok {
+ return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+ }
+ desc, ok := ext[fieldNum]
+ if !ok {
+ return nil, fmt.Errorf("unregistered field number %d", fieldNum)
+ }
+ return GetExtension(pb, desc)
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/lib.go b/vendor/src/github.com/gogo/protobuf/proto/lib.go
new file mode 100644
index 0000000000..2e35ae2d2a
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/lib.go
@@ -0,0 +1,894 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/gogo/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/gogo/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // write point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
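+
+// Example (editor's illustrative sketch): reusing one Buffer across several
+// Marshal calls to amortize allocations; msgs and send are assumed to exist:
+//
+//	buf := proto.NewBuffer(nil)
+//	for _, m := range msgs {
+//		buf.Reset()
+//		if err := buf.Marshal(m); err != nil {
+//			log.Fatal(err)
+//		}
+//		send(buf.Bytes()) // contents are only valid until the next Reset
+//	}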
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
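+
+// Example (editor's illustrative sketch, using the FOO maps from the package
+// documentation): EnumName(FOO_name, 17) returns "X", while an unmapped value
+// such as EnumName(FOO_name, 99) falls back to the decimal string "99".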
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
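+
+// Example (editor's illustrative sketch, using the FOO maps from the package
+// documentation): both JSON forms decode to the same value:
+//
+//	v1, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic
+//	v2, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")  // numeric
+//	// v1 == v2 == 17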
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ sindex := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = sindex
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
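+
+// Example (editor's illustrative sketch, using the Test message from the
+// package documentation, whose "type" field has default 77):
+//
+//	t := &pb.Test{Label: proto.String("x")} // Type is unset (nil)
+//	proto.SetDefaults(t)
+//	// *t.Type == 77; t.Label is left untouched.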
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to its defaultMessage,
+ // which records the scalar fields (with their proto-declared default
+ // values, if any) and the indices of its nested message fields.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
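+
+// Intended call pattern (sketch; v is assumed to be a reflect.Value holding
+// the map being encoded):
+// keys := v.MapKeys()
+// sort.Sort(mapKeys(keys))
+// for _, k := range keys { ... encode v.MapIndex(k) ... }
+// Int and uint keys compare numerically; all other kinds fall back to the
+// default fmt.Sprint-based ordering.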
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
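+
+// For example, isProto3Zero(reflect.ValueOf(int32(0))) and
+// isProto3Zero(reflect.ValueOf("")) are true, while any kind outside the
+// switch (pointers, slices, structs) always reports false.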
+
+// GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const GoGoProtoPackageIsVersion1 = true
diff --git a/vendor/src/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/src/github.com/gogo/protobuf/proto/lib_gogo.go
new file mode 100644
index 0000000000..a6c2c06b23
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
+ s, ok := m[value]
+ if !ok {
+ s = strconv.Itoa(int(value))
+ }
+ return json.Marshal(s)
+}
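+
+// For example, with a hypothetical generated value map
+// var State_name = map[int32]string{0: "UNKNOWN", 1: "RUNNING"}
+// MarshalJSONEnum(State_name, 1) returns `"RUNNING"`, and an unregistered
+// value such as 7 falls back to the quoted decimal string `"7"`.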
diff --git a/vendor/src/github.com/gogo/protobuf/proto/message_set.go b/vendor/src/github.com/gogo/protobuf/proto/message_set.go
new file mode 100644
index 0000000000..e25e01e637
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/message_set.go
@@ -0,0 +1,280 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
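+
+// skipVarint relies on the varint continuation bit: every byte of an encoded
+// varint except the last has its high bit set. For buf = []byte{0x96, 0x01, 0x0a}
+// the varint occupies the first two bytes, so skipVarint returns []byte{0x0a}.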
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ if i > 0 {
+ b.WriteByte(',')
+ }
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
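+
+// A generated file would typically register its MessageSet extension from an
+// init function, along these lines (type name and field number hypothetical):
+// func init() {
+// proto.RegisterMessageSetType((*MyExtension)(nil), 12345, "my.pkg.MyExtension")
+// }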
diff --git a/vendor/src/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/src/github.com/gogo/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000000..749919d250
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,479 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
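+// word64Slice is like word32Slice but for 64-bit values.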
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(x))
+ case reflect.Uint64:
+ elem.SetUint(x)
+ case reflect.Float64:
+ elem.SetFloat(math.Float64frombits(x))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000000..e9be0fe92e
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
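+
+// In this implementation a field is just a byte offset, so for a hypothetical
+// type T struct { A int32; B *string }
+// toField on the reflect.StructField for B yields field(unsafe.Offsetof(T{}.B)),
+// and the accessors below add that offset to the struct's base address.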
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
new file mode 100644
index 0000000000..6bc85fa987
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} {
+ point := unsafe.Pointer(uintptr(p) + uintptr(f))
+ r := reflect.NewAt(t, point)
+ return r.Interface()
+}
+
+func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} {
+ point := unsafe.Pointer(uintptr(p) + uintptr(f))
+ r := reflect.NewAt(t, point)
+ if r.Elem().IsNil() {
+ return nil
+ }
+ return r.Elem().Interface()
+}
+
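+// copyUintPtr copies size bytes from oldptr to newptr by aliasing both raw
+// addresses as []byte through reflect.SliceHeader and delegating to the
+// built-in copy.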
+func copyUintPtr(oldptr, newptr uintptr, size int) {
+ oldbytes := make([]byte, 0)
+ oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes))
+ oldslice.Data = oldptr
+ oldslice.Len = size
+ oldslice.Cap = size
+ newbytes := make([]byte, 0)
+ newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes))
+ newslice.Data = newptr
+ newslice.Len = size
+ newslice.Cap = size
+ copy(newbytes, oldbytes)
+}
+
+func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
+ copyUintPtr(uintptr(oldptr), uintptr(newptr), size)
+}
+
+func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
+ size := typ.Elem().Size()
+ oldHeader := structPointer_GetSliceHeader(base, f)
+ newLen := oldHeader.Len + 1
+ slice := reflect.MakeSlice(typ, newLen, newLen)
+ bas := toStructPointer(slice)
+ for i := 0; i < oldHeader.Len; i++ {
+ newElemptr := uintptr(bas) + uintptr(i)*size
+ oldElemptr := oldHeader.Data + uintptr(i)*size
+ copyUintPtr(oldElemptr, newElemptr, int(size))
+ }
+
+ oldHeader.Data = uintptr(bas)
+ oldHeader.Len = newLen
+ oldHeader.Cap = newLen
+
+ return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size)))
+}
+
+func structPointer_FieldPointer(p structPointer, f field) structPointer {
+ return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+func structPointer_GetRefStructPointer(p structPointer, f field) structPointer {
+ return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader {
+ return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+func structPointer_Add(p structPointer, size field) structPointer {
+ return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size)))
+}
+
+func structPointer_Len(p structPointer, f field) int {
+ return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f))))
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/properties.go b/vendor/src/github.com/gogo/protobuf/proto/properties.go
new file mode 100644
index 0000000000..5e6a0b3ba7
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/properties.go
@@ -0,0 +1,923 @@
+// Extensions for Protocol Buffers to create more Go-like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
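+
+// With tagMapFastLimit = 1024, put(3, 0) lands in the fastTags slice (growing
+// it to length 4, unused slots holding -1), while put(5000, 7) goes to the
+// slowTags map. get mirrors the same split, so lookups for small tag numbers
+// cost only a bounds check and a slice index.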
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ CustomType string
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sstype reflect.Type // set for slices of structs types only
+ ctype reflect.Type // set for custom types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s = ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
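+
+// For a hypothetical Properties{Wire: "varint", Tag: 1, Required: true,
+// OrigName: "id", JSONName: "id"}, String returns "varint,1,req,name=id",
+// the same shape Parse below consumes.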
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ case strings.HasPrefix(f, "embedded="):
+ p.OrigName = strings.Split(f, "=")[1]
+ case strings.HasPrefix(f, "customtype="):
+ p.CustomType = strings.Split(f, "=")[1]
+ }
+ }
+}
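+
+// Taking the tag from the comment above as input:
+// var p Properties
+// p.Parse("bytes,49,opt,name=foo,def=hello!")
+// leaves p.Wire == "bytes", p.WireType == WireBytes, p.Tag == 49,
+// p.Optional == true, p.OrigName == "foo", p.HasDefault == true and
+// p.Default == "hello!". A def= value containing commas is reassembled by the
+// strings.Join branch, since def is always the last option in the tag.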
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+ if len(p.CustomType) > 0 {
+ p.setCustomEncAndDec(typ)
+ p.setTag(lockGetProp)
+ return
+ }
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ } else {
+ p.enc = (*Buffer).enc_ref_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_ref_bool
+ }
+ case reflect.Int32:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ } else {
+ p.enc = (*Buffer).enc_ref_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_ref_int32
+ }
+ case reflect.Uint32:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ } else {
+ p.enc = (*Buffer).enc_ref_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_ref_uint32
+ }
+ case reflect.Int64, reflect.Uint64:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ } else {
+ p.enc = (*Buffer).enc_ref_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_ref_int64
+ }
+ case reflect.Float32:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ } else {
+ p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_ref_uint32
+ }
+ case reflect.Float64:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ } else {
+ p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_ref_int64
+ }
+ case reflect.String:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+ } else {
+ p.enc = (*Buffer).enc_ref_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_ref_string
+ }
+ case reflect.Struct:
+ p.stype = typ
+ p.isMarshaler = isMarshaler(typ)
+ p.isUnmarshaler = isUnmarshaler(typ)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_ref_struct_message
+ p.dec = (*Buffer).dec_ref_struct_message
+ p.size = size_ref_struct_message
+ } else {
+ fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ)
+ }
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_byte
+ p.dec = (*Buffer).dec_slice_byte
+ p.size = size_slice_byte
+ // This is a []byte, which is either a bytes field,
+ // or the value of a map field. In the latter case,
+ // we always encode an empty []byte, so we should not
+ // use the proto3 enc/size funcs.
+ // f == nil iff this is the key/value of a map field.
+ if p.proto3 && f != nil {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr enc for %v -> %v -> %v\n", t1, t2, t3)
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem enc for %v -> %v -> %v\n", t1, t2, t2.Elem())
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ case reflect.Struct:
+ p.setSliceOfNonPointerStructs(t1)
+ }
+
+ case reflect.Map:
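+ // Map fields are wire-encoded as repeated (key, value) sub-messages;
+ // the key and value properties are parsed from the protobuf_key and
+ // protobuf_val struct tags below.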
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+ p.setTag(lockGetProp)
+}
+
+func (p *Properties) setTag(lockGetProp bool) {
+ // precalculate tag code
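+ // A field's key is (tag << 3) | wire_type, encoded as a varint: seven
+ // bits per byte, least-significant group first, high bit set on every
+ // byte but the last. For example, tag 16 with wire type 0 gives
+ // x = 128, which encodes as the two bytes 0x80 0x01.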
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
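+ // Slow path: take the write lock. getPropertiesLocked re-checks the
+ // map, so a concurrent caller that raced us here is handled correctly.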
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ isOneofMessage := false
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_extensions" { // special case
+ if len(f.Tag.Get("protobuf")) > 0 {
+ p.enc = (*Buffer).enc_ext_slice_byte
+ p.dec = nil // not needed
+ p.size = size_ext_slice_byte
+ } else {
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ }
+ }
+ if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") != "" // special case
+ if oneof {
+ isOneofMessage = true
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
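+ // Generated code for messages with oneofs provides XXX_OneofFuncs,
+ // returning the oneof marshaler, unmarshaler, and sizer, plus one
+ // (*T) value per oneof wrapper type; these are interpreted below.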
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+var enumStringMaps = make(map[string]map[int32]string)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, nameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+ if _, ok := enumStringMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumStringMaps[typeName] = nameMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
diff --git a/vendor/src/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/src/github.com/gogo/protobuf/proto/properties_gogo.go
new file mode 100644
index 0000000000..8daf9f7768
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/properties_gogo.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+)
+
+func (p *Properties) setCustomEncAndDec(typ reflect.Type) {
+ p.ctype = typ
+ if p.Repeated {
+ p.enc = (*Buffer).enc_custom_slice_bytes
+ p.dec = (*Buffer).dec_custom_slice_bytes
+ p.size = size_custom_slice_bytes
+ } else if typ.Kind() == reflect.Ptr {
+ p.enc = (*Buffer).enc_custom_bytes
+ p.dec = (*Buffer).dec_custom_bytes
+ p.size = size_custom_bytes
+ } else {
+ p.enc = (*Buffer).enc_custom_ref_bytes
+ p.dec = (*Buffer).dec_custom_ref_bytes
+ p.size = size_custom_ref_bytes
+ }
+}
+
+func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) {
+ t2 := typ.Elem()
+ p.sstype = typ
+ p.stype = t2
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ p.enc = (*Buffer).enc_slice_ref_struct_message
+ p.dec = (*Buffer).dec_slice_ref_struct_message
+ p.size = size_slice_ref_struct_message
+ if p.Wire != "bytes" {
+ fmt.Fprintf(os.Stderr, "proto: no enc for %v -> %v\n", typ, t2)
+ }
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/src/github.com/gogo/protobuf/proto/skip_gogo.go
new file mode 100644
index 0000000000..4fe7e0815c
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/skip_gogo.go
@@ -0,0 +1,117 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "io"
+)
+
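+// Skip returns the number of bytes occupied by the first field in data,
+// including its key (tag/wire-type) varint. Groups are skipped by
+// recursing until the matching end-group marker is found. Skip panics
+// if data is empty.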
+func Skip(data []byte) (n int, err error) {
+ l := len(data)
+ index := 0
+ for index < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
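+ // Wire types: 0 = varint, 1 = 64-bit, 2 = length-delimited,
+ // 3 = start group, 4 = end group, 5 = 32-bit.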
+ switch wireType {
+ case 0:
+ for {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ index++
+ if data[index-1] < 0x80 {
+ break
+ }
+ }
+ return index, nil
+ case 1:
+ index += 8
+ return index, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ index += length
+ return index, nil
+ case 3:
+ for {
+ var innerWire uint64
+ start := index
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := Skip(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ index = start + next
+ }
+ return index, nil
+ case 4:
+ return index, nil
+ case 5:
+ index += 4
+ return index, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/text.go b/vendor/src/github.com/gogo/protobuf/proto/text.go
new file mode 100644
index 0000000000..e2b99b122d
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/text.go
@@ -0,0 +1,793 @@
+// Extensions for Protocol Buffers to create more Go-like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Printf("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func writeStruct(w *textWriter, sv reflect.Value) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if len(props.Enum) > 0 {
+ if err := writeEnum(w, v, props); err != nil {
+ return err
+ }
+ } else if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props.Parse(tag) // Overwrite the outer props.
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ if len(props.Enum) > 0 {
+ if err := writeEnum(w, fv, props); err != nil {
+ return err
+ }
+ } else if err := writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv
+ if pv.CanAddr() {
+ pv = sv.Addr()
+ } else {
+ pv = reflect.New(sv.Type())
+ pv.Elem().Set(sv)
+ }
+ if pv.Type().Implements(extendableProtoType) {
+ if err := writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ if props != nil && len(props.CustomType) > 0 {
+ custom, ok := v.Interface().(Marshaler)
+ if ok {
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if err := writeString(w, string(data)); err != nil {
+ return err
+ }
+ return nil
+ }
+ }
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
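+// For example, writeString(w, "a\nb\x7f") writes "a\nb\177": the newline
+// becomes the two characters '\' and 'n', and 0x7f becomes an octal escape.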
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, ferr := fmt.Fprintf(w, "/* %v */\n", err)
+ return ferr
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, werr := w.Write(endBraceNewline); werr != nil {
+ return werr
+ }
+ continue
+ }
+ if _, ferr := fmt.Fprint(w, tag); ferr != nil {
+ return ferr
+ }
+ if wire != WireStartGroup {
+ if err = w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err = w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep := pv.Interface().(extendableProto)
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ var m map[int32]Extension
+ if em, ok := ep.(extensionsMap); ok {
+ m = em.ExtensionMap()
+ } else if em, ok := ep.(extensionsBytes); ok {
+ eb := em.GetExtensions()
+ var err error
+ m, err = BytesToExtensionsMap(*eb)
+ if err != nil {
+ return err
+ }
+ }
+
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+func marshalText(w io.Writer, pb Message, compact bool) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: compact,
+ }
+
+ if tm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
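+//
+// A minimal use, with msg standing in for any value implementing Message:
+//
+//	var buf bytes.Buffer
+//	if err := MarshalText(&buf, msg); err != nil {
+//		// handle the write error
+//	}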
+func MarshalText(w io.Writer, pb Message) error {
+ return marshalText(w, pb, false)
+}
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, false)
+ return buf.String()
+}
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, true)
+ return buf.String()
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/src/github.com/gogo/protobuf/proto/text_gogo.go
new file mode 100644
index 0000000000..cdb23373c3
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/text_gogo.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+ m, ok := enumStringMaps[props.Enum]
+ if !ok {
+ // No string map was registered for this enum; write the numeric
+ // value once and return, rather than falling through below.
+ if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ return nil
+ }
+ key := int32(0)
+ if v.Kind() == reflect.Ptr {
+ key = int32(v.Elem().Int())
+ } else {
+ key = int32(v.Int())
+ }
+ s, ok := m[key]
+ if !ok {
+ // Unknown enum value; write the raw number instead of a name.
+ if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ return nil
+ }
+ _, err := fmt.Fprint(w, s)
+ return err
+}
diff --git a/vendor/src/github.com/gogo/protobuf/proto/text_parser.go b/vendor/src/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 0000000000..61b4bc8cc8
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,849 @@
+// Extensions for Protocol Buffers to create more Go-like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',':
+ // Single symbol
+ p.cur.value, p.s = p.s[:1], p.s[1:]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[:i+1], p.s[i+1:]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[:i], p.s[i:]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
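+ // Octal escapes consume exactly three digits counting the leading
+ // one (so \101 is "A"); hex escapes consume exactly two (\x41 is "A").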
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
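+ // Note: the hex digits are decoded pairwise into raw bytes; they are
+ // not interpreted as a Unicode code point.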
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
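+ // For example, the input `"foo" "bar"` yields a single token whose
+ // unquoted value is "foobar".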
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]".
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == tok.value {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", tok.value)
+ }
+ // Check the extension terminator.
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != "]" {
+ return p.errorf("unrecognized extension terminator %q", tok.value)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(extendableProto)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ sv.Field(oop.Field).Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // Technically the "key" and "value" could come in any order,
+ // but in practice they won't.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ if err := p.consumeToken("key"); err != nil {
+ return err
+ }
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken("value"); err != nil {
+ return err
+ }
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken(terminator); err != nil {
+ return err
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ } else if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+ if len(props.CustomType) > 0 {
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ tc := reflect.TypeOf(new(Marshaler))
+ ok := t.Elem().Implements(tc.Elem())
+ if ok {
+ fv := v
+ flen := fv.Len()
+ if flen == fv.Cap() {
+ nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+ reflect.Copy(nav, fv)
+ fv.Set(nav)
+ }
+ fv.SetLen(flen + 1)
+
+ // Read one.
+ p.back()
+ return p.readAny(fv.Index(flen), props)
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.ValueOf(custom))
+ } else {
+ custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+ }
+ return nil
+ }
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ ntok := p.next()
+ if ntok.err != nil {
+ return ntok.err
+ }
+ if ntok.value == "]" {
+ break
+ }
+ if ntok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", ntok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // Either "true", "false", 1 or 0.
+ switch tok.value {
+ case "true", "1":
+ fv.SetBool(true)
+ return nil
+ case "false", "0":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
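+
+// Usage sketch (illustrative only; "Greeting" is a hypothetical generated
+// message with fields `name` (string), `id` (int32) and `tags` (repeated
+// string)). Separators between fields are optional, as handled by
+// consumeOptionalSeparator, and readAny accepts a repeated field either as
+// repeated entries or as a single [v1, v2, ...] list:
+//
+//	var g Greeting
+//	if err := UnmarshalText(`name: "x"; id: 7 tags: "a" tags: ["b", "c"]`, &g); err != nil {
+//		// handle parse error (e.g. *RequiredNotSetError for a missing required field)
+//	}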
diff --git a/vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile
new file mode 100644
index 0000000000..d80ceffee2
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile
@@ -0,0 +1,33 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+regenerate:
+ protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto
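+
+# Usage sketch (assumes protoc and the protoc-gen-gogo plugin are installed
+# and on PATH, and that the ../../protobuf checkout referenced above exists):
+#
+#	make regenerate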
diff --git a/vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
new file mode 100644
index 0000000000..342d65a425
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
@@ -0,0 +1,2017 @@
+// Code generated by protoc-gen-gogo.
+// source: descriptor.proto
+// DO NOT EDIT!
+
+/*
+Package descriptor is a generated protocol buffer package.
+
+It is generated from these files:
+ descriptor.proto
+
+It has these top-level messages:
+ FileDescriptorSet
+ FileDescriptorProto
+ DescriptorProto
+ FieldDescriptorProto
+ OneofDescriptorProto
+ EnumDescriptorProto
+ EnumValueDescriptorProto
+ ServiceDescriptorProto
+ MethodDescriptorProto
+ FileOptions
+ MessageOptions
+ FieldOptions
+ EnumOptions
+ EnumValueOptions
+ ServiceOptions
+ MethodOptions
+ UninterpretedOption
+ SourceCodeInfo
+*/
+package descriptor
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+const _ = proto.GoGoProtoPackageIsVersion1
+
+type FieldDescriptorProto_Type int32
+
+const (
+ // 0 is reserved for errors.
+ // Order is weird for historical reasons.
+ FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
+ FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
+ // negative values are likely.
+ FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3
+ FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
+ // negative values are likely.
+ FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5
+ FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
+ FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
+ FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8
+ FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
+ FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10
+ FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
+ // New in version 2.
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12
+ FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13
+ FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14
+ FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
+ FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
+ FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17
+ FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18
+)
+
+var FieldDescriptorProto_Type_name = map[int32]string{
+ 1: "TYPE_DOUBLE",
+ 2: "TYPE_FLOAT",
+ 3: "TYPE_INT64",
+ 4: "TYPE_UINT64",
+ 5: "TYPE_INT32",
+ 6: "TYPE_FIXED64",
+ 7: "TYPE_FIXED32",
+ 8: "TYPE_BOOL",
+ 9: "TYPE_STRING",
+ 10: "TYPE_GROUP",
+ 11: "TYPE_MESSAGE",
+ 12: "TYPE_BYTES",
+ 13: "TYPE_UINT32",
+ 14: "TYPE_ENUM",
+ 15: "TYPE_SFIXED32",
+ 16: "TYPE_SFIXED64",
+ 17: "TYPE_SINT32",
+ 18: "TYPE_SINT64",
+}
+var FieldDescriptorProto_Type_value = map[string]int32{
+ "TYPE_DOUBLE": 1,
+ "TYPE_FLOAT": 2,
+ "TYPE_INT64": 3,
+ "TYPE_UINT64": 4,
+ "TYPE_INT32": 5,
+ "TYPE_FIXED64": 6,
+ "TYPE_FIXED32": 7,
+ "TYPE_BOOL": 8,
+ "TYPE_STRING": 9,
+ "TYPE_GROUP": 10,
+ "TYPE_MESSAGE": 11,
+ "TYPE_BYTES": 12,
+ "TYPE_UINT32": 13,
+ "TYPE_ENUM": 14,
+ "TYPE_SFIXED32": 15,
+ "TYPE_SFIXED64": 16,
+ "TYPE_SINT32": 17,
+ "TYPE_SINT64": 18,
+}
+
+func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
+ p := new(FieldDescriptorProto_Type)
+ *p = x
+ return p
+}
+func (x FieldDescriptorProto_Type) String() string {
+ return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
+}
+func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
+ if err != nil {
+ return err
+ }
+ *x = FieldDescriptorProto_Type(value)
+ return nil
+}
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{3, 0}
+}
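+
+// exampleFieldTypeEnum is a minimal sketch (illustrative only, not part of
+// the generated output) showing how the name/value maps and helpers above
+// fit together.
+func exampleFieldTypeEnum() {
+	t := FieldDescriptorProto_TYPE_STRING
+	_ = t.String() // "TYPE_STRING", looked up via FieldDescriptorProto_Type_name
+	if v, ok := FieldDescriptorProto_Type_value["TYPE_ENUM"]; ok {
+		_ = FieldDescriptorProto_Type(v) // 14
+	}
+	f := &FieldDescriptorProto{Type: t.Enum()} // Enum() returns a *Type for optional fields
+	_ = f.GetType()                            // TYPE_STRING
+}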
+
+type FieldDescriptorProto_Label int32
+
+const (
+ // 0 is reserved for errors
+ FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
+ FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
+ FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+)
+
+var FieldDescriptorProto_Label_name = map[int32]string{
+ 1: "LABEL_OPTIONAL",
+ 2: "LABEL_REQUIRED",
+ 3: "LABEL_REPEATED",
+}
+var FieldDescriptorProto_Label_value = map[string]int32{
+ "LABEL_OPTIONAL": 1,
+ "LABEL_REQUIRED": 2,
+ "LABEL_REPEATED": 3,
+}
+
+func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
+ p := new(FieldDescriptorProto_Label)
+ *p = x
+ return p
+}
+func (x FieldDescriptorProto_Label) String() string {
+ return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
+}
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
+ if err != nil {
+ return err
+ }
+ *x = FieldDescriptorProto_Label(value)
+ return nil
+}
+func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{3, 1}
+}
+
+// Generated classes can be optimized for speed or code size.
+type FileOptions_OptimizeMode int32
+
+const (
+ FileOptions_SPEED FileOptions_OptimizeMode = 1
+ // etc.
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2
+ FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
+)
+
+var FileOptions_OptimizeMode_name = map[int32]string{
+ 1: "SPEED",
+ 2: "CODE_SIZE",
+ 3: "LITE_RUNTIME",
+}
+var FileOptions_OptimizeMode_value = map[string]int32{
+ "SPEED": 1,
+ "CODE_SIZE": 2,
+ "LITE_RUNTIME": 3,
+}
+
+func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
+ p := new(FileOptions_OptimizeMode)
+ *p = x
+ return p
+}
+func (x FileOptions_OptimizeMode) String() string {
+ return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
+}
+func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
+ if err != nil {
+ return err
+ }
+ *x = FileOptions_OptimizeMode(value)
+ return nil
+}
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{9, 0}
+}
+
+type FieldOptions_CType int32
+
+const (
+ // Default mode.
+ FieldOptions_STRING FieldOptions_CType = 0
+ FieldOptions_CORD FieldOptions_CType = 1
+ FieldOptions_STRING_PIECE FieldOptions_CType = 2
+)
+
+var FieldOptions_CType_name = map[int32]string{
+ 0: "STRING",
+ 1: "CORD",
+ 2: "STRING_PIECE",
+}
+var FieldOptions_CType_value = map[string]int32{
+ "STRING": 0,
+ "CORD": 1,
+ "STRING_PIECE": 2,
+}
+
+func (x FieldOptions_CType) Enum() *FieldOptions_CType {
+ p := new(FieldOptions_CType)
+ *p = x
+ return p
+}
+func (x FieldOptions_CType) String() string {
+ return proto.EnumName(FieldOptions_CType_name, int32(x))
+}
+func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
+ if err != nil {
+ return err
+ }
+ *x = FieldOptions_CType(value)
+ return nil
+}
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{11, 0}
+}
+
+type FieldOptions_JSType int32
+
+const (
+ // Use the default type.
+ FieldOptions_JS_NORMAL FieldOptions_JSType = 0
+ // Use JavaScript strings.
+ FieldOptions_JS_STRING FieldOptions_JSType = 1
+ // Use JavaScript numbers.
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2
+)
+
+var FieldOptions_JSType_name = map[int32]string{
+ 0: "JS_NORMAL",
+ 1: "JS_STRING",
+ 2: "JS_NUMBER",
+}
+var FieldOptions_JSType_value = map[string]int32{
+ "JS_NORMAL": 0,
+ "JS_STRING": 1,
+ "JS_NUMBER": 2,
+}
+
+func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
+ p := new(FieldOptions_JSType)
+ *p = x
+ return p
+}
+func (x FieldOptions_JSType) String() string {
+ return proto.EnumName(FieldOptions_JSType_name, int32(x))
+}
+func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
+ if err != nil {
+ return err
+ }
+ *x = FieldOptions_JSType(value)
+ return nil
+}
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{11, 1}
+}
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+type FileDescriptorSet struct {
+ File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} }
+func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorSet) ProtoMessage() {}
+func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} }
+
+func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
+ if m != nil {
+ return m.File
+ }
+ return nil
+}
+
+// Describes a complete .proto file.
+type FileDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
+ // Names of files imported by this file.
+ Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
+ // Indexes of the public imported files in the dependency list above.
+ PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
+ // Indexes of the weak imported files in the dependency list.
+ // For Google-internal migration only. Do not use.
+ WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+ // All top-level definitions in this file.
+ MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
+ EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+ Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
+ Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
+ Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+ // This field contains optional information about the original source code.
+ // You may safely remove this entire field without harming runtime
+ // functionality of the descriptors -- the information is needed only by
+ // development tools.
+ SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
+ // The syntax of the proto file.
+ // The supported values are "proto2" and "proto3".
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} }
+func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorProto) ProtoMessage() {}
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{1} }
+
+func (m *FileDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FileDescriptorProto) GetPackage() string {
+ if m != nil && m.Package != nil {
+ return *m.Package
+ }
+ return ""
+}
+
+func (m *FileDescriptorProto) GetDependency() []string {
+ if m != nil {
+ return m.Dependency
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetPublicDependency() []int32 {
+ if m != nil {
+ return m.PublicDependency
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetWeakDependency() []int32 {
+ if m != nil {
+ return m.WeakDependency
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
+ if m != nil {
+ return m.MessageType
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
+ if m != nil {
+ return m.EnumType
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
+ if m != nil {
+ return m.Service
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
+ if m != nil {
+ return m.Extension
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetOptions() *FileOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
+ if m != nil {
+ return m.SourceCodeInfo
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetSyntax() string {
+ if m != nil && m.Syntax != nil {
+ return *m.Syntax
+ }
+ return ""
+}
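+
+// exampleNilSafeGetters is a minimal sketch (illustrative only, not part of
+// the generated output) of the getter pattern above: getters tolerate a nil
+// receiver and unset optional fields, returning the zero value instead of
+// panicking on a nil pointer dereference.
+func exampleNilSafeGetters() {
+	var fd *FileDescriptorProto // nil receiver is safe
+	_ = fd.GetName()            // ""
+	fd = &FileDescriptorProto{} // Name/Syntax pointers are nil (unset)
+	_ = fd.GetSyntax()          // ""
+	name := "example.proto"     // hypothetical file name
+	fd.Name = &name             // proto2 optional fields are pointers
+	_ = fd.GetName()            // "example.proto"
+}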
+
+// Describes a message type.
+type DescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+ Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
+ NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
+ EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+ ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
+ OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
+ Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
+ ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+ // Reserved field names, which may not be used by fields in the same message.
+ // A given name may only be reserved once.
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto) Reset() { *m = DescriptorProto{} }
+func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto) ProtoMessage() {}
+func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{2} }
+
+func (m *DescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
+ if m != nil {
+ return m.Extension
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
+ if m != nil {
+ return m.NestedType
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
+ if m != nil {
+ return m.EnumType
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
+ if m != nil {
+ return m.ExtensionRange
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
+ if m != nil {
+ return m.OneofDecl
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetOptions() *MessageOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
+ if m != nil {
+ return m.ReservedRange
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetReservedName() []string {
+ if m != nil {
+ return m.ReservedName
+ }
+ return nil
+}
+
+type DescriptorProto_ExtensionRange struct {
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} }
+func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
+func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{2, 0}
+}
+
+func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+// Range of reserved tag numbers. Reserved tag numbers may not be used by
+// fields or extension ranges in the same message. Reserved ranges may
+// not overlap.
+type DescriptorProto_ReservedRange struct {
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} }
+func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ReservedRange) ProtoMessage() {}
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{2, 1}
+}
+
+func (m *DescriptorProto_ReservedRange) GetStart() int32 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+// Describes a field within a message.
+type FieldDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
+ Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
+ // If type_name is set, this need not be set. If both this and type_name
+ // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+ Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
+ // For message and enum types, this is the name of the type. If the name
+ // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
+ // rules are used to find the type (i.e. first the nested types within this
+ // message are searched, then within the parent, on up to the root
+ // namespace).
+ TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
+ // For extensions, this is the name of the type being extended. It is
+ // resolved in the same manner as type_name.
+ Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
+ // For numeric types, contains the original text representation of the value.
+ // For booleans, "true" or "false".
+ // For strings, contains the default text contents (not escaped in any way).
+ // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
+ // TODO(kenton): Base-64 encode?
+ DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
+ // If set, gives the index of a oneof in the containing type's oneof_decl
+ // list. This field is a member of that oneof.
+ OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
+ // JSON name of this field. The value is set by protocol compiler. If the
+ // user has set a "json_name" option on this field, that option's value
+ // will be used. Otherwise, it's deduced from the field's name by converting
+ // it to camelCase.
+ JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
+ Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} }
+func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FieldDescriptorProto) ProtoMessage() {}
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} }
+
+func (m *FieldDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetNumber() int32 {
+ if m != nil && m.Number != nil {
+ return *m.Number
+ }
+ return 0
+}
+
+func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return FieldDescriptorProto_TYPE_DOUBLE
+}
+
+func (m *FieldDescriptorProto) GetTypeName() string {
+ if m != nil && m.TypeName != nil {
+ return *m.TypeName
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetExtendee() string {
+ if m != nil && m.Extendee != nil {
+ return *m.Extendee
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetDefaultValue() string {
+ if m != nil && m.DefaultValue != nil {
+ return *m.DefaultValue
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetOneofIndex() int32 {
+ if m != nil && m.OneofIndex != nil {
+ return *m.OneofIndex
+ }
+ return 0
+}
+
+func (m *FieldDescriptorProto) GetJsonName() string {
+ if m != nil && m.JsonName != nil {
+ return *m.JsonName
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a oneof.
+type OneofDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} }
+func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*OneofDescriptorProto) ProtoMessage() {}
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} }
+
+func (m *OneofDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// Describes an enum type.
+type EnumDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} }
+func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto) ProtoMessage() {}
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} }
+
+func (m *EnumDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a value within an enum.
+type EnumValueDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+ Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} }
+func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumValueDescriptorProto) ProtoMessage() {}
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{6}
+}
+
+func (m *EnumValueDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *EnumValueDescriptorProto) GetNumber() int32 {
+ if m != nil && m.Number != nil {
+ return *m.Number
+ }
+ return 0
+}
+
+func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a service.
+type ServiceDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+ Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} }
+func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*ServiceDescriptorProto) ProtoMessage() {}
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{7} }
+
+func (m *ServiceDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
+ if m != nil {
+ return m.Method
+ }
+ return nil
+}
+
+func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a method of a service.
+type MethodDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // Input and output type names. These are resolved in the same way as
+ // FieldDescriptorProto.type_name, but must refer to a message type.
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
+ OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
+ Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
+ // Identifies whether the client streams multiple client messages
+ ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
+ // Identifies whether the server streams multiple server messages
+ ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} }
+func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*MethodDescriptorProto) ProtoMessage() {}
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} }
+
+const Default_MethodDescriptorProto_ClientStreaming bool = false
+const Default_MethodDescriptorProto_ServerStreaming bool = false
+
+func (m *MethodDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MethodDescriptorProto) GetInputType() string {
+ if m != nil && m.InputType != nil {
+ return *m.InputType
+ }
+ return ""
+}
+
+func (m *MethodDescriptorProto) GetOutputType() string {
+ if m != nil && m.OutputType != nil {
+ return *m.OutputType
+ }
+ return ""
+}
+
+func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *MethodDescriptorProto) GetClientStreaming() bool {
+ if m != nil && m.ClientStreaming != nil {
+ return *m.ClientStreaming
+ }
+ return Default_MethodDescriptorProto_ClientStreaming
+}
+
+func (m *MethodDescriptorProto) GetServerStreaming() bool {
+ if m != nil && m.ServerStreaming != nil {
+ return *m.ServerStreaming
+ }
+ return Default_MethodDescriptorProto_ServerStreaming
+}
+
+type FileOptions struct {
+ // Sets the Java package where classes generated from this .proto will be
+ // placed. By default, the proto package is used, but this is often
+ // inappropriate because proto packages do not normally start with backwards
+ // domain names.
+ JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
+ // If set, all the classes from the .proto file are wrapped in a single
+ // outer class with the given name. This applies to both Proto1
+ // (equivalent to the old "--one_java_file" option) and Proto2 (where
+ // a .proto always translates to a single class, but you may want to
+ // explicitly choose the class name).
+ JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+ // If set true, then the Java code generator will generate a separate .java
+ // file for each top-level message, enum, and service defined in the .proto
+ // file. Thus, these types will *not* be nested inside the outer class
+ // named by java_outer_classname. However, the outer class will still be
+ // generated to contain the file's getDescriptor() method as well as any
+ // top-level extensions defined in the file.
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+ // If set true, then the Java code generator will generate equals() and
+ // hashCode() methods for all messages defined in the .proto file.
+ // This increases generated code size, potentially substantially for large
+ // protos, which may harm a memory-constrained application.
+ // - In the full runtime this is a speed optimization, as the
+ // AbstractMessage base class includes reflection-based implementations of
+ // these methods.
+ // - In the lite runtime, setting this option changes the semantics of
+ // equals() and hashCode() to more closely match those of the full runtime;
+ // the generated methods compute their results based on field values rather
+ // than object identity. (Implementations should not assume that hashcodes
+ // will be consistent across runtimes or versions of the protocol compiler.)
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"`
+ // If set true, then the Java2 code generator will generate code that
+ // throws an exception whenever an attempt is made to assign a non-UTF-8
+ // byte sequence to a string field.
+ // Message reflection will do the same.
+ // However, an extension field still accepts non-UTF-8 byte sequences.
+ // This option has no effect when used with the lite runtime.
+ JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+ OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+ // Sets the Go package where structs generated from this .proto will be
+ // placed. If omitted, the Go package will be derived from the following:
+ // - The basename of the package import path, if provided.
+ // - Otherwise, the package statement in the .proto file, if present.
+ // - Otherwise, the basename of the .proto file, without extension.
+ GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+ // Should generic services be generated in each language? "Generic" services
+ // are not specific to any particular RPC system. They are generated by the
+ // main code generators in each language (without additional plugins).
+ // Generic services were the only kind of service generation supported by
+ // early versions of google.protobuf.
+ //
+ // Generic services are now considered deprecated in favor of using plugins
+ // that generate code specific to your particular RPC system. Therefore,
+ // these default to false. Old code which depends on generic services should
+ // explicitly set them to true.
+ CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+ JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+ PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+ // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for everything in the file, or it will be completely ignored; at the very
+ // least, this is a formalization for deprecating files.
+ Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Enables the use of arenas for the proto messages in this file. This applies
+ // only to generated classes for C++.
+ CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+ // Sets the objective c class prefix which is prepended to all objective c
+ // generated classes from this .proto. There is no default.
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+ // Namespace for generated classes; defaults to the package.
+ CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+ // Whether the nano proto compiler should generate in the deprecated non-nano
+ // suffixed package.
+ JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package,json=javananoUseDeprecatedPackage" json:"javanano_use_deprecated_package,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileOptions) Reset() { *m = FileOptions{} }
+func (m *FileOptions) String() string { return proto.CompactTextString(m) }
+func (*FileOptions) ProtoMessage() {}
+func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} }
+
+var extRange_FileOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_FileOptions
+}
+func (m *FileOptions) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
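+
+// exampleFileOptionsExtensions is a minimal sketch (illustrative only, not
+// part of the generated output): FileOptions reserves tags 1000-536870911
+// for extensions, and the lazily allocated map above is the storage that
+// the proto package's extension accessors use for this type.
+func exampleFileOptionsExtensions() {
+	opts := &FileOptions{}
+	_ = opts.ExtensionMap() // allocates XXX_extensions on first use
+	for _, r := range opts.ExtensionRangeArray() {
+		_ = r.Start // 1000
+		_ = r.End   // 536870911
+	}
+}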
+
+const Default_FileOptions_JavaMultipleFiles bool = false
+const Default_FileOptions_JavaGenerateEqualsAndHash bool = false
+const Default_FileOptions_JavaStringCheckUtf8 bool = false
+const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
+const Default_FileOptions_CcGenericServices bool = false
+const Default_FileOptions_JavaGenericServices bool = false
+const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_Deprecated bool = false
+const Default_FileOptions_CcEnableArenas bool = false
+
+func (m *FileOptions) GetJavaPackage() string {
+ if m != nil && m.JavaPackage != nil {
+ return *m.JavaPackage
+ }
+ return ""
+}
+
+func (m *FileOptions) GetJavaOuterClassname() string {
+ if m != nil && m.JavaOuterClassname != nil {
+ return *m.JavaOuterClassname
+ }
+ return ""
+}
+
+func (m *FileOptions) GetJavaMultipleFiles() bool {
+ if m != nil && m.JavaMultipleFiles != nil {
+ return *m.JavaMultipleFiles
+ }
+ return Default_FileOptions_JavaMultipleFiles
+}
+
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+ if m != nil && m.JavaGenerateEqualsAndHash != nil {
+ return *m.JavaGenerateEqualsAndHash
+ }
+ return Default_FileOptions_JavaGenerateEqualsAndHash
+}
+
+func (m *FileOptions) GetJavaStringCheckUtf8() bool {
+ if m != nil && m.JavaStringCheckUtf8 != nil {
+ return *m.JavaStringCheckUtf8
+ }
+ return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+ if m != nil && m.OptimizeFor != nil {
+ return *m.OptimizeFor
+ }
+ return Default_FileOptions_OptimizeFor
+}
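+
+// exampleOptimizeForDefault is a minimal sketch (illustrative only, not part
+// of the generated output) of proto2 default handling: when optimize_for is
+// unset, the getter falls back to Default_FileOptions_OptimizeFor (SPEED)
+// rather than the enum's Go zero value.
+func exampleOptimizeForDefault() {
+	o := &FileOptions{}
+	_ = o.GetOptimizeFor() // FileOptions_SPEED
+	mode := FileOptions_LITE_RUNTIME
+	o.OptimizeFor = &mode
+	_ = o.GetOptimizeFor() // FileOptions_LITE_RUNTIME
+}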
+
+func (m *FileOptions) GetGoPackage() string {
+ if m != nil && m.GoPackage != nil {
+ return *m.GoPackage
+ }
+ return ""
+}
+
+func (m *FileOptions) GetCcGenericServices() bool {
+ if m != nil && m.CcGenericServices != nil {
+ return *m.CcGenericServices
+ }
+ return Default_FileOptions_CcGenericServices
+}
+
+func (m *FileOptions) GetJavaGenericServices() bool {
+ if m != nil && m.JavaGenericServices != nil {
+ return *m.JavaGenericServices
+ }
+ return Default_FileOptions_JavaGenericServices
+}
+
+func (m *FileOptions) GetPyGenericServices() bool {
+ if m != nil && m.PyGenericServices != nil {
+ return *m.PyGenericServices
+ }
+ return Default_FileOptions_PyGenericServices
+}
+
+func (m *FileOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_FileOptions_Deprecated
+}
+
+func (m *FileOptions) GetCcEnableArenas() bool {
+ if m != nil && m.CcEnableArenas != nil {
+ return *m.CcEnableArenas
+ }
+ return Default_FileOptions_CcEnableArenas
+}
+
+func (m *FileOptions) GetObjcClassPrefix() string {
+ if m != nil && m.ObjcClassPrefix != nil {
+ return *m.ObjcClassPrefix
+ }
+ return ""
+}
+
+func (m *FileOptions) GetCsharpNamespace() string {
+ if m != nil && m.CsharpNamespace != nil {
+ return *m.CsharpNamespace
+ }
+ return ""
+}
+
+func (m *FileOptions) GetJavananoUseDeprecatedPackage() bool {
+ if m != nil && m.JavananoUseDeprecatedPackage != nil {
+ return *m.JavananoUseDeprecatedPackage
+ }
+ return false
+}
+
+func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type MessageOptions struct {
+ // Set true to use the old proto1 MessageSet wire format for extensions.
+ // This is provided for backwards-compatibility with the MessageSet wire
+ // format. You should not use this for any other reason: It's less
+ // efficient, has fewer features, and is more complicated.
+ //
+ // The message must be defined exactly as follows:
+ // message Foo {
+ // option message_set_wire_format = true;
+ // extensions 4 to max;
+ // }
+ // Note that the message cannot have any defined fields; MessageSets only
+ // have extensions.
+ //
+ // All extensions of your type must be singular messages; e.g. they cannot
+ // be int32s, enums, or repeated messages.
+ //
+ // Because this is an option, the above two restrictions are not enforced by
+ // the protocol compiler.
+ MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating messages.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageOptions) Reset() { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage() {}
+func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} }
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MessageOptions
+}
+func (m *MessageOptions) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+ if m != nil && m.MessageSetWireFormat != nil {
+ return *m.MessageSetWireFormat
+ }
+ return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+ if m != nil && m.NoStandardDescriptorAccessor != nil {
+ return *m.NoStandardDescriptorAccessor
+ }
+ return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+ if m != nil && m.MapEntry != nil {
+ return *m.MapEntry
+ }
+ return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type FieldOptions struct {
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire. Rather than repeatedly
+ // writing the tag and type for each element, the entire array is encoded as
+ // a single length-delimited blob. In proto3, only explicitly setting it to
+ // false will avoid using packed encoding.
+ Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+ // The jstype option determines the JavaScript type used for values of the
+ // field. The option is permitted only for 64 bit integral and fixed types
+ // (int64, uint64, sint64, fixed64, sfixed64). By default these types are
+ // represented as JavaScript strings. This avoids loss of precision that can
+ // happen when a large value is converted to a floating point JavaScript
+ // number. Specifying JS_NUMBER for the jstype causes the generated
+ // JavaScript code to use the JavaScript "number" type instead of strings.
+ // This option is an enum to permit additional types to be added,
+ // e.g. goog.math.Integer.
+ Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+ // Should this field be parsed lazily? Lazy applies only to message-type
+ // fields. It means that when the outer message is initially parsed, the
+ // inner message's contents will not be parsed but instead stored in encoded
+ // form. The inner message will actually be parsed when it is first accessed.
+ //
+ // This is only a hint. Implementations are free to choose whether to use
+ // eager or lazy parsing regardless of the value of this option. However,
+ // setting this option true suggests that the protocol author believes that
+ // using lazy parsing on this field is worth the additional bookkeeping
+ // overhead typically needed to implement it.
+ //
+ // This option does not affect the public interface of any generated code;
+ // all method signatures remain the same. Furthermore, thread-safety of the
+ // interface is not affected by this option; const methods remain safe to
+ // call from multiple threads concurrently, while non-const methods continue
+ // to require exclusive access.
+ //
+ //
+ // Note that implementations may choose not to check required fields within
+ // a lazy sub-message. That is, calling IsInitialized() on the outer message
+ // may return true even if the inner message has missing required fields.
+ // This is necessary because otherwise the inner message would have to be
+ // parsed in order to perform the check, defeating the purpose of lazy
+ // parsing. An implementation which chooses not to check required fields
+ // must be consistent about it. That is, for any particular sub-message, the
+ // implementation must either *always* check its required fields, or *never*
+ // check its required fields, regardless of whether or not the message has
+ // been parsed.
+ Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+ // Is this field deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for accessors, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating fields.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // For Google-internal migration only. Do not use.
+ Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldOptions) Reset() { *m = FieldOptions{} }
+func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
+func (*FieldOptions) ProtoMessage() {}
+func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} }
+
+var extRange_FieldOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_FieldOptions
+}
+func (m *FieldOptions) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
+const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
+const Default_FieldOptions_Lazy bool = false
+const Default_FieldOptions_Deprecated bool = false
+const Default_FieldOptions_Weak bool = false
+
+func (m *FieldOptions) GetCtype() FieldOptions_CType {
+ if m != nil && m.Ctype != nil {
+ return *m.Ctype
+ }
+ return Default_FieldOptions_Ctype
+}
+
+func (m *FieldOptions) GetPacked() bool {
+ if m != nil && m.Packed != nil {
+ return *m.Packed
+ }
+ return false
+}
+
+func (m *FieldOptions) GetJstype() FieldOptions_JSType {
+ if m != nil && m.Jstype != nil {
+ return *m.Jstype
+ }
+ return Default_FieldOptions_Jstype
+}
+
+func (m *FieldOptions) GetLazy() bool {
+ if m != nil && m.Lazy != nil {
+ return *m.Lazy
+ }
+ return Default_FieldOptions_Lazy
+}
+
+func (m *FieldOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_FieldOptions_Deprecated
+}
+
+func (m *FieldOptions) GetWeak() bool {
+ if m != nil && m.Weak != nil {
+ return *m.Weak
+ }
+ return Default_FieldOptions_Weak
+}
+
+func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type EnumOptions struct {
+ // Set this option to true to allow mapping different tag names to the same
+ // value.
+ AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
+ // Is this enum deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating enums.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumOptions) Reset() { *m = EnumOptions{} }
+func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumOptions) ProtoMessage() {}
+func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} }
+
+var extRange_EnumOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_EnumOptions
+}
+func (m *EnumOptions) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+const Default_EnumOptions_Deprecated bool = false
+
+func (m *EnumOptions) GetAllowAlias() bool {
+ if m != nil && m.AllowAlias != nil {
+ return *m.AllowAlias
+ }
+ return false
+}
+
+func (m *EnumOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_EnumOptions_Deprecated
+}
+
+func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type EnumValueOptions struct {
+ // Is this enum value deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum value, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating enum values.
+ Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} }
+func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumValueOptions) ProtoMessage() {}
+func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} }
+
+var extRange_EnumValueOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_EnumValueOptions
+}
+func (m *EnumValueOptions) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+const Default_EnumValueOptions_Deprecated bool = false
+
+func (m *EnumValueOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_EnumValueOptions_Deprecated
+}
+
+func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type ServiceOptions struct {
+ // Is this service deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the service, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating services.
+ Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage() {}
+func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} }
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ServiceOptions
+}
+func (m *ServiceOptions) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type MethodOptions struct {
+ // Is this method deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the method, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating methods.
+ Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MethodOptions) Reset() { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage() {}
+func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} }
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MethodOptions
+}
+func (m *MethodOptions) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+const Default_MethodOptions_Deprecated bool = false
+
+func (m *MethodOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+ // A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+ Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+ // The value of the uninterpreted option, in whatever type the tokenizer
+ // identified it as during parsing. Exactly one of these should be set.
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
+ PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
+ NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
+ StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+ AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} }
+func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption) ProtoMessage() {}
+func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} }
+
+func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *UninterpretedOption) GetIdentifierValue() string {
+ if m != nil && m.IdentifierValue != nil {
+ return *m.IdentifierValue
+ }
+ return ""
+}
+
+func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
+ if m != nil && m.PositiveIntValue != nil {
+ return *m.PositiveIntValue
+ }
+ return 0
+}
+
+func (m *UninterpretedOption) GetNegativeIntValue() int64 {
+ if m != nil && m.NegativeIntValue != nil {
+ return *m.NegativeIntValue
+ }
+ return 0
+}
+
+func (m *UninterpretedOption) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *UninterpretedOption) GetStringValue() []byte {
+ if m != nil {
+ return m.StringValue
+ }
+ return nil
+}
+
+func (m *UninterpretedOption) GetAggregateValue() string {
+ if m != nil && m.AggregateValue != nil {
+ return *m.AggregateValue
+ }
+ return ""
+}
+
+// The name of the uninterpreted option. Each string represents a segment in
+// a dot-separated name. is_extension is true iff a segment represents an
+// extension (denoted with parentheses in options specs in .proto files).
+// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+// "foo.(bar.baz).qux".
+type UninterpretedOption_NamePart struct {
+ NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+ IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} }
+func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption_NamePart) ProtoMessage() {}
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{16, 0}
+}
+
+func (m *UninterpretedOption_NamePart) GetNamePart() string {
+ if m != nil && m.NamePart != nil {
+ return *m.NamePart
+ }
+ return ""
+}
+
+func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
+ if m != nil && m.IsExtension != nil {
+ return *m.IsExtension
+ }
+ return false
+}
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+type SourceCodeInfo struct {
+ // A Location identifies a piece of source code in a .proto file which
+ // corresponds to a particular definition. This information is intended
+ // to be useful to IDEs, code indexers, documentation generators, and similar
+ // tools.
+ //
+ // For example, say we have a file like:
+ // message Foo {
+ // optional string foo = 1;
+ // }
+ // Let's look at just the field definition:
+ // optional string foo = 1;
+ // ^ ^^ ^^ ^ ^^^
+ // a bc de f ghi
+ // We have the following locations:
+ // span path represents
+ // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
+ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
+ // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
+ // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
+ //
+ // Notes:
+ // - A location may refer to a repeated field itself (i.e. not to any
+ // particular index within it). This is used whenever a set of elements is
+ // logically enclosed in a single code segment. For example, an entire
+ // extend block (possibly containing multiple extension definitions) will
+ // have an outer location whose path refers to the "extensions" repeated
+ // field without an index.
+ // - Multiple locations may have the same path. This happens when a single
+ // logical declaration is spread out across multiple places. The most
+ // obvious example is the "extend" block again -- there may be multiple
+ // extend blocks in the same scope, each of which will have the same path.
+ // - A location's span is not always a subset of its parent's span. For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+ // does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage() {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} }
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+ if m != nil {
+ return m.Location
+ }
+ return nil
+}
+
+type SourceCodeInfo_Location struct {
+ // Identifies which part of the FileDescriptorProto was defined at this
+ // location.
+ //
+ // Each element is a field number or an index. They form a path from
+ // the root FileDescriptorProto to the place where the definition occurs. For
+ // example, this path:
+ // [ 4, 3, 2, 7, 1 ]
+ // refers to:
+ // file.message_type(3) // 4, 3
+ // .field(7) // 2, 7
+ // .name() // 1
+ // This is because FileDescriptorProto.message_type has field number 4:
+ // repeated DescriptorProto message_type = 4;
+ // and DescriptorProto.field has field number 2:
+ // repeated FieldDescriptorProto field = 2;
+ // and FieldDescriptorProto.name has field number 1:
+ // optional string name = 1;
+ //
+ // Thus, the above path gives the location of a field name. If we removed
+ // the last element:
+ // [ 4, 3, 2, 7 ]
+ // this path refers to the whole field declaration (from the beginning
+ // of the label to the terminating semicolon).
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+ // Always has exactly three or four elements: start line, start column,
+ // end line (optional, otherwise assumed same as start line), end column.
+ // These are packed into a single field for efficiency. Note that line
+ // and column numbers are zero-based -- typically you will want to add
+ // 1 to each before displaying to a user.
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
+ // If this SourceCodeInfo represents a complete declaration, these are any
+ // comments appearing before and after the declaration which appear to be
+ // attached to the declaration.
+ //
+ // A series of line comments appearing on consecutive lines, with no other
+ // tokens appearing on those lines, will be treated as a single comment.
+ //
+ // leading_detached_comments will keep paragraphs of comments that appear
+ // before (but not connected to) the current element. Each paragraph,
+ // separated by empty lines, will be one comment element in the repeated
+ // field.
+ //
+ // Only the comment content is provided; comment markers (e.g. //) are
+ // stripped out. For block comments, leading whitespace and an asterisk
+ // will be stripped from the beginning of each line other than the first.
+ // Newlines are included in the output.
+ //
+ // Examples:
+ //
+ // optional int32 foo = 1; // Comment attached to foo.
+ // // Comment attached to bar.
+ // optional int32 bar = 2;
+ //
+ // optional string baz = 3;
+ // // Comment attached to baz.
+ // // Another line attached to baz.
+ //
+ // // Comment attached to qux.
+ // //
+ // // Another line attached to qux.
+ // optional double qux = 4;
+ //
+ // // Detached comment for corge. This is not leading or trailing comments
+ // // to qux or corge because there are blank lines separating it from
+ // // both.
+ //
+ // // Detached comment for corge paragraph 2.
+ //
+ // optional string corge = 5;
+ // /* Block comment attached
+ // * to corge. Leading asterisks
+ // * will be removed. */
+ // /* Block comment attached to
+ // * grault. */
+ // optional int32 grault = 6;
+ //
+ // // ignored detached comments.
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
+ TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
+ LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} }
+func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo_Location) ProtoMessage() {}
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{17, 0}
+}
+
+func (m *SourceCodeInfo_Location) GetPath() []int32 {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+func (m *SourceCodeInfo_Location) GetSpan() []int32 {
+ if m != nil {
+ return m.Span
+ }
+ return nil
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingComments() string {
+ if m != nil && m.LeadingComments != nil {
+ return *m.LeadingComments
+ }
+ return ""
+}
+
+func (m *SourceCodeInfo_Location) GetTrailingComments() string {
+ if m != nil && m.TrailingComments != nil {
+ return *m.TrailingComments
+ }
+ return ""
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
+ if m != nil {
+ return m.LeadingDetachedComments
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
+ proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
+ proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
+ proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
+ proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
+ proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
+ proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
+ proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
+ proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
+ proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
+ proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
+ proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
+ proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
+ proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
+ proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
+ proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
+ proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
+ proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
+ proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
+ proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
+ proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
+ proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
+ proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
+ proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
+ proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
+ proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
+ proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+}
+
+var fileDescriptorDescriptor = []byte{
+ // 2192 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xd6,
+ 0x11, 0x2f, 0xff, 0x8a, 0x5c, 0x52, 0x24, 0xf4, 0xac, 0xd8, 0xb4, 0x62, 0xc7, 0x31, 0x63, 0xc7,
+ 0x8e, 0xd3, 0xd2, 0x19, 0xb7, 0x49, 0x5c, 0xa5, 0x93, 0x0e, 0x45, 0xc2, 0x0a, 0x3d, 0x94, 0xc8,
+ 0x3e, 0x92, 0xad, 0x93, 0x0b, 0x06, 0x02, 0x1f, 0x29, 0xd8, 0x20, 0xc0, 0x02, 0xa0, 0x6d, 0xe5,
+ 0xd4, 0x99, 0x9e, 0xfa, 0x0d, 0x3a, 0x6d, 0xa7, 0x87, 0x5c, 0x32, 0xd3, 0x0f, 0xd0, 0x43, 0xef,
+ 0xbd, 0xf6, 0xd0, 0x73, 0x8f, 0x9d, 0x69, 0xbf, 0x41, 0xaf, 0xdd, 0xf7, 0x1e, 0x00, 0x02, 0x24,
+ 0x15, 0xab, 0x99, 0x49, 0x13, 0x5d, 0xc4, 0xb7, 0xfb, 0xdb, 0xc5, 0xbe, 0x7d, 0xbf, 0xb7, 0xbb,
+ 0x00, 0x28, 0x63, 0xe6, 0x19, 0xae, 0x39, 0xf7, 0x1d, 0xb7, 0x31, 0x77, 0x1d, 0xdf, 0x21, 0xd5,
+ 0xa9, 0xe3, 0x4c, 0x2d, 0x26, 0x57, 0x27, 0x8b, 0x49, 0xfd, 0x08, 0x76, 0x1e, 0x99, 0x16, 0x6b,
+ 0x47, 0xc0, 0x01, 0xf3, 0xc9, 0x43, 0xc8, 0x4e, 0x50, 0x58, 0x4b, 0xbd, 0x99, 0xb9, 0x5b, 0x7a,
+ 0x70, 0xab, 0xb1, 0x62, 0xd4, 0x48, 0x5a, 0xf4, 0xb9, 0x98, 0x0a, 0x8b, 0xfa, 0x3f, 0xb3, 0x70,
+ 0x69, 0x83, 0x96, 0x10, 0xc8, 0xda, 0xfa, 0x8c, 0x7b, 0x4c, 0xdd, 0x2d, 0x52, 0xf1, 0x9b, 0xd4,
+ 0x60, 0x6b, 0xae, 0x1b, 0xcf, 0xf4, 0x29, 0xab, 0xa5, 0x85, 0x38, 0x5c, 0x92, 0x37, 0x00, 0xc6,
+ 0x6c, 0xce, 0xec, 0x31, 0xb3, 0x8d, 0xb3, 0x5a, 0x06, 0xa3, 0x28, 0xd2, 0x98, 0x84, 0xbc, 0x0b,
+ 0x3b, 0xf3, 0xc5, 0x89, 0x65, 0x1a, 0x5a, 0x0c, 0x06, 0x08, 0xcb, 0x51, 0x45, 0x2a, 0xda, 0x4b,
+ 0xf0, 0x1d, 0xa8, 0xbe, 0x60, 0xfa, 0xb3, 0x38, 0xb4, 0x24, 0xa0, 0x15, 0x2e, 0x8e, 0x01, 0x5b,
+ 0x50, 0x9e, 0x31, 0xcf, 0xc3, 0x00, 0x34, 0xff, 0x6c, 0xce, 0x6a, 0x59, 0xb1, 0xfb, 0x37, 0xd7,
+ 0x76, 0xbf, 0xba, 0xf3, 0x52, 0x60, 0x35, 0x44, 0x23, 0xd2, 0x84, 0x22, 0xb3, 0x17, 0x33, 0xe9,
+ 0x21, 0x77, 0x4e, 0xfe, 0x54, 0x44, 0xac, 0x7a, 0x29, 0x70, 0xb3, 0xc0, 0xc5, 0x96, 0xc7, 0xdc,
+ 0xe7, 0xa6, 0xc1, 0x6a, 0x79, 0xe1, 0xe0, 0xce, 0x9a, 0x83, 0x81, 0xd4, 0xaf, 0xfa, 0x08, 0xed,
+ 0x70, 0x2b, 0x45, 0xf6, 0xd2, 0x67, 0xb6, 0x67, 0x3a, 0x76, 0x6d, 0x4b, 0x38, 0xb9, 0xbd, 0xe1,
+ 0x14, 0x99, 0x35, 0x5e, 0x75, 0xb1, 0xb4, 0x23, 0x1f, 0xc0, 0x96, 0x33, 0xf7, 0xf1, 0x97, 0x57,
+ 0x2b, 0xe0, 0xf9, 0x94, 0x1e, 0x5c, 0xdb, 0x48, 0x84, 0x9e, 0xc4, 0xd0, 0x10, 0x4c, 0x3a, 0xa0,
+ 0x78, 0xce, 0xc2, 0x35, 0x98, 0x66, 0x38, 0x63, 0xa6, 0x99, 0xf6, 0xc4, 0xa9, 0x15, 0x85, 0x83,
+ 0x1b, 0xeb, 0x1b, 0x11, 0xc0, 0x16, 0xe2, 0x3a, 0x08, 0xa3, 0x15, 0x2f, 0xb1, 0x26, 0x97, 0x21,
+ 0xef, 0x9d, 0xd9, 0xbe, 0xfe, 0xb2, 0x56, 0x16, 0x0c, 0x09, 0x56, 0xf5, 0xff, 0xe4, 0xa0, 0x7a,
+ 0x11, 0x8a, 0x7d, 0x04, 0xb9, 0x09, 0xdf, 0x25, 0x12, 0xec, 0x7f, 0xc8, 0x81, 0xb4, 0x49, 0x26,
+ 0x31, 0xff, 0x35, 0x93, 0xd8, 0x84, 0x92, 0xcd, 0x3c, 0x9f, 0x8d, 0x25, 0x23, 0x32, 0x17, 0xe4,
+ 0x14, 0x48, 0xa3, 0x75, 0x4a, 0x65, 0xbf, 0x16, 0xa5, 0x9e, 0x40, 0x35, 0x0a, 0x49, 0x73, 0x75,
+ 0x7b, 0x1a, 0x72, 0xf3, 0xfe, 0xab, 0x22, 0x69, 0xa8, 0xa1, 0x1d, 0xe5, 0x66, 0xb4, 0xc2, 0x12,
+ 0x6b, 0xd2, 0x06, 0x70, 0x6c, 0xe6, 0x4c, 0xf0, 0x7a, 0x19, 0x16, 0xf2, 0x64, 0x73, 0x96, 0x7a,
+ 0x1c, 0xb2, 0x96, 0x25, 0x47, 0x4a, 0x0d, 0x8b, 0xfc, 0x78, 0x49, 0xb5, 0xad, 0x73, 0x98, 0x72,
+ 0x24, 0x2f, 0xd9, 0x1a, 0xdb, 0x46, 0x50, 0x71, 0x19, 0xe7, 0x3d, 0xa6, 0x58, 0xee, 0xac, 0x28,
+ 0x82, 0x68, 0xbc, 0x72, 0x67, 0x34, 0x30, 0x93, 0x1b, 0xdb, 0x76, 0xe3, 0x4b, 0xf2, 0x16, 0x44,
+ 0x02, 0x4d, 0xd0, 0x0a, 0x44, 0x15, 0x2a, 0x87, 0xc2, 0x63, 0x94, 0xed, 0x3d, 0x84, 0x4a, 0x32,
+ 0x3d, 0x64, 0x17, 0x72, 0x9e, 0xaf, 0xbb, 0xbe, 0x60, 0x61, 0x8e, 0xca, 0x05, 0x51, 0x20, 0x83,
+ 0x45, 0x46, 0x54, 0xb9, 0x1c, 0xe5, 0x3f, 0xf7, 0x3e, 0x84, 0xed, 0xc4, 0xe3, 0x2f, 0x6a, 0x58,
+ 0xff, 0x6d, 0x1e, 0x76, 0x37, 0x71, 0x6e, 0x23, 0xfd, 0xf1, 0xfa, 0x20, 0x03, 0x4e, 0x98, 0x8b,
+ 0xbc, 0xe3, 0x1e, 0x82, 0x15, 0x32, 0x2a, 0x67, 0xe9, 0x27, 0xcc, 0x42, 0x36, 0xa5, 0xee, 0x56,
+ 0x1e, 0xbc, 0x7b, 0x21, 0x56, 0x37, 0xba, 0xdc, 0x84, 0x4a, 0x4b, 0xf2, 0x31, 0x64, 0x83, 0x12,
+ 0xc7, 0x3d, 0xdc, 0xbb, 0x98, 0x07, 0xce, 0x45, 0x2a, 0xec, 0xc8, 0xeb, 0x50, 0xe4, 0xff, 0x65,
+ 0x6e, 0xf3, 0x22, 0xe6, 0x02, 0x17, 0xf0, 0xbc, 0x92, 0x3d, 0x28, 0x08, 0x9a, 0x8d, 0x59, 0xd8,
+ 0x1a, 0xa2, 0x35, 0x3f, 0x98, 0x31, 0x9b, 0xe8, 0x0b, 0xcb, 0xd7, 0x9e, 0xeb, 0xd6, 0x82, 0x09,
+ 0xc2, 0xe0, 0xc1, 0x04, 0xc2, 0x9f, 0x73, 0x19, 0xb9, 0x01, 0x25, 0xc9, 0x4a, 0x13, 0x6d, 0x5e,
+ 0x8a, 0xea, 0x93, 0xa3, 0x92, 0xa8, 0x1d, 0x2e, 0xe1, 0x8f, 0x7f, 0xea, 0xe1, 0x5d, 0x08, 0x8e,
+ 0x56, 0x3c, 0x82, 0x0b, 0xc4, 0xe3, 0x3f, 0x5c, 0x2d, 0x7c, 0xd7, 0x37, 0x6f, 0x6f, 0x95, 0x8b,
+ 0xf5, 0x3f, 0xa7, 0x21, 0x2b, 0xee, 0x5b, 0x15, 0x4a, 0xc3, 0x4f, 0xfb, 0xaa, 0xd6, 0xee, 0x8d,
+ 0x0e, 0xba, 0xaa, 0x92, 0x22, 0x15, 0x00, 0x21, 0x78, 0xd4, 0xed, 0x35, 0x87, 0x4a, 0x3a, 0x5a,
+ 0x77, 0x8e, 0x87, 0x1f, 0xfc, 0x48, 0xc9, 0x44, 0x06, 0x23, 0x29, 0xc8, 0xc6, 0x01, 0x3f, 0x7c,
+ 0xa0, 0xe4, 0x90, 0x09, 0x65, 0xe9, 0xa0, 0xf3, 0x44, 0x6d, 0x23, 0x22, 0x9f, 0x94, 0x20, 0x66,
+ 0x8b, 0x6c, 0x43, 0x51, 0x48, 0x0e, 0x7a, 0xbd, 0xae, 0x52, 0x88, 0x7c, 0x0e, 0x86, 0xb4, 0x73,
+ 0x7c, 0xa8, 0x14, 0x23, 0x9f, 0x87, 0xb4, 0x37, 0xea, 0x2b, 0x10, 0x79, 0x38, 0x52, 0x07, 0x83,
+ 0xe6, 0xa1, 0xaa, 0x94, 0x22, 0xc4, 0xc1, 0xa7, 0x43, 0x75, 0xa0, 0x94, 0x13, 0x61, 0xe1, 0x23,
+ 0xb6, 0xa3, 0x47, 0xa8, 0xc7, 0xa3, 0x23, 0xa5, 0x42, 0x76, 0x60, 0x5b, 0x3e, 0x22, 0x0c, 0xa2,
+ 0xba, 0x22, 0xc2, 0x48, 0x95, 0x65, 0x20, 0xd2, 0xcb, 0x4e, 0x42, 0x80, 0x08, 0x52, 0x6f, 0x41,
+ 0x4e, 0xb0, 0x0b, 0x59, 0x5c, 0xe9, 0x36, 0x0f, 0xd4, 0xae, 0xd6, 0xeb, 0x0f, 0x3b, 0xbd, 0xe3,
+ 0x66, 0x17, 0x73, 0x17, 0xc9, 0xa8, 0xfa, 0xb3, 0x51, 0x87, 0xaa, 0x6d, 0xcc, 0x5f, 0x4c, 0xd6,
+ 0x57, 0x9b, 0x43, 0x94, 0x65, 0xea, 0xf7, 0x60, 0x77, 0x53, 0x9d, 0xd9, 0x74, 0x33, 0xea, 0x5f,
+ 0xa4, 0xe0, 0xd2, 0x86, 0x92, 0xb9, 0xf1, 0x16, 0xfd, 0x14, 0x72, 0x92, 0x69, 0xb2, 0x89, 0xbc,
+ 0xb3, 0xb1, 0xf6, 0x0a, 0xde, 0xad, 0x35, 0x12, 0x61, 0x17, 0x6f, 0xa4, 0x99, 0x73, 0x1a, 0x29,
+ 0x77, 0xb1, 0x46, 0xa7, 0x5f, 0xa7, 0xa0, 0x76, 0x9e, 0xef, 0x57, 0xdc, 0xf7, 0x74, 0xe2, 0xbe,
+ 0x7f, 0xb4, 0x1a, 0xc0, 0xcd, 0xf3, 0xf7, 0xb0, 0x16, 0xc5, 0x97, 0x29, 0xb8, 0xbc, 0x79, 0xde,
+ 0xd8, 0x18, 0xc3, 0xc7, 0x90, 0x9f, 0x31, 0xff, 0xd4, 0x09, 0x7b, 0xee, 0xdb, 0x1b, 0x2a, 0x39,
+ 0x57, 0xaf, 0xe6, 0x2a, 0xb0, 0x8a, 0xb7, 0x82, 0xcc, 0x79, 0x43, 0x83, 0x8c, 0x66, 0x2d, 0xd2,
+ 0xdf, 0xa4, 0xe1, 0xb5, 0x8d, 0xce, 0x37, 0x06, 0x7a, 0x1d, 0xc0, 0xb4, 0xe7, 0x0b, 0x5f, 0xf6,
+ 0x55, 0x59, 0x66, 0x8a, 0x42, 0x22, 0xae, 0x30, 0x2f, 0x21, 0x0b, 0x3f, 0xd2, 0x67, 0x84, 0x1e,
+ 0xa4, 0x48, 0x00, 0x1e, 0x2e, 0x03, 0xcd, 0x8a, 0x40, 0xdf, 0x38, 0x67, 0xa7, 0x6b, 0x2d, 0xeb,
+ 0x3d, 0x50, 0x0c, 0xcb, 0x64, 0xb6, 0xaf, 0x79, 0xbe, 0xcb, 0xf4, 0x99, 0x69, 0x4f, 0x45, 0x1d,
+ 0x2d, 0xec, 0xe7, 0x26, 0xba, 0xe5, 0x31, 0x5a, 0x95, 0xea, 0x41, 0xa8, 0xe5, 0x16, 0xa2, 0x59,
+ 0xb8, 0x31, 0x8b, 0x7c, 0xc2, 0x42, 0xaa, 0x23, 0x8b, 0xfa, 0xdf, 0xb7, 0xa0, 0x14, 0x9b, 0xce,
+ 0xc8, 0x4d, 0x28, 0x3f, 0xd5, 0x9f, 0xeb, 0x5a, 0x38, 0x71, 0xcb, 0x4c, 0x94, 0xb8, 0xac, 0x1f,
+ 0x4c, 0xdd, 0xef, 0xc1, 0xae, 0x80, 0xe0, 0x1e, 0xf1, 0x41, 0x86, 0xa5, 0x7b, 0x9e, 0x48, 0x5a,
+ 0x41, 0x40, 0x09, 0xd7, 0xf5, 0xb8, 0xaa, 0x15, 0x6a, 0xc8, 0xfb, 0x70, 0x49, 0x58, 0xcc, 0xb0,
+ 0xf0, 0x9a, 0x73, 0x8b, 0x69, 0xfc, 0x1d, 0xc0, 0x13, 0xf5, 0x34, 0x8a, 0x6c, 0x87, 0x23, 0x8e,
+ 0x02, 0x00, 0x8f, 0xc8, 0x23, 0x87, 0x70, 0x5d, 0x98, 0x4d, 0x99, 0xcd, 0x5c, 0xdd, 0x67, 0x1a,
+ 0xfb, 0xe5, 0x02, 0xb1, 0x9a, 0x6e, 0x8f, 0xb5, 0x53, 0xdd, 0x3b, 0xad, 0xed, 0xc6, 0x1d, 0x5c,
+ 0xe5, 0xd8, 0xc3, 0x00, 0xaa, 0x0a, 0x64, 0xd3, 0x1e, 0x7f, 0x82, 0x38, 0xb2, 0x0f, 0x97, 0x85,
+ 0x23, 0x4c, 0x0a, 0xee, 0x59, 0x33, 0x4e, 0x99, 0xf1, 0x4c, 0x5b, 0xf8, 0x93, 0x87, 0xb5, 0xd7,
+ 0xe3, 0x1e, 0x44, 0x90, 0x03, 0x81, 0x69, 0x71, 0xc8, 0x08, 0x11, 0x64, 0x00, 0x65, 0x7e, 0x1e,
+ 0x33, 0xf3, 0x73, 0x0c, 0xdb, 0x71, 0x45, 0x8f, 0xa8, 0x6c, 0xb8, 0xdc, 0xb1, 0x24, 0x36, 0x7a,
+ 0x81, 0xc1, 0x11, 0xce, 0xa7, 0xfb, 0xb9, 0x41, 0x5f, 0x55, 0xdb, 0xb4, 0x14, 0x7a, 0x79, 0xe4,
+ 0xb8, 0x9c, 0x53, 0x53, 0x27, 0xca, 0x71, 0x49, 0x72, 0x6a, 0xea, 0x84, 0x19, 0xc6, 0x7c, 0x19,
+ 0x86, 0xdc, 0x36, 0xbe, 0xbb, 0x04, 0xc3, 0xba, 0x57, 0x53, 0x12, 0xf9, 0x32, 0x8c, 0x43, 0x09,
+ 0x08, 0x68, 0xee, 0xe1, 0x95, 0x78, 0x6d, 0x99, 0xaf, 0xb8, 0xe1, 0xce, 0xda, 0x2e, 0x57, 0x4d,
+ 0xf1, 0x89, 0xf3, 0xb3, 0x75, 0x43, 0x92, 0x78, 0xe2, 0xfc, 0x6c, 0xd5, 0xec, 0xb6, 0x78, 0x01,
+ 0x73, 0x99, 0x81, 0x29, 0x1f, 0xd7, 0xae, 0xc4, 0xd1, 0x31, 0x05, 0xb9, 0x8f, 0x44, 0x36, 0x34,
+ 0x66, 0xeb, 0x27, 0x78, 0xf6, 0xba, 0x8b, 0x3f, 0xbc, 0xda, 0x8d, 0x38, 0xb8, 0x62, 0x18, 0xaa,
+ 0xd0, 0x36, 0x85, 0x92, 0xdc, 0x83, 0x1d, 0xe7, 0xe4, 0xa9, 0x21, 0xc9, 0xa5, 0xa1, 0x9f, 0x89,
+ 0xf9, 0xb2, 0x76, 0x4b, 0xa4, 0xa9, 0xca, 0x15, 0x82, 0x5a, 0x7d, 0x21, 0x26, 0xef, 0xa0, 0x73,
+ 0xef, 0x54, 0x77, 0xe7, 0xa2, 0x49, 0x7b, 0x98, 0x54, 0x56, 0xbb, 0x2d, 0xa1, 0x52, 0x7e, 0x1c,
+ 0x8a, 0x89, 0x0a, 0x37, 0xf8, 0xe6, 0x6d, 0xdd, 0x76, 0xb4, 0x85, 0xc7, 0xb4, 0x65, 0x88, 0xd1,
+ 0x59, 0xbc, 0xcd, 0xc3, 0xa2, 0xd7, 0x42, 0xd8, 0xc8, 0xc3, 0x62, 0x16, 0x82, 0xc2, 0xe3, 0x79,
+ 0x02, 0xbb, 0x0b, 0xdb, 0xb4, 0x91, 0xe2, 0xa8, 0xe1, 0xc6, 0xf2, 0xc2, 0xd6, 0xfe, 0xb5, 0x75,
+ 0xce, 0xd0, 0x3d, 0x8a, 0xa3, 0x25, 0x49, 0xe8, 0xa5, 0xc5, 0xba, 0xb0, 0xbe, 0x0f, 0xe5, 0x38,
+ 0x77, 0x48, 0x11, 0x24, 0x7b, 0xb0, 0xbb, 0x61, 0x47, 0x6d, 0xf5, 0xda, 0xbc, 0x17, 0x7e, 0xa6,
+ 0x62, 0x63, 0xc3, 0x9e, 0xdc, 0xed, 0x0c, 0x55, 0x8d, 0x8e, 0x8e, 0x87, 0x9d, 0x23, 0x55, 0xc9,
+ 0xdc, 0x2b, 0x16, 0xfe, 0xbd, 0xa5, 0xfc, 0x0a, 0xff, 0xd2, 0xf5, 0xbf, 0xa6, 0xa1, 0x92, 0x9c,
+ 0x83, 0xc9, 0x4f, 0xe0, 0x4a, 0xf8, 0xd2, 0xea, 0x31, 0x5f, 0x7b, 0x61, 0xba, 0x82, 0xce, 0x33,
+ 0x5d, 0x4e, 0x92, 0xd1, 0x49, 0xec, 0x06, 0x28, 0x7c, 0xbd, 0xff, 0x05, 0x62, 0x1e, 0x09, 0x08,
+ 0xe9, 0xc2, 0x0d, 0x4c, 0x19, 0xce, 0x9a, 0xf6, 0x58, 0x77, 0xc7, 0xda, 0xf2, 0x73, 0x81, 0xa6,
+ 0x1b, 0xc8, 0x03, 0xcf, 0x91, 0x9d, 0x24, 0xf2, 0x72, 0xcd, 0x76, 0x06, 0x01, 0x78, 0x59, 0x62,
+ 0x9b, 0x01, 0x74, 0x85, 0x35, 0x99, 0xf3, 0x58, 0x83, 0xb3, 0xd7, 0x4c, 0x9f, 0x23, 0x6d, 0x7c,
+ 0xf7, 0x4c, 0x4c, 0x6f, 0x05, 0x5a, 0x40, 0x81, 0xca, 0xd7, 0xdf, 0xdc, 0x19, 0xc4, 0xf3, 0xf8,
+ 0x8f, 0x0c, 0x94, 0xe3, 0x13, 0x1c, 0x1f, 0x88, 0x0d, 0x51, 0xe6, 0x53, 0xa2, 0x0a, 0xbc, 0xf5,
+ 0x95, 0xf3, 0x5e, 0xa3, 0xc5, 0xeb, 0xff, 0x7e, 0x5e, 0xce, 0x55, 0x54, 0x5a, 0xf2, 0xde, 0xcb,
+ 0xb9, 0xc6, 0xe4, 0xb4, 0x5e, 0xa0, 0xc1, 0x0a, 0x8b, 0x5d, 0xfe, 0xa9, 0x27, 0x7c, 0xe7, 0x85,
+ 0xef, 0x5b, 0x5f, 0xed, 0xfb, 0xf1, 0x40, 0x38, 0x2f, 0x3e, 0x1e, 0x68, 0xc7, 0x3d, 0x7a, 0xd4,
+ 0xec, 0xd2, 0xc0, 0x9c, 0x5c, 0x85, 0xac, 0xa5, 0x7f, 0x7e, 0x96, 0xec, 0x14, 0x42, 0x74, 0xd1,
+ 0xc4, 0xa3, 0x07, 0xfe, 0xc9, 0x23, 0x59, 0x9f, 0x85, 0xe8, 0x1b, 0xa4, 0xfe, 0x7d, 0xc8, 0x89,
+ 0x7c, 0x11, 0x80, 0x20, 0x63, 0xca, 0xf7, 0x48, 0x01, 0xb2, 0xad, 0x1e, 0xe5, 0xf4, 0x47, 0xbe,
+ 0x4b, 0xa9, 0xd6, 0xef, 0xa8, 0x2d, 0xbc, 0x01, 0xf5, 0xf7, 0x21, 0x2f, 0x93, 0xc0, 0xaf, 0x46,
+ 0x94, 0x06, 0x34, 0x92, 0xcb, 0xc0, 0x47, 0x2a, 0xd4, 0x8e, 0x8e, 0x0e, 0x54, 0xaa, 0xa4, 0xe3,
+ 0xc7, 0xfb, 0x97, 0x14, 0x94, 0x62, 0x03, 0x15, 0x6f, 0xe5, 0xba, 0x65, 0x39, 0x2f, 0x34, 0xdd,
+ 0x32, 0xb1, 0x42, 0xc9, 0xf3, 0x01, 0x21, 0x6a, 0x72, 0xc9, 0x45, 0xf3, 0xf7, 0x7f, 0xe1, 0xe6,
+ 0x1f, 0x53, 0xa0, 0xac, 0x0e, 0x63, 0x2b, 0x01, 0xa6, 0xbe, 0xd5, 0x00, 0xff, 0x90, 0x82, 0x4a,
+ 0x72, 0x02, 0x5b, 0x09, 0xef, 0xe6, 0xb7, 0x1a, 0xde, 0xef, 0x53, 0xb0, 0x9d, 0x98, 0xbb, 0xbe,
+ 0x53, 0xd1, 0xfd, 0x2e, 0x03, 0x97, 0x36, 0xd8, 0x61, 0x01, 0x92, 0x03, 0xaa, 0x9c, 0x99, 0x7f,
+ 0x70, 0x91, 0x67, 0x35, 0x78, 0xff, 0xeb, 0xeb, 0xae, 0x1f, 0xcc, 0xb3, 0xd8, 0x2f, 0xcd, 0x31,
+ 0x16, 0x55, 0x73, 0x62, 0xe2, 0xf8, 0x26, 0xdf, 0x58, 0xe4, 0xd4, 0x5a, 0x5d, 0xca, 0xe5, 0xeb,
+ 0xf1, 0xf7, 0x81, 0xcc, 0x1d, 0xcf, 0xf4, 0xcd, 0xe7, 0xfc, 0xf3, 0x5c, 0xf8, 0x22, 0xcd, 0xa7,
+ 0xd8, 0x2c, 0x55, 0x42, 0x4d, 0xc7, 0xf6, 0x23, 0xb4, 0xcd, 0xa6, 0xfa, 0x0a, 0x9a, 0x97, 0xa1,
+ 0x0c, 0x55, 0x42, 0x4d, 0x84, 0xc6, 0x41, 0x73, 0xec, 0x2c, 0xf8, 0x40, 0x20, 0x71, 0xbc, 0xea,
+ 0xa5, 0x68, 0x49, 0xca, 0x22, 0x48, 0x30, 0xb1, 0x2d, 0xdf, 0xe0, 0xcb, 0xb4, 0x24, 0x65, 0x12,
+ 0x72, 0x07, 0xaa, 0xfa, 0x74, 0xea, 0x72, 0xe7, 0xa1, 0x23, 0x39, 0x86, 0x56, 0x22, 0xb1, 0x00,
+ 0xee, 0x3d, 0x86, 0x42, 0x98, 0x07, 0xde, 0x58, 0x78, 0x26, 0xb0, 0xe7, 0x8b, 0xef, 0x28, 0x69,
+ 0xfe, 0x52, 0x6f, 0x87, 0x4a, 0x7c, 0xa8, 0xe9, 0x69, 0xcb, 0x0f, 0x7a, 0x69, 0xd4, 0x17, 0x68,
+ 0xc9, 0xf4, 0xa2, 0x2f, 0x38, 0xf5, 0x2f, 0xb1, 0xbd, 0x26, 0x3f, 0x48, 0x92, 0x36, 0x14, 0x2c,
+ 0x07, 0xf9, 0xc1, 0x2d, 0xe4, 0xd7, 0xf0, 0xbb, 0xaf, 0xf8, 0x86, 0xd9, 0xe8, 0x06, 0x78, 0x1a,
+ 0x59, 0xee, 0xfd, 0x2d, 0x05, 0x85, 0x50, 0x8c, 0x8d, 0x22, 0x3b, 0xd7, 0xfd, 0x53, 0xe1, 0x2e,
+ 0x77, 0x90, 0x56, 0x52, 0x54, 0xac, 0xb9, 0x1c, 0xa7, 0x19, 0x5b, 0x50, 0x20, 0x90, 0xf3, 0x35,
+ 0x3f, 0x57, 0x8b, 0xe9, 0x63, 0x31, 0xe0, 0x3a, 0xb3, 0x19, 0x9e, 0xa4, 0x17, 0x9e, 0x6b, 0x20,
+ 0x6f, 0x05, 0x62, 0xfe, 0x5d, 0xdc, 0x77, 0x75, 0xd3, 0x4a, 0x60, 0xb3, 0x02, 0xab, 0x84, 0x8a,
+ 0x08, 0xbc, 0x0f, 0x57, 0x43, 0xbf, 0x63, 0xe6, 0xeb, 0x38, 0x3c, 0x8f, 0x97, 0x46, 0x79, 0xf1,
+ 0xb5, 0xeb, 0x4a, 0x00, 0x68, 0x07, 0xfa, 0xd0, 0xf6, 0xe0, 0x09, 0x0e, 0xb2, 0xce, 0x6c, 0x35,
+ 0x13, 0x07, 0xca, 0xca, 0x7b, 0x97, 0xf7, 0x49, 0xea, 0x33, 0x58, 0x0e, 0x15, 0x5f, 0xa4, 0x33,
+ 0x87, 0xfd, 0x83, 0x3f, 0xa5, 0xf7, 0x0e, 0xa5, 0x5d, 0x3f, 0xcc, 0x20, 0x65, 0x13, 0x8b, 0x19,
+ 0x3c, 0x3b, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x78, 0x42, 0x69, 0x71, 0xb3, 0x18, 0x00, 0x00,
+}
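Editor's note: the Default_*_Deprecated constants above pair with nil-safe getters. Every optional field in the generated code is a pointer, and its Get* method returns the declared default when the field (or the whole message) is nil. A minimal sketch of that behavior, assuming the vendored packages github.com/gogo/protobuf/proto and github.com/gogo/protobuf/protoc-gen-gogo/descriptor resolve on the build path:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Getters are safe to call on a nil receiver and fall back to the
	// generated Default_* constant when the field is unset.
	var opts *descriptor.EnumOptions
	fmt.Println(opts.GetDeprecated()) // false (Default_EnumOptions_Deprecated)

	// proto.Bool allocates a *bool so the optional field can be set.
	opts = &descriptor.EnumOptions{Deprecated: proto.Bool(true)}
	fmt.Println(opts.GetDeprecated()) // true
}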
diff --git a/vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go b/vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go
new file mode 100644
index 0000000000..76e2c95f9b
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go
@@ -0,0 +1,635 @@
+package descriptor
+
+import fmt "fmt"
+
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+
+func (this *FileDescriptorSet) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&descriptor.FileDescriptorSet{")
+ if this.File != nil {
+ s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *FileDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 16)
+ s = append(s, "&descriptor.FileDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.Package != nil {
+ s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n")
+ }
+ if this.Dependency != nil {
+ s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n")
+ }
+ if this.PublicDependency != nil {
+ s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n")
+ }
+ if this.WeakDependency != nil {
+ s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n")
+ }
+ if this.MessageType != nil {
+ s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n")
+ }
+ if this.EnumType != nil {
+ s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
+ }
+ if this.Service != nil {
+ s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
+ }
+ if this.Extension != nil {
+ s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.SourceCodeInfo != nil {
+ s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n")
+ }
+ if this.Syntax != nil {
+ s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *DescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 14)
+ s = append(s, "&descriptor.DescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.Field != nil {
+ s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n")
+ }
+ if this.Extension != nil {
+ s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
+ }
+ if this.NestedType != nil {
+ s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n")
+ }
+ if this.EnumType != nil {
+ s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
+ }
+ if this.ExtensionRange != nil {
+ s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n")
+ }
+ if this.OneofDecl != nil {
+ s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.ReservedRange != nil {
+ s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
+ }
+ if this.ReservedName != nil {
+ s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *DescriptorProto_ExtensionRange) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&descriptor.DescriptorProto_ExtensionRange{")
+ if this.Start != nil {
+ s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
+ }
+ if this.End != nil {
+ s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *DescriptorProto_ReservedRange) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&descriptor.DescriptorProto_ReservedRange{")
+ if this.Start != nil {
+ s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
+ }
+ if this.End != nil {
+ s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *FieldDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 14)
+ s = append(s, "&descriptor.FieldDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.Number != nil {
+ s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
+ }
+ if this.Label != nil {
+ s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "descriptor.FieldDescriptorProto_Label")+",\n")
+ }
+ if this.Type != nil {
+ s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "descriptor.FieldDescriptorProto_Type")+",\n")
+ }
+ if this.TypeName != nil {
+ s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n")
+ }
+ if this.Extendee != nil {
+ s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n")
+ }
+ if this.DefaultValue != nil {
+ s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n")
+ }
+ if this.OneofIndex != nil {
+ s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n")
+ }
+ if this.JsonName != nil {
+ s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *OneofDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&descriptor.OneofDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *EnumDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&descriptor.EnumDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.Value != nil {
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *EnumValueDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&descriptor.EnumValueDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.Number != nil {
+ s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ServiceDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&descriptor.ServiceDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.Method != nil {
+ s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *MethodDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 10)
+ s = append(s, "&descriptor.MethodDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.InputType != nil {
+ s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n")
+ }
+ if this.OutputType != nil {
+ s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.ClientStreaming != nil {
+ s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n")
+ }
+ if this.ServerStreaming != nil {
+ s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *FileOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 20)
+ s = append(s, "&descriptor.FileOptions{")
+ if this.JavaPackage != nil {
+ s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n")
+ }
+ if this.JavaOuterClassname != nil {
+ s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n")
+ }
+ if this.JavaMultipleFiles != nil {
+ s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n")
+ }
+ if this.JavaGenerateEqualsAndHash != nil {
+ s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n")
+ }
+ if this.JavaStringCheckUtf8 != nil {
+ s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n")
+ }
+ if this.OptimizeFor != nil {
+ s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "descriptor.FileOptions_OptimizeMode")+",\n")
+ }
+ if this.GoPackage != nil {
+ s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n")
+ }
+ if this.CcGenericServices != nil {
+ s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n")
+ }
+ if this.JavaGenericServices != nil {
+ s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n")
+ }
+ if this.PyGenericServices != nil {
+ s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n")
+ }
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.CcEnableArenas != nil {
+ s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n")
+ }
+ if this.ObjcClassPrefix != nil {
+ s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n")
+ }
+ if this.CsharpNamespace != nil {
+ s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n")
+ }
+ if this.JavananoUseDeprecatedPackage != nil {
+ s = append(s, "JavananoUseDeprecatedPackage: "+valueToGoStringDescriptor(this.JavananoUseDeprecatedPackage, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ if this.XXX_extensions != nil {
+ s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *MessageOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&descriptor.MessageOptions{")
+ if this.MessageSetWireFormat != nil {
+ s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n")
+ }
+ if this.NoStandardDescriptorAccessor != nil {
+ s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n")
+ }
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.MapEntry != nil {
+ s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ if this.XXX_extensions != nil {
+ s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *FieldOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 11)
+ s = append(s, "&descriptor.FieldOptions{")
+ if this.Ctype != nil {
+ s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "descriptor.FieldOptions_CType")+",\n")
+ }
+ if this.Packed != nil {
+ s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n")
+ }
+ if this.Jstype != nil {
+ s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "descriptor.FieldOptions_JSType")+",\n")
+ }
+ if this.Lazy != nil {
+ s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n")
+ }
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.Weak != nil {
+ s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ if this.XXX_extensions != nil {
+ s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *EnumOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&descriptor.EnumOptions{")
+ if this.AllowAlias != nil {
+ s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n")
+ }
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ if this.XXX_extensions != nil {
+ s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *EnumValueOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&descriptor.EnumValueOptions{")
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ if this.XXX_extensions != nil {
+ s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ServiceOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&descriptor.ServiceOptions{")
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ if this.XXX_extensions != nil {
+ s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *MethodOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&descriptor.MethodOptions{")
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ if this.XXX_extensions != nil {
+ s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UninterpretedOption) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 11)
+ s = append(s, "&descriptor.UninterpretedOption{")
+ if this.Name != nil {
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ }
+ if this.IdentifierValue != nil {
+ s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n")
+ }
+ if this.PositiveIntValue != nil {
+ s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n")
+ }
+ if this.NegativeIntValue != nil {
+ s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n")
+ }
+ if this.DoubleValue != nil {
+ s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n")
+ }
+ if this.StringValue != nil {
+ s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n")
+ }
+ if this.AggregateValue != nil {
+ s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UninterpretedOption_NamePart) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&descriptor.UninterpretedOption_NamePart{")
+ if this.NamePart != nil {
+ s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n")
+ }
+ if this.IsExtension != nil {
+ s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SourceCodeInfo) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&descriptor.SourceCodeInfo{")
+ if this.Location != nil {
+ s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SourceCodeInfo_Location) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&descriptor.SourceCodeInfo_Location{")
+ if this.Path != nil {
+ s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
+ }
+ if this.Span != nil {
+ s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n")
+ }
+ if this.LeadingComments != nil {
+ s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n")
+ }
+ if this.TrailingComments != nil {
+ s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n")
+ }
+ if this.LeadingDetachedComments != nil {
+ s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringDescriptor(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringDescriptor(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+ if e == nil {
+ return "nil"
+ }
+ s := "map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "}"
+ return s
+}
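Editor's note: gostring.go exists so that fmt's %#v verb prints a protobuf message as Go source. Each generated GoString method emits a composite literal, and valueToGoStringDescriptor wraps scalar pointers in an inline func literal that takes their address. A small usage sketch under the same vendored-import assumption as above:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// %#v dispatches to GoString because the message implements fmt.GoStringer.
	p := &descriptor.OneofDescriptorProto{Name: proto.String("result")}
	fmt.Printf("%#v\n", p)
	// Prints something like:
	// &descriptor.OneofDescriptorProto{Name: func(v string) *string { return &v } ( "result" ),
	// }
}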
diff --git a/vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
new file mode 100644
index 0000000000..ab170f913a
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
@@ -0,0 +1,355 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package descriptor
+
+import (
+ "strings"
+)
+
+func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) {
+ if !msg.GetOptions().GetMapEntry() {
+ return nil, nil
+ }
+ return msg.GetField()[0], msg.GetField()[1]
+}
+
+func dotToUnderscore(r rune) rune {
+ if r == '.' {
+ return '_'
+ }
+ return r
+}
+
+func (field *FieldDescriptorProto) WireType() (wire int) {
+ switch *field.Type {
+ case FieldDescriptorProto_TYPE_DOUBLE:
+ return 1
+ case FieldDescriptorProto_TYPE_FLOAT:
+ return 5
+ case FieldDescriptorProto_TYPE_INT64:
+ return 0
+ case FieldDescriptorProto_TYPE_UINT64:
+ return 0
+ case FieldDescriptorProto_TYPE_INT32:
+ return 0
+ case FieldDescriptorProto_TYPE_UINT32:
+ return 0
+ case FieldDescriptorProto_TYPE_FIXED64:
+ return 1
+ case FieldDescriptorProto_TYPE_FIXED32:
+ return 5
+ case FieldDescriptorProto_TYPE_BOOL:
+ return 0
+ case FieldDescriptorProto_TYPE_STRING:
+ return 2
+ case FieldDescriptorProto_TYPE_GROUP:
+ return 2
+ case FieldDescriptorProto_TYPE_MESSAGE:
+ return 2
+ case FieldDescriptorProto_TYPE_BYTES:
+ return 2
+ case FieldDescriptorProto_TYPE_ENUM:
+ return 0
+ case FieldDescriptorProto_TYPE_SFIXED32:
+ return 5
+ case FieldDescriptorProto_TYPE_SFIXED64:
+ return 1
+ case FieldDescriptorProto_TYPE_SINT32:
+ return 0
+ case FieldDescriptorProto_TYPE_SINT64:
+ return 0
+ }
+ panic("unreachable")
+}
+
+func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
+ packed := field.IsPacked()
+ wireType := field.WireType()
+ fieldNumber := field.GetNumber()
+ if packed {
+ wireType = 2
+ }
+ x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
+ return x
+}
+
+func (field *FieldDescriptorProto) GetKey() []byte {
+ x := field.GetKeyUint64()
+ i := 0
+ keybuf := make([]byte, 0)
+ for i = 0; x > 127; i++ {
+ keybuf = append(keybuf, 0x80|uint8(x&0x7F))
+ x >>= 7
+ }
+ keybuf = append(keybuf, uint8(x))
+ return keybuf
+}
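+
+// Illustrative sketch (editorial note, not part of the upstream file): the
+// key is the protobuf wire "tag", fieldNumber<<3 | wireType, varint-encoded.
+// Assuming a non-packed string field (wire type 2) with field number 1:
+//
+//	key := field.GetKeyUint64() // (1 << 3) | 2 == 0x0a
+//	buf := field.GetKey()       // []byte{0x0a}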
+
+func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
+ msg := desc.GetMessage(packageName, messageName)
+ if msg == nil {
+ return nil
+ }
+ for _, field := range msg.GetField() {
+ if field.GetName() == fieldName {
+ return field
+ }
+ }
+ return nil
+}
+
+func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
+ for _, msg := range file.GetMessageType() {
+ if msg.GetName() == typeName {
+ return msg
+ }
+ nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
+ if nes != nil {
+ return nes
+ }
+ }
+ return nil
+}
+
+func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
+ for _, nes := range msg.GetNestedType() {
+ if nes.GetName() == typeName {
+ return nes
+ }
+ res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
+ if res != nil {
+ return res
+ }
+ }
+ return nil
+}
+
+func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto {
+ for _, file := range desc.GetFile() {
+ if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+ continue
+ }
+ for _, msg := range file.GetMessageType() {
+ if msg.GetName() == typeName {
+ return msg
+ }
+ }
+ for _, msg := range file.GetMessageType() {
+ for _, nes := range msg.GetNestedType() {
+ if nes.GetName() == typeName {
+ return nes
+ }
+ if msg.GetName()+"."+nes.GetName() == typeName {
+ return nes
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool {
+ for _, file := range desc.GetFile() {
+ if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+ continue
+ }
+ for _, msg := range file.GetMessageType() {
+ if msg.GetName() == typeName {
+ return file.GetSyntax() == "proto3"
+ }
+ }
+ for _, msg := range file.GetMessageType() {
+ for _, nes := range msg.GetNestedType() {
+ if nes.GetName() == typeName {
+ return file.GetSyntax() == "proto3"
+ }
+ if msg.GetName()+"."+nes.GetName() == typeName {
+ return file.GetSyntax() == "proto3"
+ }
+ }
+ }
+ }
+ return false
+}
+
+func (msg *DescriptorProto) IsExtendable() bool {
+ return len(msg.GetExtensionRange()) > 0
+}
+
+func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) {
+ parent := desc.GetMessage(packageName, typeName)
+ if parent == nil {
+ return "", nil
+ }
+ if !parent.IsExtendable() {
+ return "", nil
+ }
+ extendee := "." + packageName + "." + typeName
+ for _, file := range desc.GetFile() {
+ for _, ext := range file.GetExtension() {
+ if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
+ if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
+ continue
+ }
+ } else {
+ if ext.GetExtendee() != extendee {
+ continue
+ }
+ }
+ if ext.GetName() == fieldName {
+ return file.GetPackage(), ext
+ }
+ }
+ }
+ return "", nil
+}
+
+func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) {
+ parent := desc.GetMessage(packageName, typeName)
+ if parent == nil {
+ return "", nil
+ }
+ if !parent.IsExtendable() {
+ return "", nil
+ }
+ extendee := "." + packageName + "." + typeName
+ for _, file := range desc.GetFile() {
+ for _, ext := range file.GetExtension() {
+ if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
+ if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
+ continue
+ }
+ } else {
+ if ext.GetExtendee() != extendee {
+ continue
+ }
+ }
+ if ext.GetNumber() == fieldNum {
+ return file.GetPackage(), ext
+ }
+ }
+ }
+ return "", nil
+}
+
+func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) {
+ parent := desc.GetMessage(packageName, typeName)
+ if parent == nil {
+ return "", ""
+ }
+ field := parent.GetFieldDescriptor(fieldName)
+ if field == nil {
+ var extPackageName string
+ extPackageName, field = desc.FindExtension(packageName, typeName, fieldName)
+ if field == nil {
+ return "", ""
+ }
+ packageName = extPackageName
+ }
+ typeNames := strings.Split(field.GetTypeName(), ".")
+ if len(typeNames) == 1 {
+ msg := desc.GetMessage(packageName, typeName)
+ if msg == nil {
+ return "", ""
+ }
+ return packageName, msg.GetName()
+ }
+ if len(typeNames) > 2 {
+ for i := 1; i < len(typeNames)-1; i++ {
+ packageName = strings.Join(typeNames[1:len(typeNames)-i], ".")
+ typeName = strings.Join(typeNames[len(typeNames)-i:], ".")
+ msg := desc.GetMessage(packageName, typeName)
+ if msg != nil {
+ typeNames := strings.Split(msg.GetName(), ".")
+ if len(typeNames) == 1 {
+ return packageName, msg.GetName()
+ }
+ return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1]
+ }
+ }
+ }
+ return "", ""
+}
+
+func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto {
+ for _, field := range msg.GetField() {
+ if field.GetName() == fieldName {
+ return field
+ }
+ }
+ return nil
+}
+
+func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto {
+ for _, file := range desc.GetFile() {
+ if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+ continue
+ }
+ for _, enum := range file.GetEnumType() {
+ if enum.GetName() == typeName {
+ return enum
+ }
+ }
+ }
+ return nil
+}
+
+func (f *FieldDescriptorProto) IsEnum() bool {
+ return *f.Type == FieldDescriptorProto_TYPE_ENUM
+}
+
+func (f *FieldDescriptorProto) IsMessage() bool {
+ return *f.Type == FieldDescriptorProto_TYPE_MESSAGE
+}
+
+func (f *FieldDescriptorProto) IsBytes() bool {
+ return *f.Type == FieldDescriptorProto_TYPE_BYTES
+}
+
+func (f *FieldDescriptorProto) IsRepeated() bool {
+ return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED
+}
+
+func (f *FieldDescriptorProto) IsString() bool {
+ return *f.Type == FieldDescriptorProto_TYPE_STRING
+}
+
+func (f *FieldDescriptorProto) IsBool() bool {
+ return *f.Type == FieldDescriptorProto_TYPE_BOOL
+}
+
+func (f *FieldDescriptorProto) IsRequired() bool {
+ return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED
+}
+
+func (f *FieldDescriptorProto) IsPacked() bool {
+ return f.Options != nil && f.GetOptions().GetPacked()
+}
+
+func (m *DescriptorProto) HasExtension() bool {
+ return len(m.ExtensionRange) > 0
+}
diff --git a/vendor/src/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/src/github.com/gogo/protobuf/sortkeys/sortkeys.go
new file mode 100644
index 0000000000..c52878dd59
--- /dev/null
+++ b/vendor/src/github.com/gogo/protobuf/sortkeys/sortkeys.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package sortkeys
+
+import (
+ "sort"
+)
+
+func Strings(l []string) {
+ sort.Strings(l)
+}
+
+func Float64s(l []float64) {
+ sort.Float64s(l)
+}
+
+func Float32s(l []float32) {
+ sort.Sort(Float32Slice(l))
+}
+
+func Int64s(l []int64) {
+ sort.Sort(Int64Slice(l))
+}
+
+func Int32s(l []int32) {
+ sort.Sort(Int32Slice(l))
+}
+
+func Uint64s(l []uint64) {
+ sort.Sort(Uint64Slice(l))
+}
+
+func Uint32s(l []uint32) {
+ sort.Sort(Uint32Slice(l))
+}
+
+func Bools(l []bool) {
+ sort.Sort(BoolSlice(l))
+}
+
+type BoolSlice []bool
+
+func (p BoolSlice) Len() int { return len(p) }
+func (p BoolSlice) Less(i, j int) bool { return !p[i] && p[j] } // false sorts before true; p[j] alone is not a strict weak ordering
+func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Int64Slice []int64
+
+func (p Int64Slice) Len() int { return len(p) }
+func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Int32Slice []int32
+
+func (p Int32Slice) Len() int { return len(p) }
+func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Uint64Slice []uint64
+
+func (p Uint64Slice) Len() int { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Uint32Slice []uint32
+
+func (p Uint32Slice) Len() int { return len(p) }
+func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Float32Slice []float32
+
+func (p Float32Slice) Len() int { return len(p) }
+func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
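+
+// Illustrative sketch (editorial note, not part of the upstream file):
+// generated marshalling code uses these helpers to visit map keys in a
+// deterministic order, along the lines of (m is a hypothetical map[int32]V):
+//
+//	keys := make([]int32, 0, len(m))
+//	for k := range m {
+//		keys = append(keys, k)
+//	}
+//	sortkeys.Int32s(keys)
+//	for _, k := range keys {
+//		// emit m[k]
+//	}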
diff --git a/vendor/src/github.com/golang/mock/LICENSE b/vendor/src/github.com/golang/mock/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/src/github.com/golang/mock/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/golang/mock/gomock/call.go b/vendor/src/github.com/golang/mock/gomock/call.go
new file mode 100644
index 0000000000..c5601970e1
--- /dev/null
+++ b/vendor/src/github.com/golang/mock/gomock/call.go
@@ -0,0 +1,268 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gomock
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// Call represents an expected call to a mock.
+type Call struct {
+ t TestReporter // for triggering test failures on invalid call setup
+
+ receiver interface{} // the receiver of the method call
+ method string // the name of the method
+ args []Matcher // the args
+ rets []interface{} // the return values (if any)
+
+ preReqs []*Call // prerequisite calls
+
+ // Expectations
+ minCalls, maxCalls int
+
+ numCalls int // actual number made
+
+ // Actions
+ doFunc reflect.Value
+ setArgs map[int]reflect.Value
+}
+
+// AnyTimes allows the expectation to be called 0 or more times.
+func (c *Call) AnyTimes() *Call {
+ c.minCalls, c.maxCalls = 0, 1e8 // close enough to infinity
+ return c
+}
+
+// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called, MinTimes also
+// sets the maximum number of calls to infinity.
+func (c *Call) MinTimes(n int) *Call {
+ c.minCalls = n
+ if c.maxCalls == 1 {
+ c.maxCalls = 1e8
+ }
+ return c
+}
+
+// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called, MaxTimes also
+// sets the minimum number of calls to 0.
+func (c *Call) MaxTimes(n int) *Call {
+ c.maxCalls = n
+ if c.minCalls == 1 {
+ c.minCalls = 0
+ }
+ return c
+}
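+
+// Illustrative sketch (editorial note, not part of the upstream file):
+// cardinality modifiers chain on the returned *Call, e.g. to require between
+// two and five invocations of a hypothetical mocked method:
+//
+//	mockObj.EXPECT().SomeMethod(gomock.Any(), gomock.Any()).MinTimes(2).MaxTimes(5)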
+
+// Do declares the action to run when the call is matched.
+// It takes an interface{} argument to support n-arity functions.
+func (c *Call) Do(f interface{}) *Call {
+ // TODO: Check arity and types here, rather than dying badly elsewhere.
+ c.doFunc = reflect.ValueOf(f)
+ return c
+}
+
+func (c *Call) Return(rets ...interface{}) *Call {
+ mt := c.methodType()
+ if len(rets) != mt.NumOut() {
+ c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d",
+ c.receiver, c.method, len(rets), mt.NumOut())
+ }
+ for i, ret := range rets {
+ if got, want := reflect.TypeOf(ret), mt.Out(i); got == want {
+ // Identical types; nothing to do.
+ } else if got == nil {
+ // Nil needs special handling.
+ switch want.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ // ok
+ default:
+ c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable",
+ i, c.receiver, c.method, want)
+ }
+ } else if got.AssignableTo(want) {
+ // Assignable type relation. Make the assignment now so that the generated code
+ // can return the values with a type assertion.
+ v := reflect.New(want).Elem()
+ v.Set(reflect.ValueOf(ret))
+ rets[i] = v.Interface()
+ } else {
+ c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v",
+ i, c.receiver, c.method, got, want)
+ }
+ }
+
+ c.rets = rets
+ return c
+}
+
+func (c *Call) Times(n int) *Call {
+ c.minCalls, c.maxCalls = n, n
+ return c
+}
+
+// SetArg declares an action that will set the nth argument's value,
+// indirected through a pointer.
+func (c *Call) SetArg(n int, value interface{}) *Call {
+ if c.setArgs == nil {
+ c.setArgs = make(map[int]reflect.Value)
+ }
+ mt := c.methodType()
+ // TODO: This will break on variadic methods.
+ // We will need to check those at invocation time.
+ if n < 0 || n >= mt.NumIn() {
+ c.t.Fatalf("SetArg(%d, ...) called for a method with %d args", n, mt.NumIn())
+ }
+ // Permit setting argument through an interface.
+ // In the interface case, we don't (nay, can't) check the type here.
+ at := mt.In(n)
+ switch at.Kind() {
+ case reflect.Ptr:
+ dt := at.Elem()
+ if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) {
+ c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v", n, vt, dt)
+ }
+ case reflect.Interface:
+ // nothing to do
+ default:
+ c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface type %v", n, at)
+ }
+ c.setArgs[n] = reflect.ValueOf(value)
+ return c
+}
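+
+// Illustrative sketch (editorial note, not part of the upstream file): given
+// a hypothetical mocked method Load(dst *string), SetArg writes through the
+// pointer argument when the call matches:
+//
+//	mockObj.EXPECT().Load(gomock.Any()).SetArg(0, "cached value")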
+
+// isPreReq returns true if other is a direct or indirect prerequisite to c.
+func (c *Call) isPreReq(other *Call) bool {
+ for _, preReq := range c.preReqs {
+ if other == preReq || preReq.isPreReq(other) {
+ return true
+ }
+ }
+ return false
+}
+
+// After declares that the call may only match after preReq has been exhausted.
+func (c *Call) After(preReq *Call) *Call {
+ if preReq.isPreReq(c) {
+ msg := fmt.Sprintf(
+ "Loop in call order: %v is a prerequisite to %v (possibly indirectly).",
+ c, preReq,
+ )
+ panic(msg)
+ }
+
+ c.preReqs = append(c.preReqs, preReq)
+ return c
+}
+
+// Returns true iff the minimum number of calls has been made.
+func (c *Call) satisfied() bool {
+ return c.numCalls >= c.minCalls
+}
+
+// Returns true iff the maximum number of calls has been made.
+func (c *Call) exhausted() bool {
+ return c.numCalls >= c.maxCalls
+}
+
+func (c *Call) String() string {
+ args := make([]string, len(c.args))
+ for i, arg := range c.args {
+ args[i] = arg.String()
+ }
+ arguments := strings.Join(args, ", ")
+ return fmt.Sprintf("%T.%v(%s)", c.receiver, c.method, arguments)
+}
+
+// Tests if the given call matches the expected call.
+func (c *Call) matches(args []interface{}) bool {
+ if len(args) != len(c.args) {
+ return false
+ }
+ for i, m := range c.args {
+ if !m.Matches(args[i]) {
+ return false
+ }
+ }
+
+ // Check that all prerequisite calls have been satisfied.
+ for _, preReqCall := range c.preReqs {
+ if !preReqCall.satisfied() {
+ return false
+ }
+ }
+
+ return true
+}
+
+// dropPrereqs tells the expected Call to not re-check prerequisite calls any
+// longer, and to return its current set.
+func (c *Call) dropPrereqs() (preReqs []*Call) {
+ preReqs = c.preReqs
+ c.preReqs = nil
+ return
+}
+
+func (c *Call) call(args []interface{}) (rets []interface{}, action func()) {
+ c.numCalls++
+
+ // Actions
+ if c.doFunc.IsValid() {
+ doArgs := make([]reflect.Value, len(args))
+ ft := c.doFunc.Type()
+ for i := 0; i < ft.NumIn(); i++ {
+ if args[i] != nil {
+ doArgs[i] = reflect.ValueOf(args[i])
+ } else {
+ // Use the zero value for the arg.
+ doArgs[i] = reflect.Zero(ft.In(i))
+ }
+ }
+ action = func() { c.doFunc.Call(doArgs) }
+ }
+ for n, v := range c.setArgs {
+ reflect.ValueOf(args[n]).Elem().Set(v)
+ }
+
+ rets = c.rets
+ if rets == nil {
+ // Synthesize the zero value for each of the return args' types.
+ mt := c.methodType()
+ rets = make([]interface{}, mt.NumOut())
+ for i := 0; i < mt.NumOut(); i++ {
+ rets[i] = reflect.Zero(mt.Out(i)).Interface()
+ }
+ }
+
+ return
+}
+
+func (c *Call) methodType() reflect.Type {
+ recv := reflect.ValueOf(c.receiver)
+ for i := 0; i < recv.Type().NumMethod(); i++ {
+ if recv.Type().Method(i).Name == c.method {
+ return recv.Method(i).Type()
+ }
+ }
+ panic(fmt.Sprintf("gomock: failed finding method %s on %T", c.method, c.receiver))
+}
+
+// InOrder declares that the given calls should occur in order.
+func InOrder(calls ...*Call) {
+ for i := 1; i < len(calls); i++ {
+ calls[i].After(calls[i-1])
+ }
+}
diff --git a/vendor/src/github.com/golang/mock/gomock/callset.go b/vendor/src/github.com/golang/mock/gomock/callset.go
new file mode 100644
index 0000000000..1b7de4c0b3
--- /dev/null
+++ b/vendor/src/github.com/golang/mock/gomock/callset.go
@@ -0,0 +1,76 @@
+// Copyright 2011 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gomock
+
+// callSet represents a set of expected calls, indexed by receiver and method
+// name.
+type callSet map[interface{}]map[string][]*Call
+
+// Add adds a new expected call.
+func (cs callSet) Add(call *Call) {
+ methodMap, ok := cs[call.receiver]
+ if !ok {
+ methodMap = make(map[string][]*Call)
+ cs[call.receiver] = methodMap
+ }
+ methodMap[call.method] = append(methodMap[call.method], call)
+}
+
+// Remove removes an expected call.
+func (cs callSet) Remove(call *Call) {
+ methodMap, ok := cs[call.receiver]
+ if !ok {
+ return
+ }
+ sl := methodMap[call.method]
+ for i, c := range sl {
+ if c == call {
+ // quick removal; we don't need to maintain call order
+ if len(sl) > 1 {
+ sl[i] = sl[len(sl)-1]
+ }
+ methodMap[call.method] = sl[:len(sl)-1]
+ break
+ }
+ }
+}
+
+// FindMatch searches for a matching call. Returns nil if no call matched.
+func (cs callSet) FindMatch(receiver interface{}, method string, args []interface{}) *Call {
+ methodMap, ok := cs[receiver]
+ if !ok {
+ return nil
+ }
+ calls, ok := methodMap[method]
+ if !ok {
+ return nil
+ }
+
+ // Search through the unordered set of calls expected on a method on a
+ // receiver.
+ for _, call := range calls {
+ // A call should not normally still be here if exhausted,
+ // but it can happen if, for instance, .Times(0) was used.
+ // Pretend the call doesn't match.
+ if call.exhausted() {
+ continue
+ }
+ if call.matches(args) {
+ return call
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/src/github.com/golang/mock/gomock/controller.go b/vendor/src/github.com/golang/mock/gomock/controller.go
new file mode 100644
index 0000000000..6ca952803c
--- /dev/null
+++ b/vendor/src/github.com/golang/mock/gomock/controller.go
@@ -0,0 +1,167 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// GoMock - a mock framework for Go.
+//
+// Standard usage:
+// (1) Define an interface that you wish to mock.
+// type MyInterface interface {
+// SomeMethod(x int64, y string)
+// }
+// (2) Use mockgen to generate a mock from the interface.
+// (3) Use the mock in a test:
+// func TestMyThing(t *testing.T) {
+// mockCtrl := gomock.NewController(t)
+// defer mockCtrl.Finish()
+//
+// mockObj := something.NewMockMyInterface(mockCtrl)
+// mockObj.EXPECT().SomeMethod(4, "blah")
+// // pass mockObj to a real object and play with it.
+// }
+//
+// By default, expected calls are not enforced to run in any particular order.
+// Call order dependency can be enforced by use of InOrder and/or Call.After.
+// Call.After can create more varied call order dependencies, but InOrder is
+// often more convenient.
+//
+// The following examples create equivalent call order dependencies.
+//
+// Example of using Call.After to chain expected call order:
+//
+// firstCall := mockObj.EXPECT().SomeMethod(1, "first")
+// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall)
+// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall)
+//
+// Example of using InOrder to declare expected call order:
+//
+// gomock.InOrder(
+// mockObj.EXPECT().SomeMethod(1, "first"),
+// mockObj.EXPECT().SomeMethod(2, "second"),
+// mockObj.EXPECT().SomeMethod(3, "third"),
+// )
+//
+// TODO:
+// - Handle different argument/return types (e.g. ..., chan, map, interface).
+package gomock
+
+import "sync"
+
+// A TestReporter is something that can be used to report test failures.
+// It is satisfied by the standard library's *testing.T.
+type TestReporter interface {
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+}
+
+// A Controller represents the top-level control of a mock ecosystem.
+// It defines the scope and lifetime of mock objects, as well as their expectations.
+// It is safe to call Controller's methods from multiple goroutines.
+type Controller struct {
+ mu sync.Mutex
+ t TestReporter
+ expectedCalls callSet
+}
+
+func NewController(t TestReporter) *Controller {
+ return &Controller{
+ t: t,
+ expectedCalls: make(callSet),
+ }
+}
+
+func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call {
+ // TODO: check arity, types.
+ margs := make([]Matcher, len(args))
+ for i, arg := range args {
+ if m, ok := arg.(Matcher); ok {
+ margs[i] = m
+ } else if arg == nil {
+ // Handle nil specially so that passing a nil interface value
+ // will match the typed nils of concrete args.
+ margs[i] = Nil()
+ } else {
+ margs[i] = Eq(arg)
+ }
+ }
+
+ ctrl.mu.Lock()
+ defer ctrl.mu.Unlock()
+
+ call := &Call{t: ctrl.t, receiver: receiver, method: method, args: margs, minCalls: 1, maxCalls: 1}
+
+ ctrl.expectedCalls.Add(call)
+ return call
+}
+
+func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} {
+ ctrl.mu.Lock()
+ defer ctrl.mu.Unlock()
+
+ expected := ctrl.expectedCalls.FindMatch(receiver, method, args)
+ if expected == nil {
+ ctrl.t.Fatalf("no matching expected call: %T.%v(%v)", receiver, method, args)
+ }
+
+ // Two things happen here:
+	// * the matching call no longer needs to check prerequisite calls,
+	// * and the prerequisite calls are no longer expected, so remove them.
+ preReqCalls := expected.dropPrereqs()
+ for _, preReqCall := range preReqCalls {
+ ctrl.expectedCalls.Remove(preReqCall)
+ }
+
+ rets, action := expected.call(args)
+ if expected.exhausted() {
+ ctrl.expectedCalls.Remove(expected)
+ }
+
+ // Don't hold the lock while doing the call's action (if any)
+ // so that actions may execute concurrently.
+ // We use the deferred Unlock to capture any panics that happen above;
+ // here we add a deferred Lock to balance it.
+ ctrl.mu.Unlock()
+ defer ctrl.mu.Lock()
+ if action != nil {
+ action()
+ }
+
+ return rets
+}
+
+func (ctrl *Controller) Finish() {
+ ctrl.mu.Lock()
+ defer ctrl.mu.Unlock()
+
+ // If we're currently panicking, probably because this is a deferred call,
+ // pass through the panic.
+ if err := recover(); err != nil {
+ panic(err)
+ }
+
+ // Check that all remaining expected calls are satisfied.
+ failures := false
+ for _, methodMap := range ctrl.expectedCalls {
+ for _, calls := range methodMap {
+ for _, call := range calls {
+ if !call.satisfied() {
+ ctrl.t.Errorf("missing call(s) to %v", call)
+ failures = true
+ }
+ }
+ }
+ }
+ if failures {
+ ctrl.t.Fatalf("aborting test due to missing call(s)")
+ }
+}
diff --git a/vendor/src/github.com/golang/mock/gomock/matchers.go b/vendor/src/github.com/golang/mock/gomock/matchers.go
new file mode 100644
index 0000000000..32628ae8cb
--- /dev/null
+++ b/vendor/src/github.com/golang/mock/gomock/matchers.go
@@ -0,0 +1,97 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gomock
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// A Matcher is a representation of a class of values.
+// It is used to represent the valid or expected arguments to a mocked method.
+type Matcher interface {
+	// Matches returns whether x is a match.
+ Matches(x interface{}) bool
+
+ // String describes what the matcher matches.
+ String() string
+}
+
+type anyMatcher struct{}
+
+func (anyMatcher) Matches(x interface{}) bool {
+ return true
+}
+
+func (anyMatcher) String() string {
+ return "is anything"
+}
+
+type eqMatcher struct {
+ x interface{}
+}
+
+func (e eqMatcher) Matches(x interface{}) bool {
+ return reflect.DeepEqual(e.x, x)
+}
+
+func (e eqMatcher) String() string {
+ return fmt.Sprintf("is equal to %v", e.x)
+}
+
+type nilMatcher struct{}
+
+func (nilMatcher) Matches(x interface{}) bool {
+ if x == nil {
+ return true
+ }
+
+ v := reflect.ValueOf(x)
+ switch v.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map,
+ reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+
+ return false
+}
+
+func (nilMatcher) String() string {
+ return "is nil"
+}
+
+type notMatcher struct {
+ m Matcher
+}
+
+func (n notMatcher) Matches(x interface{}) bool {
+ return !n.m.Matches(x)
+}
+
+func (n notMatcher) String() string {
+ // TODO: Improve this if we add a NotString method to the Matcher interface.
+ return "not(" + n.m.String() + ")"
+}
+
+// Constructors
+func Any() Matcher { return anyMatcher{} }
+func Eq(x interface{}) Matcher { return eqMatcher{x} }
+func Nil() Matcher { return nilMatcher{} }
+func Not(x interface{}) Matcher {
+ if m, ok := x.(Matcher); ok {
+ return notMatcher{m}
+ }
+ return notMatcher{Eq(x)}
+}
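+
+// Illustrative sketch (editorial note, not part of the upstream file):
+// matchers and plain values mix freely in an expectation; the controller
+// wraps non-Matcher values with Eq (and nil with Nil):
+//
+//	mockObj.EXPECT().SomeMethod(gomock.Not(gomock.Nil()), "exact string")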
diff --git a/vendor/src/github.com/google/certificate-transparency/LICENSE b/vendor/src/github.com/google/certificate-transparency/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/google/certificate-transparency/go/README.md b/vendor/src/github.com/google/certificate-transparency/go/README.md
new file mode 100644
index 0000000000..82c5d1b3af
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/README.md
@@ -0,0 +1,25 @@
+This is a very early version of a Certificate Transparency log
+client written in Go, along with a log scanner tool.
+
+You'll need go v1.1 or higher to compile.
+
+# Installation
+
+This Go code must be imported into your Go workspace before you can
+use it, which can be done with:
+
+ go get github.com/google/certificate-transparency/go/client
+ go get github.com/google/certificate-transparency/go/scanner
+ etc.
+
+# Building the binaries
+
+To compile the log scanner run:
+
+ go build github.com/google/certificate-transparency/go/scanner/main/scanner.go
+
+# Contributing
+
+When sending pull requests, please ensure that everything's been run
+through ```gofmt``` beforehand so we can keep everything nice and
+tidy.
diff --git a/vendor/src/github.com/google/certificate-transparency/go/asn1/asn1.go b/vendor/src/github.com/google/certificate-transparency/go/asn1/asn1.go
new file mode 100755
index 0000000000..e987477952
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/asn1/asn1.go
@@ -0,0 +1,956 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
+// as defined in ITU-T Rec X.690.
+//
+// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,''
+// http://luca.ntop.org/Teaching/Appunti/asn1.html.
+//
+// START CT CHANGES
+// This is a fork of the Go standard library ASN.1 implementation
+// (encoding/asn1). The main difference is that this version tries to correct
+// for errors (e.g. use of tagPrintableString when the string data is really
+// ISO8859-1 - a common error present in many x509 certificates in the wild.)
+// END CT CHANGES
+package asn1
+
+// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc
+// are different encoding formats for those objects. Here, we'll be dealing
+// with DER, the Distinguished Encoding Rules. DER is used in X.509 because
+// it's fast to parse and, unlike BER, has a unique encoding for every object.
+// When calculating hashes over objects, it's important that the resulting
+// bytes be the same at both ends and DER removes this margin of error.
+//
+// ASN.1 is very complex and this package doesn't attempt to implement
+// everything by any means.
+
+import (
+ // START CT CHANGES
+ "errors"
+ "fmt"
+ // END CT CHANGES
+ "math/big"
+ "reflect"
+ // START CT CHANGES
+ "strings"
+ // END CT CHANGES
+ "time"
+)
+
+// A StructuralError suggests that the ASN.1 data is valid, but the Go type
+// which is receiving it doesn't match.
+type StructuralError struct {
+ Msg string
+}
+
+func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg }
+
+// A SyntaxError suggests that the ASN.1 data is invalid.
+type SyntaxError struct {
+ Msg string
+}
+
+func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg }
+
+// We start by dealing with each of the primitive types in turn.
+
+// BOOLEAN
+
+func parseBool(bytes []byte) (ret bool, err error) {
+ if len(bytes) != 1 {
+ err = SyntaxError{"invalid boolean"}
+ return
+ }
+
+ // DER demands that "If the encoding represents the boolean value TRUE,
+ // its single contents octet shall have all eight bits set to one."
+ // Thus only 0 and 255 are valid encoded values.
+ switch bytes[0] {
+ case 0:
+ ret = false
+ case 0xff:
+ ret = true
+ default:
+ err = SyntaxError{"invalid boolean"}
+ }
+
+ return
+}
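+
+// Illustrative example (editorial note, not part of the upstream file):
+//
+//	parseBool([]byte{0xff}) // true, nil
+//	parseBool([]byte{0x01}) // false, SyntaxError: DER allows only 0x00 and 0xff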
+
+// INTEGER
+
+// parseInt64 treats the given bytes as a big-endian, signed integer and
+// returns the result.
+func parseInt64(bytes []byte) (ret int64, err error) {
+ if len(bytes) > 8 {
+ // We'll overflow an int64 in this case.
+ err = StructuralError{"integer too large"}
+ return
+ }
+ for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+ ret <<= 8
+ ret |= int64(bytes[bytesRead])
+ }
+
+ // Shift up and down in order to sign extend the result.
+ ret <<= 64 - uint8(len(bytes))*8
+ ret >>= 64 - uint8(len(bytes))*8
+ return
+}
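+
+// Illustrative example (editorial note, not part of the upstream file): the
+// shift up and down sign-extends short encodings, so
+//
+//	parseInt64([]byte{0xff})       // -1
+//	parseInt64([]byte{0x00, 0x80}) // 128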
+
+// parseInt32 treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseInt32(bytes []byte) (int32, error) {
+ ret64, err := parseInt64(bytes)
+ if err != nil {
+ return 0, err
+ }
+ if ret64 != int64(int32(ret64)) {
+ return 0, StructuralError{"integer too large"}
+ }
+ return int32(ret64), nil
+}
+
+var bigOne = big.NewInt(1)
+
+// parseBigInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseBigInt(bytes []byte) *big.Int {
+ ret := new(big.Int)
+ if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
+ // This is a negative number.
+ notBytes := make([]byte, len(bytes))
+ for i := range notBytes {
+ notBytes[i] = ^bytes[i]
+ }
+ ret.SetBytes(notBytes)
+ ret.Add(ret, bigOne)
+ ret.Neg(ret)
+ return ret
+ }
+ ret.SetBytes(bytes)
+ return ret
+}
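+
+// Illustrative example (editorial note, not part of the upstream file): a set
+// high bit in the first octet marks a negative two's-complement value:
+//
+//	parseBigInt([]byte{0xff})       // -1
+//	parseBigInt([]byte{0x00, 0xff}) // 255 (leading zero keeps it positive)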
+
+// BIT STRING
+
+// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
+// bit string is padded up to the nearest byte in memory and the number of
+// valid bits is recorded. Padding bits will be zero.
+type BitString struct {
+ Bytes []byte // bits packed into bytes.
+ BitLength int // length in bits.
+}
+
+// At returns the bit at the given index. If the index is out of range it
+// returns 0.
+func (b BitString) At(i int) int {
+ if i < 0 || i >= b.BitLength {
+ return 0
+ }
+ x := i / 8
+ y := 7 - uint(i%8)
+ return int(b.Bytes[x]>>y) & 1
+}
+
+// RightAlign returns a slice where the padding bits are at the beginning. The
+// slice may share memory with the BitString.
+func (b BitString) RightAlign() []byte {
+ shift := uint(8 - (b.BitLength % 8))
+ if shift == 8 || len(b.Bytes) == 0 {
+ return b.Bytes
+ }
+
+ a := make([]byte, len(b.Bytes))
+ a[0] = b.Bytes[0] >> shift
+ for i := 1; i < len(b.Bytes); i++ {
+ a[i] = b.Bytes[i-1] << (8 - shift)
+ a[i] |= b.Bytes[i] >> shift
+ }
+
+ return a
+}
+
+// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
+func parseBitString(bytes []byte) (ret BitString, err error) {
+ if len(bytes) == 0 {
+ err = SyntaxError{"zero length BIT STRING"}
+ return
+ }
+ paddingBits := int(bytes[0])
+ if paddingBits > 7 ||
+ len(bytes) == 1 && paddingBits > 0 ||
+ bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
+ err = SyntaxError{"invalid padding bits in BIT STRING"}
+ return
+ }
+ ret.BitLength = (len(bytes)-1)*8 - paddingBits
+ ret.Bytes = bytes[1:]
+ return
+}
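+
+// For example, the contents {0x06, 0x6e, 0x5d, 0xc0} declare six padding
+// bits, giving BitLength 18 over Bytes {0x6e, 0x5d, 0xc0}; At(0) is 0 and
+// At(1) is 1.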
+
+// OBJECT IDENTIFIER
+
+// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
+type ObjectIdentifier []int
+
+// Equal reports whether oi and other represent the same identifier.
+func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
+ if len(oi) != len(other) {
+ return false
+ }
+ for i := 0; i < len(oi); i++ {
+ if oi[i] != other[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte) (s []int, err error) {
+ if len(bytes) == 0 {
+ err = SyntaxError{"zero length OBJECT IDENTIFIER"}
+ return
+ }
+
+ // In the worst case, we get two elements from the first byte (which is
+ // encoded differently) and then every varint is a single byte long.
+ s = make([]int, len(bytes)+1)
+
+ // The first varint is 40*value1 + value2:
+ // According to this packing, value1 can take the values 0, 1 and 2 only.
+ // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
+ // then there are no restrictions on value2.
+ v, offset, err := parseBase128Int(bytes, 0)
+ if err != nil {
+ return
+ }
+ if v < 80 {
+ s[0] = v / 40
+ s[1] = v % 40
+ } else {
+ s[0] = 2
+ s[1] = v - 80
+ }
+
+ i := 2
+ for ; offset < len(bytes); i++ {
+ v, offset, err = parseBase128Int(bytes, offset)
+ if err != nil {
+ return
+ }
+ s[i] = v
+ }
+ s = s[0:i]
+ return
+}
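+
+// For example, {0x2a, 0x86, 0x48} decodes to the OID 1.2.840: the first
+// octet 42 unpacks as 40*1 + 2, and {0x86, 0x48} is the base-128 encoding
+// of 840.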
+
+// ENUMERATED
+
+// An Enumerated is represented as a plain int.
+type Enumerated int
+
+// FLAG
+
+// A Flag accepts any data and is set to true if present.
+type Flag bool
+
+// parseBase128Int parses a base-128 encoded int from the given offset in the
+// given byte slice. It returns the value and the new offset.
+func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
+ offset = initOffset
+ for shifted := 0; offset < len(bytes); shifted++ {
+ if shifted > 4 {
+ err = StructuralError{"base 128 integer too large"}
+ return
+ }
+ ret <<= 7
+ b := bytes[offset]
+ ret |= int(b & 0x7f)
+ offset++
+ if b&0x80 == 0 {
+ return
+ }
+ }
+ err = SyntaxError{"truncated base 128 integer"}
+ return
+}
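+
+// Each byte contributes seven bits, with the high bit acting as a
+// continuation flag, so {0x81, 0x01} decodes to (1<<7)|1 = 129.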
+
+// UTCTime
+
+func parseUTCTime(bytes []byte) (ret time.Time, err error) {
+ s := string(bytes)
+ ret, err = time.Parse("0601021504Z0700", s)
+ if err != nil {
+ ret, err = time.Parse("060102150405Z0700", s)
+ }
+ if err == nil && ret.Year() >= 2050 {
+ // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+ ret = ret.AddDate(-100, 0, 0)
+ }
+
+ return
+}
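+
+// The two-digit year is pivoted at 2050: "491231235959Z" parses to
+// 2049-12-31, while "500101000000Z" initially parses to 2050-01-01 and is
+// shifted back a century to 1950-01-01.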
+
+// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
+// and returns the resulting time.
+func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) {
+ return time.Parse("20060102150405Z0700", string(bytes))
+}
+
+// PrintableString
+
+// parsePrintableString parses an ASN.1 PrintableString from the given byte
+// slice and returns it.
+func parsePrintableString(bytes []byte) (ret string, err error) {
+ for _, b := range bytes {
+ if !isPrintable(b) {
+ err = SyntaxError{"PrintableString contains invalid character"}
+ return
+ }
+ }
+ ret = string(bytes)
+ return
+}
+
+// isPrintable returns true iff the given b is in the ASN.1 PrintableString set.
+func isPrintable(b byte) bool {
+ return 'a' <= b && b <= 'z' ||
+ 'A' <= b && b <= 'Z' ||
+ '0' <= b && b <= '9' ||
+ '\'' <= b && b <= ')' ||
+ '+' <= b && b <= '/' ||
+ b == ' ' ||
+ b == ':' ||
+ b == '=' ||
+ b == '?' ||
+ // This is technically not allowed in a PrintableString.
+ // However, x509 certificates with wildcard strings don't
+ // always use the correct string type so we permit it.
+ b == '*'
+}
+
+// IA5String
+
+// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given
+// byte slice and returns it.
+func parseIA5String(bytes []byte) (ret string, err error) {
+ for _, b := range bytes {
+ if b >= 0x80 {
+ err = SyntaxError{"IA5String contains invalid character"}
+ return
+ }
+ }
+ ret = string(bytes)
+ return
+}
+
+// T61String
+
+// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given
+// byte slice and returns it.
+func parseT61String(bytes []byte) (ret string, err error) {
+ return string(bytes), nil
+}
+
+// UTF8String
+
+// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte
+// slice and returns it.
+func parseUTF8String(bytes []byte) (ret string, err error) {
+ return string(bytes), nil
+}
+
+// A RawValue represents an undecoded ASN.1 object.
+type RawValue struct {
+ Class, Tag int
+ IsCompound bool
+ Bytes []byte
+ FullBytes []byte // includes the tag and length
+}
+
+// RawContent is used to signal that the undecoded, DER data needs to be
+// preserved for a struct. To use it, the first field of the struct must have
+// this type. It's an error for any of the other fields to have this type.
+type RawContent []byte
+
+// Tagging
+
+// parseTagAndLength parses an ASN.1 tag and length pair from the given offset
+// into a byte slice. It returns the parsed data and the new offset. SET and
+// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
+// don't distinguish between ordered and unordered objects in this code.
+func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) {
+ offset = initOffset
+ b := bytes[offset]
+ offset++
+ ret.class = int(b >> 6)
+ ret.isCompound = b&0x20 == 0x20
+ ret.tag = int(b & 0x1f)
+
+ // If the bottom five bits are set, then the tag number is actually base 128
+ // encoded afterwards
+ if ret.tag == 0x1f {
+ ret.tag, offset, err = parseBase128Int(bytes, offset)
+ if err != nil {
+ return
+ }
+ }
+ if offset >= len(bytes) {
+ err = SyntaxError{"truncated tag or length"}
+ return
+ }
+ b = bytes[offset]
+ offset++
+ if b&0x80 == 0 {
+ // The length is encoded in the bottom 7 bits.
+ ret.length = int(b & 0x7f)
+ } else {
+ // Bottom 7 bits give the number of length bytes to follow.
+ numBytes := int(b & 0x7f)
+ if numBytes == 0 {
+ err = SyntaxError{"indefinite length found (not DER)"}
+ return
+ }
+ ret.length = 0
+ for i := 0; i < numBytes; i++ {
+ if offset >= len(bytes) {
+ err = SyntaxError{"truncated tag or length"}
+ return
+ }
+ b = bytes[offset]
+ offset++
+ if ret.length >= 1<<23 {
+ // We can't shift ret.length up without
+ // overflowing.
+ err = StructuralError{"length too large"}
+ return
+ }
+ ret.length <<= 8
+ ret.length |= int(b)
+ if ret.length == 0 {
+ // DER requires that lengths be minimal.
+ err = StructuralError{"superfluous leading zeros in length"}
+ return
+ }
+ }
+ }
+
+ return
+}
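+
+// For instance, the header {0x30, 0x03} parses as UNIVERSAL class,
+// compound, tag 16 (SEQUENCE) with a three-byte body, while
+// {0x02, 0x81, 0x80} is an INTEGER using a long-form length of 128.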
+
+// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
+// a number of ASN.1 values from the given byte slice and returns them as a
+// slice of Go values of the given type.
+func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) {
+ expectedTag, compoundType, ok := getUniversalType(elemType)
+ if !ok {
+ err = StructuralError{"unknown Go type for slice"}
+ return
+ }
+
+ // First we iterate over the input and count the number of elements,
+ // checking that the types are correct in each case.
+ numElements := 0
+ for offset := 0; offset < len(bytes); {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ // We pretend that GENERAL STRINGs are PRINTABLE STRINGs so
+ // that a sequence of them can be parsed into a []string.
+ if t.tag == tagGeneralString {
+ t.tag = tagPrintableString
+ }
+ if t.class != classUniversal || t.isCompound != compoundType || t.tag != expectedTag {
+ err = StructuralError{"sequence tag mismatch"}
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"truncated sequence"}
+ return
+ }
+ offset += t.length
+ numElements++
+ }
+ ret = reflect.MakeSlice(sliceType, numElements, numElements)
+ params := fieldParameters{}
+ offset := 0
+ for i := 0; i < numElements; i++ {
+ offset, err = parseField(ret.Index(i), bytes, offset, params)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+var (
+ bitStringType = reflect.TypeOf(BitString{})
+ objectIdentifierType = reflect.TypeOf(ObjectIdentifier{})
+ enumeratedType = reflect.TypeOf(Enumerated(0))
+ flagType = reflect.TypeOf(Flag(false))
+ timeType = reflect.TypeOf(time.Time{})
+ rawValueType = reflect.TypeOf(RawValue{})
+ rawContentsType = reflect.TypeOf(RawContent(nil))
+ bigIntType = reflect.TypeOf(new(big.Int))
+)
+
+// invalidLength returns true iff offset + length > sliceLength, or if the
+// addition would overflow.
+func invalidLength(offset, length, sliceLength int) bool {
+ return offset+length < offset || offset+length > sliceLength
+}
+
+// START CT CHANGES
+
+// Tests whether the data in |bytes| would be a valid ISO8859-1 string.
+// Clearly, a sequence of bytes consisting solely of valid ISO8859-1
+// codepoints does not imply that the encoding MUST be ISO8859-1, rather that
+// you would not encounter an error trying to interpret the data as such.
+func couldBeISO8859_1(bytes []byte) bool {
+ for _, b := range bytes {
+ if b < 0x20 || (b >= 0x7F && b < 0xA0) {
+ return false
+ }
+ }
+ return true
+}
+
+// Checks whether the data in |bytes| would be a valid T.61 string.
+// Clearly, a sequence of bytes consisting solely of valid T.61
+// codepoints does not imply that the encoding MUST be T.61, rather that
+// you would not encounter an error trying to interpret the data as such.
+func couldBeT61(bytes []byte) bool {
+ for _, b := range bytes {
+ switch b {
+ case 0x00:
+ // Since we're guessing at (incorrect) encodings for a
+ // PrintableString, we'll err on the side of caution and disallow
+ // strings with a NUL in them; we don't want to re-create a PayPal
+ // NUL situation in monitors.
+ fallthrough
+ case 0x23, 0x24, 0x5C, 0x5E, 0x60, 0x7B, 0x7D, 0x7E, 0xA5, 0xA6, 0xAC, 0xAD, 0xAE, 0xAF,
+ 0xB9, 0xBA, 0xC0, 0xC9, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9,
+ 0xDA, 0xDB, 0xDC, 0xDE, 0xDF, 0xE5, 0xFF:
+ // These are all invalid code points in T.61, so it can't be a T.61 string.
+ return false
+ }
+ }
+ return true
+}
+
+// Converts the data in |bytes| to the equivalent UTF-8 string.
+func iso8859_1ToUTF8(bytes []byte) string {
+ buf := make([]rune, len(bytes))
+ for i, b := range bytes {
+ buf[i] = rune(b)
+ }
+ return string(buf)
+}
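+
+// For example, the single ISO8859-1 byte 0xe9 ('é') becomes the two-byte
+// UTF-8 sequence for U+00E9.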
+
+// END CT CHANGES
+
+// parseField is the main parsing function. Given a byte slice and an offset
+// into the array, it will try to parse a suitable ASN.1 value out and store it
+// in the given Value.
+func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
+ offset = initOffset
+ fieldType := v.Type()
+
+ // If we have run out of data, it may be that there are optional elements at the end.
+ if offset == len(bytes) {
+ if !setDefaultValue(v, params) {
+ err = SyntaxError{"sequence truncated"}
+ }
+ return
+ }
+
+ // Deal with raw values.
+ if fieldType == rawValueType {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated"}
+ return
+ }
+ result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]}
+ offset += t.length
+ v.Set(reflect.ValueOf(result))
+ return
+ }
+
+ // Deal with the ANY type.
+ if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated"}
+ return
+ }
+ var result interface{}
+ if !t.isCompound && t.class == classUniversal {
+ innerBytes := bytes[offset : offset+t.length]
+ switch t.tag {
+ case tagPrintableString:
+ result, err = parsePrintableString(innerBytes)
+ // START CT CHANGES
+ if err != nil && strings.Contains(err.Error(), "PrintableString contains invalid character") {
+ // Probably an ISO8859-1 string stuffed in, check if it
+ // would be valid and assume that's what's happened if so,
+ // otherwise try T.61, failing that give up and just assign
+ // the bytes
+ switch {
+ case couldBeISO8859_1(innerBytes):
+ result, err = iso8859_1ToUTF8(innerBytes), nil
+ case couldBeT61(innerBytes):
+ result, err = parseT61String(innerBytes)
+ default:
+ result = nil
+ err = errors.New("PrintableString contains invalid character, but couldn't determine correct String type.")
+ }
+ }
+ // END CT CHANGES
+ case tagIA5String:
+ result, err = parseIA5String(innerBytes)
+ case tagT61String:
+ result, err = parseT61String(innerBytes)
+ case tagUTF8String:
+ result, err = parseUTF8String(innerBytes)
+ case tagInteger:
+ result, err = parseInt64(innerBytes)
+ case tagBitString:
+ result, err = parseBitString(innerBytes)
+ case tagOID:
+ result, err = parseObjectIdentifier(innerBytes)
+ case tagUTCTime:
+ result, err = parseUTCTime(innerBytes)
+ case tagOctetString:
+ result = innerBytes
+ default:
+ // If we don't know how to handle the type, we just leave Value as nil.
+ }
+ }
+ offset += t.length
+ if err != nil {
+ return
+ }
+ if result != nil {
+ v.Set(reflect.ValueOf(result))
+ }
+ return
+ }
+ universalTag, compoundType, ok1 := getUniversalType(fieldType)
+ if !ok1 {
+ err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)}
+ return
+ }
+
+ t, offset, err := parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ if params.explicit {
+ expectedClass := classContextSpecific
+ if params.application {
+ expectedClass = classApplication
+ }
+ if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
+ if t.length > 0 {
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ } else {
+ if fieldType != flagType {
+ err = StructuralError{"zero length explicit tag was not an asn1.Flag"}
+ return
+ }
+ v.SetBool(true)
+ return
+ }
+ } else {
+ // The tags didn't match, it might be an optional element.
+ ok := setDefaultValue(v, params)
+ if ok {
+ offset = initOffset
+ } else {
+ err = StructuralError{"explicitly tagged member didn't match"}
+ }
+ return
+ }
+ }
+
+ // Special case for strings: all the ASN.1 string types map to the Go
+ // type string. getUniversalType returns the tag for PrintableString
+ // when it sees a string, so if we see a different string type on the
+ // wire, we change the universal type to match.
+ if universalTag == tagPrintableString {
+ switch t.tag {
+ case tagIA5String, tagGeneralString, tagT61String, tagUTF8String:
+ universalTag = t.tag
+ }
+ }
+
+ // Special case for time: UTCTime and GeneralizedTime both map to the
+ // Go type time.Time.
+ if universalTag == tagUTCTime && t.tag == tagGeneralizedTime {
+ universalTag = tagGeneralizedTime
+ }
+
+ expectedClass := classUniversal
+ expectedTag := universalTag
+
+ if !params.explicit && params.tag != nil {
+ expectedClass = classContextSpecific
+ expectedTag = *params.tag
+ }
+
+ if !params.explicit && params.application && params.tag != nil {
+ expectedClass = classApplication
+ expectedTag = *params.tag
+ }
+
+ // We have unwrapped any explicit tagging at this point.
+ if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType {
+ // Tags don't match. Again, it could be an optional element.
+ ok := setDefaultValue(v, params)
+ if ok {
+ offset = initOffset
+ } else {
+ err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)}
+ }
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated"}
+ return
+ }
+ innerBytes := bytes[offset : offset+t.length]
+ offset += t.length
+
+ // We deal with the structures defined in this package first.
+ switch fieldType {
+ case objectIdentifierType:
+ newSlice, err1 := parseObjectIdentifier(innerBytes)
+ v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice)))
+ if err1 == nil {
+ reflect.Copy(v, reflect.ValueOf(newSlice))
+ }
+ err = err1
+ return
+ case bitStringType:
+ bs, err1 := parseBitString(innerBytes)
+ if err1 == nil {
+ v.Set(reflect.ValueOf(bs))
+ }
+ err = err1
+ return
+ case timeType:
+ var time time.Time
+ var err1 error
+ if universalTag == tagUTCTime {
+ time, err1 = parseUTCTime(innerBytes)
+ } else {
+ time, err1 = parseGeneralizedTime(innerBytes)
+ }
+ if err1 == nil {
+ v.Set(reflect.ValueOf(time))
+ }
+ err = err1
+ return
+ case enumeratedType:
+ parsedInt, err1 := parseInt32(innerBytes)
+ if err1 == nil {
+ v.SetInt(int64(parsedInt))
+ }
+ err = err1
+ return
+ case flagType:
+ v.SetBool(true)
+ return
+ case bigIntType:
+ parsedInt := parseBigInt(innerBytes)
+ v.Set(reflect.ValueOf(parsedInt))
+ return
+ }
+ switch val := v; val.Kind() {
+ case reflect.Bool:
+ parsedBool, err1 := parseBool(innerBytes)
+ if err1 == nil {
+ val.SetBool(parsedBool)
+ }
+ err = err1
+ return
+ case reflect.Int, reflect.Int32, reflect.Int64:
+ if val.Type().Size() == 4 {
+ parsedInt, err1 := parseInt32(innerBytes)
+ if err1 == nil {
+ val.SetInt(int64(parsedInt))
+ }
+ err = err1
+ } else {
+ parsedInt, err1 := parseInt64(innerBytes)
+ if err1 == nil {
+ val.SetInt(parsedInt)
+ }
+ err = err1
+ }
+ return
+ // TODO(dfc) Add support for the remaining integer types
+ case reflect.Struct:
+ structType := fieldType
+
+ if structType.NumField() > 0 &&
+ structType.Field(0).Type == rawContentsType {
+ bytes := bytes[initOffset:offset]
+ val.Field(0).Set(reflect.ValueOf(RawContent(bytes)))
+ }
+
+ innerOffset := 0
+ for i := 0; i < structType.NumField(); i++ {
+ field := structType.Field(i)
+ if i == 0 && field.Type == rawContentsType {
+ continue
+ }
+ innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1")))
+ if err != nil {
+ return
+ }
+ }
+ // We allow extra bytes at the end of the SEQUENCE because
+ // adding elements to the end has been used in X.509 as the
+ // version numbers have increased.
+ return
+ case reflect.Slice:
+ sliceType := fieldType
+ if sliceType.Elem().Kind() == reflect.Uint8 {
+ val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes)))
+ reflect.Copy(val, reflect.ValueOf(innerBytes))
+ return
+ }
+ newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem())
+ if err1 == nil {
+ val.Set(newSlice)
+ }
+ err = err1
+ return
+ case reflect.String:
+ var v string
+ switch universalTag {
+ case tagPrintableString:
+ v, err = parsePrintableString(innerBytes)
+ case tagIA5String:
+ v, err = parseIA5String(innerBytes)
+ case tagT61String:
+ v, err = parseT61String(innerBytes)
+ case tagUTF8String:
+ v, err = parseUTF8String(innerBytes)
+ case tagGeneralString:
+ // GeneralString is specified in ISO-2022/ECMA-35.
+ // A brief review suggests that it includes structures
+ // that allow the encoding to change midstring and
+ // such. We give up and pass it as an 8-bit string.
+ v, err = parseT61String(innerBytes)
+ default:
+ err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)}
+ }
+ if err == nil {
+ val.SetString(v)
+ }
+ return
+ }
+ err = StructuralError{"unsupported: " + v.Type().String()}
+ return
+}
+
+// setDefaultValue is used to install a default value, from a tag string, into
+// a Value. It is successful if the field was optional, even if a default value
+// wasn't provided or it failed to install it into the Value.
+func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
+ if !params.optional {
+ return
+ }
+ ok = true
+ if params.defaultValue == nil {
+ return
+ }
+ switch val := v; val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ val.SetInt(*params.defaultValue)
+ }
+ return
+}
+
+// Unmarshal parses the DER-encoded ASN.1 data structure b
+// and uses the reflect package to fill in an arbitrary value pointed at by val.
+// Because Unmarshal uses the reflect package, the structs
+// being written to must use upper case field names.
+//
+// An ASN.1 INTEGER can be written to an int, int32, int64,
+// or *big.Int (from the math/big package).
+// If the encoded value does not fit in the Go type,
+// Unmarshal returns a parse error.
+//
+// An ASN.1 BIT STRING can be written to a BitString.
+//
+// An ASN.1 OCTET STRING can be written to a []byte.
+//
+// An ASN.1 OBJECT IDENTIFIER can be written to an
+// ObjectIdentifier.
+//
+// An ASN.1 ENUMERATED can be written to an Enumerated.
+//
+// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time.
+//
+// An ASN.1 PrintableString or IA5String can be written to a string.
+//
+// Any of the above ASN.1 values can be written to an interface{}.
+// The value stored in the interface has the corresponding Go type.
+// For integers, that type is int64.
+//
+// An ASN.1 SEQUENCE OF x or SET OF x can be written
+// to a slice if an x can be written to the slice's element type.
+//
+// An ASN.1 SEQUENCE or SET can be written to a struct
+// if each of the elements in the sequence can be
+// written to the corresponding element in the struct.
+//
+// The following tags on struct fields have special meaning to Unmarshal:
+//
+// optional marks the field as ASN.1 OPTIONAL
+// [explicit] tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
+// default:x sets the default value for optional integer fields
+//
+// If the type of the first field of a structure is RawContent then the raw
+// ASN.1 contents of the struct will be stored in it.
+//
+// Other ASN.1 types are not supported; if it encounters them,
+// Unmarshal returns a parse error.
+func Unmarshal(b []byte, val interface{}) (rest []byte, err error) {
+ return UnmarshalWithParams(b, val, "")
+}
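+
+// A minimal usage sketch (the point type and derBytes value are
+// illustrative, not part of this package):
+//
+//	type point struct {
+//		X, Y int
+//	}
+//	var p point
+//	rest, err := Unmarshal(derBytes, &p) // derBytes: a SEQUENCE of two INTEGERs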
+
+// UnmarshalWithParams allows field parameters to be specified for the
+// top-level element. The form of the params is the same as the field tags.
+func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) {
+ v := reflect.ValueOf(val).Elem()
+ offset, err := parseField(v, b, 0, parseFieldParameters(params))
+ if err != nil {
+ return nil, err
+ }
+ return b[offset:], nil
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/asn1/common.go b/vendor/src/github.com/google/certificate-transparency/go/asn1/common.go
new file mode 100755
index 0000000000..33a117ece1
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/asn1/common.go
@@ -0,0 +1,163 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// ASN.1 objects have metadata preceding them:
+// the tag: the type of the object
+// a flag denoting if this object is compound or not
+// the class type: the namespace of the tag
+// the length of the object, in bytes
+
+// Here are some standard tags and classes
+
+const (
+ tagBoolean = 1
+ tagInteger = 2
+ tagBitString = 3
+ tagOctetString = 4
+ tagOID = 6
+ tagEnum = 10
+ tagUTF8String = 12
+ tagSequence = 16
+ tagSet = 17
+ tagPrintableString = 19
+ tagT61String = 20
+ tagIA5String = 22
+ tagUTCTime = 23
+ tagGeneralizedTime = 24
+ tagGeneralString = 27
+)
+
+const (
+ classUniversal = 0
+ classApplication = 1
+ classContextSpecific = 2
+ classPrivate = 3
+)
+
+type tagAndLength struct {
+ class, tag, length int
+ isCompound bool
+}
+
+// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead
+// of" and "in addition to". When not specified, every primitive type has a
+// default tag in the UNIVERSAL class.
+//
+// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1
+// doesn't actually have a UNIVERSAL keyword). However, saying [IMPLICIT
+// CONTEXT-SPECIFIC 42] means that the tag is replaced by another.
+//
+// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an
+// /additional/ tag would wrap the default tag. This explicit tag will have the
+// compound flag set.
+//
+// (This is used in order to remove ambiguity with optional elements.)
+//
+// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we
+// don't support that here. We support a single layer of EXPLICIT or IMPLICIT
+// tagging with tag strings on the fields of a structure.
+
+// fieldParameters is the parsed representation of tag string from a structure field.
+type fieldParameters struct {
+ optional bool // true iff the field is OPTIONAL
+ explicit bool // true iff an EXPLICIT tag is in use.
+ application bool // true iff an APPLICATION tag is in use.
+ defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
+ tag *int // the EXPLICIT or IMPLICIT tag (maybe nil).
+ stringType int // the string tag to use when marshaling.
+ set bool // true iff this should be encoded as a SET
+ omitEmpty bool // true iff this should be omitted if empty when marshaling.
+
+ // Invariants:
+ // if explicit is set, tag is non-nil.
+}
+
+// Given a tag string with the format specified in the package comment,
+// parseFieldParameters will parse it into a fieldParameters structure,
+// ignoring unknown parts of the string.
+func parseFieldParameters(str string) (ret fieldParameters) {
+ for _, part := range strings.Split(str, ",") {
+ switch {
+ case part == "optional":
+ ret.optional = true
+ case part == "explicit":
+ ret.explicit = true
+ if ret.tag == nil {
+ ret.tag = new(int)
+ }
+ case part == "ia5":
+ ret.stringType = tagIA5String
+ case part == "printable":
+ ret.stringType = tagPrintableString
+ case part == "utf8":
+ ret.stringType = tagUTF8String
+ case strings.HasPrefix(part, "default:"):
+ i, err := strconv.ParseInt(part[8:], 10, 64)
+ if err == nil {
+ ret.defaultValue = new(int64)
+ *ret.defaultValue = i
+ }
+ case strings.HasPrefix(part, "tag:"):
+ i, err := strconv.Atoi(part[4:])
+ if err == nil {
+ ret.tag = new(int)
+ *ret.tag = i
+ }
+ case part == "set":
+ ret.set = true
+ case part == "application":
+ ret.application = true
+ if ret.tag == nil {
+ ret.tag = new(int)
+ }
+ case part == "omitempty":
+ ret.omitEmpty = true
+ }
+ }
+ return
+}
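+
+// For example, the tag string "optional,explicit,tag:0" sets optional and
+// explicit and points tag at 0; unrecognized parts are silently ignored.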
+
+// Given a reflected Go type, getUniversalType returns the default tag number
+// and expected compound flag.
+func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) {
+ switch t {
+ case objectIdentifierType:
+ return tagOID, false, true
+ case bitStringType:
+ return tagBitString, false, true
+ case timeType:
+ return tagUTCTime, false, true
+ case enumeratedType:
+ return tagEnum, false, true
+ case bigIntType:
+ return tagInteger, false, true
+ }
+ switch t.Kind() {
+ case reflect.Bool:
+ return tagBoolean, false, true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return tagInteger, false, true
+ case reflect.Struct:
+ return tagSequence, true, true
+ case reflect.Slice:
+ if t.Elem().Kind() == reflect.Uint8 {
+ return tagOctetString, false, true
+ }
+ if strings.HasSuffix(t.Name(), "SET") {
+ return tagSet, true, true
+ }
+ return tagSequence, true, true
+ case reflect.String:
+ return tagPrintableString, false, true
+ }
+ return 0, false, false
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/asn1/marshal.go b/vendor/src/github.com/google/certificate-transparency/go/asn1/marshal.go
new file mode 100755
index 0000000000..ed17e41a55
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/asn1/marshal.go
@@ -0,0 +1,581 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "reflect"
+ "time"
+ "unicode/utf8"
+)
+
+// A forkableWriter is an in-memory buffer that can be
+// 'forked' to create new forkableWriters that bracket the
+// original. After
+// pre, post := w.fork();
+// the overall sequence of bytes represented is logically w+pre+post.
+type forkableWriter struct {
+ *bytes.Buffer
+ pre, post *forkableWriter
+}
+
+func newForkableWriter() *forkableWriter {
+ return &forkableWriter{new(bytes.Buffer), nil, nil}
+}
+
+func (f *forkableWriter) fork() (pre, post *forkableWriter) {
+ if f.pre != nil || f.post != nil {
+ panic("have already forked")
+ }
+ f.pre = newForkableWriter()
+ f.post = newForkableWriter()
+ return f.pre, f.post
+}
+
+func (f *forkableWriter) Len() (l int) {
+ l += f.Buffer.Len()
+ if f.pre != nil {
+ l += f.pre.Len()
+ }
+ if f.post != nil {
+ l += f.post.Len()
+ }
+ return
+}
+
+func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) {
+ n, err = out.Write(f.Bytes())
+ if err != nil {
+ return
+ }
+
+ var nn int
+
+ if f.pre != nil {
+ nn, err = f.pre.writeTo(out)
+ n += nn
+ if err != nil {
+ return
+ }
+ }
+
+ if f.post != nil {
+ nn, err = f.post.writeTo(out)
+ n += nn
+ }
+ return
+}
+
+func marshalBase128Int(out *forkableWriter, n int64) (err error) {
+ if n == 0 {
+ err = out.WriteByte(0)
+ return
+ }
+
+ l := 0
+ for i := n; i > 0; i >>= 7 {
+ l++
+ }
+
+ for i := l - 1; i >= 0; i-- {
+ o := byte(n >> uint(i*7))
+ o &= 0x7f
+ if i != 0 {
+ o |= 0x80
+ }
+ err = out.WriteByte(o)
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
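+
+// This is the inverse of parseBase128Int: marshalBase128Int(out, 840)
+// writes {0x86, 0x48}.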
+
+func marshalInt64(out *forkableWriter, i int64) (err error) {
+ n := int64Length(i)
+
+ for ; n > 0; n-- {
+ err = out.WriteByte(byte(i >> uint((n-1)*8)))
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
+
+func int64Length(i int64) (numBytes int) {
+ numBytes = 1
+
+ for i > 127 {
+ numBytes++
+ i >>= 8
+ }
+
+ for i < -128 {
+ numBytes++
+ i >>= 8
+ }
+
+ return
+}
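+
+// For instance, int64Length(127) is 1, while int64Length(128) is 2 because
+// a leading 0x00 octet is needed to keep the two's-complement value
+// positive; int64Length(-129) is likewise 2.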
+
+func marshalBigInt(out *forkableWriter, n *big.Int) (err error) {
+ if n.Sign() < 0 {
+ // A negative number has to be converted to two's-complement
+ // form. So we'll subtract 1 and invert. If the
+ // most-significant-bit isn't set then we'll need to pad the
+ // beginning with 0xff in order to keep the number negative.
+ nMinus1 := new(big.Int).Neg(n)
+ nMinus1.Sub(nMinus1, bigOne)
+ bytes := nMinus1.Bytes()
+ for i := range bytes {
+ bytes[i] ^= 0xff
+ }
+ if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+ err = out.WriteByte(0xff)
+ if err != nil {
+ return
+ }
+ }
+ _, err = out.Write(bytes)
+ } else if n.Sign() == 0 {
+ // Zero is written as a single zero byte rather than no bytes.
+ err = out.WriteByte(0x00)
+ } else {
+ bytes := n.Bytes()
+ if len(bytes) > 0 && bytes[0]&0x80 != 0 {
+ // We'll have to pad this with 0x00 in order to stop it
+ // looking like a negative number.
+ err = out.WriteByte(0)
+ if err != nil {
+ return
+ }
+ }
+ _, err = out.Write(bytes)
+ }
+ return
+}
+
+func marshalLength(out *forkableWriter, i int) (err error) {
+ n := lengthLength(i)
+
+ for ; n > 0; n-- {
+ err = out.WriteByte(byte(i >> uint((n-1)*8)))
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
+
+func lengthLength(i int) (numBytes int) {
+ numBytes = 1
+ for i > 255 {
+ numBytes++
+ i >>= 8
+ }
+ return
+}
+
+func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) {
+ b := uint8(t.class) << 6
+ if t.isCompound {
+ b |= 0x20
+ }
+ if t.tag >= 31 {
+ b |= 0x1f
+ err = out.WriteByte(b)
+ if err != nil {
+ return
+ }
+ err = marshalBase128Int(out, int64(t.tag))
+ if err != nil {
+ return
+ }
+ } else {
+ b |= uint8(t.tag)
+ err = out.WriteByte(b)
+ if err != nil {
+ return
+ }
+ }
+
+ if t.length >= 128 {
+ l := lengthLength(t.length)
+ err = out.WriteByte(0x80 | byte(l))
+ if err != nil {
+ return
+ }
+ err = marshalLength(out, t.length)
+ if err != nil {
+ return
+ }
+ } else {
+ err = out.WriteByte(byte(t.length))
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
+
+func marshalBitString(out *forkableWriter, b BitString) (err error) {
+ paddingBits := byte((8 - b.BitLength%8) % 8)
+ err = out.WriteByte(paddingBits)
+ if err != nil {
+ return
+ }
+ _, err = out.Write(b.Bytes)
+ return
+}
+
+func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) {
+ if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
+ return StructuralError{"invalid object identifier"}
+ }
+
+ err = marshalBase128Int(out, int64(oid[0]*40+oid[1]))
+ if err != nil {
+ return
+ }
+ for i := 2; i < len(oid); i++ {
+ err = marshalBase128Int(out, int64(oid[i]))
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func marshalPrintableString(out *forkableWriter, s string) (err error) {
+ b := []byte(s)
+ for _, c := range b {
+ if !isPrintable(c) {
+ return StructuralError{"PrintableString contains invalid character"}
+ }
+ }
+
+ _, err = out.Write(b)
+ return
+}
+
+func marshalIA5String(out *forkableWriter, s string) (err error) {
+ b := []byte(s)
+ for _, c := range b {
+ if c > 127 {
+ return StructuralError{"IA5String contains invalid character"}
+ }
+ }
+
+ _, err = out.Write(b)
+ return
+}
+
+func marshalUTF8String(out *forkableWriter, s string) (err error) {
+ _, err = out.Write([]byte(s))
+ return
+}
+
+func marshalTwoDigits(out *forkableWriter, v int) (err error) {
+ err = out.WriteByte(byte('0' + (v/10)%10))
+ if err != nil {
+ return
+ }
+ return out.WriteByte(byte('0' + v%10))
+}
+
+func marshalUTCTime(out *forkableWriter, t time.Time) (err error) {
+ year, month, day := t.Date()
+
+ switch {
+ case 1950 <= year && year < 2000:
+ err = marshalTwoDigits(out, int(year-1900))
+ case 2000 <= year && year < 2050:
+ err = marshalTwoDigits(out, int(year-2000))
+ default:
+ return StructuralError{"cannot represent time as UTCTime"}
+ }
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, int(month))
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, day)
+ if err != nil {
+ return
+ }
+
+ hour, min, sec := t.Clock()
+
+ err = marshalTwoDigits(out, hour)
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, min)
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, sec)
+ if err != nil {
+ return
+ }
+
+ _, offset := t.Zone()
+
+ switch {
+ case offset/60 == 0:
+ err = out.WriteByte('Z')
+ return
+ case offset > 0:
+ err = out.WriteByte('+')
+ case offset < 0:
+ err = out.WriteByte('-')
+ }
+
+ if err != nil {
+ return
+ }
+
+ offsetMinutes := offset / 60
+ if offsetMinutes < 0 {
+ offsetMinutes = -offsetMinutes
+ }
+
+ err = marshalTwoDigits(out, offsetMinutes/60)
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, offsetMinutes%60)
+ return
+}
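+
+// For example, a UTC time of 1991-05-06 16:45:40 is rendered as
+// "910506164540Z"; non-UTC zones get a "+hhmm" or "-hhmm" suffix in place
+// of 'Z'.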
+
+func stripTagAndLength(in []byte) []byte {
+ _, offset, err := parseTagAndLength(in, 0)
+ if err != nil {
+ return in
+ }
+ return in[offset:]
+}
+
+func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) {
+ switch value.Type() {
+ case timeType:
+ return marshalUTCTime(out, value.Interface().(time.Time))
+ case bitStringType:
+ return marshalBitString(out, value.Interface().(BitString))
+ case objectIdentifierType:
+ return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier))
+ case bigIntType:
+ return marshalBigInt(out, value.Interface().(*big.Int))
+ }
+
+ switch v := value; v.Kind() {
+ case reflect.Bool:
+ if v.Bool() {
+ return out.WriteByte(255)
+ } else {
+ return out.WriteByte(0)
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return marshalInt64(out, int64(v.Int()))
+ case reflect.Struct:
+ t := v.Type()
+
+ startingField := 0
+
+ // If the first element of the structure is a non-empty
+ // RawContents, then we don't bother serializing the rest.
+ if t.NumField() > 0 && t.Field(0).Type == rawContentsType {
+ s := v.Field(0)
+ if s.Len() > 0 {
+ bytes := make([]byte, s.Len())
+ for i := 0; i < s.Len(); i++ {
+ bytes[i] = uint8(s.Index(i).Uint())
+ }
+ /* The RawContents will contain the tag and
+ * length fields but we'll also be writing
+ * those ourselves, so we strip them out of
+ * bytes */
+ _, err = out.Write(stripTagAndLength(bytes))
+ return
+ } else {
+ startingField = 1
+ }
+ }
+
+ for i := startingField; i < t.NumField(); i++ {
+ var pre *forkableWriter
+ pre, out = out.fork()
+ err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1")))
+ if err != nil {
+ return
+ }
+ }
+ return
+ case reflect.Slice:
+ sliceType := v.Type()
+ if sliceType.Elem().Kind() == reflect.Uint8 {
+ bytes := make([]byte, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ bytes[i] = uint8(v.Index(i).Uint())
+ }
+ _, err = out.Write(bytes)
+ return
+ }
+
+ var fp fieldParameters
+ for i := 0; i < v.Len(); i++ {
+ var pre *forkableWriter
+ pre, out = out.fork()
+ err = marshalField(pre, v.Index(i), fp)
+ if err != nil {
+ return
+ }
+ }
+ return
+ case reflect.String:
+ switch params.stringType {
+ case tagIA5String:
+ return marshalIA5String(out, v.String())
+ case tagPrintableString:
+ return marshalPrintableString(out, v.String())
+ default:
+ return marshalUTF8String(out, v.String())
+ }
+ }
+
+ return StructuralError{"unknown Go type"}
+}
+
+func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) {
+ // If the field is an interface{} then recurse into it.
+ if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
+ return marshalField(out, v.Elem(), params)
+ }
+
+ if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty {
+ return
+ }
+
+ if params.optional && reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
+ return
+ }
+
+ if v.Type() == rawValueType {
+ rv := v.Interface().(RawValue)
+ if len(rv.FullBytes) != 0 {
+ _, err = out.Write(rv.FullBytes)
+ } else {
+ err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})
+ if err != nil {
+ return
+ }
+ _, err = out.Write(rv.Bytes)
+ }
+ return
+ }
+
+ tag, isCompound, ok := getUniversalType(v.Type())
+ if !ok {
+ err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())}
+ return
+ }
+ class := classUniversal
+
+ if params.stringType != 0 && tag != tagPrintableString {
+ return StructuralError{"explicit string type given to non-string member"}
+ }
+
+ if tag == tagPrintableString {
+ if params.stringType == 0 {
+ // This is a string without an explicit string type. We'll use
+ // a PrintableString if the character set in the string is
+ // sufficiently limited, otherwise we'll use a UTF8String.
+ for _, r := range v.String() {
+ if r >= utf8.RuneSelf || !isPrintable(byte(r)) {
+ if !utf8.ValidString(v.String()) {
+ return errors.New("asn1: string not valid UTF-8")
+ }
+ tag = tagUTF8String
+ break
+ }
+ }
+ } else {
+ tag = params.stringType
+ }
+ }
+
+ if params.set {
+ if tag != tagSequence {
+ return StructuralError{"non sequence tagged as set"}
+ }
+ tag = tagSet
+ }
+
+ tags, body := out.fork()
+
+ err = marshalBody(body, v, params)
+ if err != nil {
+ return
+ }
+
+ bodyLen := body.Len()
+
+ var explicitTag *forkableWriter
+ if params.explicit {
+ explicitTag, tags = tags.fork()
+ }
+
+ if !params.explicit && params.tag != nil {
+ // implicit tag.
+ tag = *params.tag
+ class = classContextSpecific
+ }
+
+ err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound})
+ if err != nil {
+ return
+ }
+
+ if params.explicit {
+ err = marshalTagAndLength(explicitTag, tagAndLength{
+ class: classContextSpecific,
+ tag: *params.tag,
+ length: bodyLen + tags.Len(),
+ isCompound: true,
+ })
+ }
+
+ return nil
+}
+
+// Marshal returns the ASN.1 encoding of val.
+func Marshal(val interface{}) ([]byte, error) {
+ var out bytes.Buffer
+ v := reflect.ValueOf(val)
+ f := newForkableWriter()
+ err := marshalField(f, v, fieldParameters{})
+ if err != nil {
+ return nil, err
+ }
+ _, err = f.writeTo(&out)
+ return out.Bytes(), err
+}
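+
+// A minimal round-trip sketch (types and values illustrative): marshaling a
+// struct{ X, Y int } holding {1, 2} produces
+// {0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02}, which Unmarshal
+// reverses.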
diff --git a/vendor/src/github.com/google/certificate-transparency/go/client/logclient.go b/vendor/src/github.com/google/certificate-transparency/go/client/logclient.go
new file mode 100644
index 0000000000..225b840f0f
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/client/logclient.go
@@ -0,0 +1,357 @@
+// Package client is a CT log client implementation and contains types and code
+// for interacting with RFC6962-compliant CT Log instances.
+// See http://tools.ietf.org/html/rfc6962 for details.
+package client
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/google/certificate-transparency/go"
+ "github.com/mreiferson/go-httpclient"
+ "golang.org/x/net/context"
+)
+
+// URI paths for CT Log endpoints
+const (
+ AddChainPath = "/ct/v1/add-chain"
+ AddPreChainPath = "/ct/v1/add-pre-chain"
+ GetSTHPath = "/ct/v1/get-sth"
+ GetEntriesPath = "/ct/v1/get-entries"
+)
+
+// LogClient represents a client for a given CT Log instance
+type LogClient struct {
+ uri string // the base URI of the log, e.g. http://ct.googleapis.com/pilot
+ httpClient *http.Client // used to interact with the log via HTTP
+}
+
+//////////////////////////////////////////////////////////////////////////////////
+// JSON structures follow.
+// These represent the structures returned by the CT Log server.
+//////////////////////////////////////////////////////////////////////////////////
+
+// addChainRequest represents the JSON request body sent to the add-chain CT
+// method.
+type addChainRequest struct {
+ Chain []string `json:"chain"`
+}
+
+// addChainResponse represents the JSON response to the add-chain CT method.
+// An SCT represents a Log's promise to integrate a [pre-]certificate into the
+// log within a defined period of time.
+type addChainResponse struct {
+ SCTVersion ct.Version `json:"sct_version"` // SCT structure version
+ ID string `json:"id"` // Log ID
+ Timestamp uint64 `json:"timestamp"` // Timestamp of issuance
+ Extensions string `json:"extensions"` // Holder for any CT extensions
+ Signature string `json:"signature"` // Log signature for this SCT
+}
+
+// getSTHResponse represents the JSON response to the get-sth CT method
+type getSTHResponse struct {
+ TreeSize uint64 `json:"tree_size"` // Number of certs in the current tree
+ Timestamp uint64 `json:"timestamp"` // Time that the tree was created
+ SHA256RootHash string `json:"sha256_root_hash"` // Root hash of the tree
+ TreeHeadSignature string `json:"tree_head_signature"` // Log signature for this STH
+}
+
+// base64LeafEntry represents a Base64-encoded leaf entry
+type base64LeafEntry struct {
+ LeafInput string `json:"leaf_input"`
+ ExtraData string `json:"extra_data"`
+}
+
+// getEntriesResponse represents the JSON response to the CT get-entries method
+type getEntriesResponse struct {
+ Entries []base64LeafEntry `json:"entries"` // the list of returned entries
+}
+
+// getConsistencyProofResponse represents the JSON response to the CT get-consistency-proof method
+type getConsistencyProofResponse struct {
+ Consistency []string `json:"consistency"`
+}
+
+// getAuditProofResponse represents the JSON response to the CT get-audit-proof method
+type getAuditProofResponse struct {
+ Hash []string `json:"hash"` // the hashes which make up the proof
+ TreeSize uint64 `json:"tree_size"` // the tree size against which this proof is constructed
+}
+
+// getAcceptedRootsResponse represents the JSON response to the CT get-roots method.
+type getAcceptedRootsResponse struct {
+ Certificates []string `json:"certificates"`
+}
+
+// getEntryAndProofResponse represents the JSON response to the CT get-entry-and-proof method
+type getEntryAndProofResponse struct {
+ LeafInput string `json:"leaf_input"` // the entry itself
+ ExtraData string `json:"extra_data"` // any chain provided when the entry was added to the log
+ AuditPath []string `json:"audit_path"` // the corresponding proof
+}
+
+// New constructs a new LogClient instance.
+// |uri| is the base URI of the CT log instance to interact with, e.g.
+// http://ct.googleapis.com/pilot
+func New(uri string) *LogClient {
+ var c LogClient
+ c.uri = uri
+ transport := &httpclient.Transport{
+ ConnectTimeout: 10 * time.Second,
+ RequestTimeout: 30 * time.Second,
+ ResponseHeaderTimeout: 30 * time.Second,
+ MaxIdleConnsPerHost: 10,
+ DisableKeepAlives: false,
+ }
+ c.httpClient = &http.Client{Transport: transport}
+ return &c
+}
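+
+// Typical usage (the log URI is illustrative):
+//
+//	client := New("http://ct.googleapis.com/pilot")
+//	sth, err := client.GetSTH()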
+
+// Makes an HTTP GET call to |uri|, and attempts to parse the response as a JSON
+// representation of the structure in |res|.
+// Returns a non-nil |error| if there was a problem.
+func (c *LogClient) fetchAndParse(uri string, res interface{}) error {
+ req, err := http.NewRequest("GET", uri, nil)
+ if err != nil {
+ return err
+ }
+ resp, err := c.httpClient.Do(req)
+ var body []byte
+ if resp != nil {
+ body, err = ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ return err
+ }
+ }
+ if err != nil {
+ return err
+ }
+ if err = json.Unmarshal(body, &res); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Makes an HTTP POST call to |uri|, and attempts to parse the response as a JSON
+// representation of the structure in |res|.
+// Returns a non-nil |error| if there was a problem.
+func (c *LogClient) postAndParse(uri string, req interface{}, res interface{}) (*http.Response, string, error) {
+ postBody, err := json.Marshal(req)
+ if err != nil {
+ return nil, "", err
+ }
+ httpReq, err := http.NewRequest("POST", uri, bytes.NewReader(postBody))
+ if err != nil {
+ return nil, "", err
+ }
+ httpReq.Header.Set("Keep-Alive", "timeout=15, max=100")
+ httpReq.Header.Set("Content-Type", "application/json")
+ resp, err := c.httpClient.Do(httpReq)
+ // Read all of the body, if there is one, so that the http.Client can do
+ // Keep-Alive:
+ var body []byte
+ if resp != nil {
+ body, err = ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ }
+ if err != nil {
+ return resp, string(body), err
+ }
+ if resp.StatusCode == 200 {
+ if err = json.Unmarshal(body, &res); err != nil {
+ return resp, string(body), err
+ }
+ }
+ return resp, string(body), nil
+}
+
+func backoffForRetry(ctx context.Context, d time.Duration) error {
+ backoffTimer := time.NewTimer(d)
+ if ctx != nil {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-backoffTimer.C:
+ }
+ } else {
+ <-backoffTimer.C
+ }
+ return nil
+}
+
+// Attempts to add |chain| to the log, using the API endpoint specified by
+// |path|. If the provided context expires before submission is complete, an
+// error will be returned.
+func (c *LogClient) addChainWithRetry(ctx context.Context, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+ var resp addChainResponse
+ var req addChainRequest
+ for _, link := range chain {
+ req.Chain = append(req.Chain, base64.StdEncoding.EncodeToString(link))
+ }
+ httpStatus := "Unknown"
+ backoffSeconds := 0
+ done := false
+ for !done {
+ if backoffSeconds > 0 {
+ log.Printf("Got %s, backing-off %d seconds", httpStatus, backoffSeconds)
+ }
+ err := backoffForRetry(ctx, time.Second*time.Duration(backoffSeconds))
+ if err != nil {
+ return nil, err
+ }
+ if backoffSeconds > 0 {
+ backoffSeconds = 0
+ }
+ httpResp, errorBody, err := c.postAndParse(c.uri+path, &req, &resp)
+ if err != nil {
+ backoffSeconds = 10
+ continue
+ }
+ switch {
+ case httpResp.StatusCode == 200:
+ done = true
+ case httpResp.StatusCode == 408:
+ // request timeout, retry immediately
+ case httpResp.StatusCode == 503:
+ // Retry
+ backoffSeconds = 10
+ if retryAfter := httpResp.Header.Get("Retry-After"); retryAfter != "" {
+ if seconds, err := strconv.Atoi(retryAfter); err == nil {
+ backoffSeconds = seconds
+ }
+ }
+ default:
+ return nil, fmt.Errorf("got HTTP Status %s: %s", httpResp.Status, errorBody)
+ }
+ httpStatus = httpResp.Status
+ }
+
+ rawLogID, err := base64.StdEncoding.DecodeString(resp.ID)
+ if err != nil {
+ return nil, err
+ }
+ rawSignature, err := base64.StdEncoding.DecodeString(resp.Signature)
+ if err != nil {
+ return nil, err
+ }
+ ds, err := ct.UnmarshalDigitallySigned(bytes.NewReader(rawSignature))
+ if err != nil {
+ return nil, err
+ }
+ var logID ct.SHA256Hash
+ copy(logID[:], rawLogID)
+ return &ct.SignedCertificateTimestamp{
+ SCTVersion: resp.SCTVersion,
+ LogID: logID,
+ Timestamp: resp.Timestamp,
+ Extensions: ct.CTExtensions(resp.Extensions),
+ Signature: *ds}, nil
+}
+
+// AddChain adds the (DER represented) X509 |chain| to the log.
+func (c *LogClient) AddChain(chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+ return c.addChainWithRetry(nil, AddChainPath, chain)
+}
+
+// AddPreChain adds the (DER represented) Precertificate |chain| to the log.
+func (c *LogClient) AddPreChain(chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+ return c.addChainWithRetry(nil, AddPreChainPath, chain)
+}
+
+// AddChainWithContext adds the (DER represented) X509 |chain| to the log and
+// fails if the provided context expires before the chain is submitted.
+func (c *LogClient) AddChainWithContext(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+ return c.addChainWithRetry(ctx, AddChainPath, chain)
+}
+
+// GetSTH retrieves the current STH from the log.
+// Returns a populated SignedTreeHead, or a non-nil error.
+func (c *LogClient) GetSTH() (sth *ct.SignedTreeHead, err error) {
+ var resp getSTHResponse
+ if err = c.fetchAndParse(c.uri+GetSTHPath, &resp); err != nil {
+ return
+ }
+ sth = &ct.SignedTreeHead{
+ TreeSize: resp.TreeSize,
+ Timestamp: resp.Timestamp,
+ }
+
+ rawRootHash, err := base64.StdEncoding.DecodeString(resp.SHA256RootHash)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 encoding in sha256_root_hash: %v", err)
+ }
+ if len(rawRootHash) != sha256.Size {
+ return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(rawRootHash))
+ }
+ copy(sth.SHA256RootHash[:], rawRootHash)
+
+ rawSignature, err := base64.StdEncoding.DecodeString(resp.TreeHeadSignature)
+ if err != nil {
+ return nil, errors.New("invalid base64 encoding in tree_head_signature")
+ }
+ ds, err := ct.UnmarshalDigitallySigned(bytes.NewReader(rawSignature))
+ if err != nil {
+ return nil, err
+ }
+ // TODO(alcutter): Verify signature
+ sth.TreeHeadSignature = *ds
+ return
+}
+
+// GetEntries attempts to retrieve the entries in the sequence [|start|, |end|] from the CT
+// log server (see RFC 6962 section 4.6).
+// Returns a slice of LogEntry structures or a non-nil error.
+func (c *LogClient) GetEntries(start, end int64) ([]ct.LogEntry, error) {
+ if end < 0 {
+ return nil, errors.New("end should be >= 0")
+ }
+ if end < start {
+ return nil, errors.New("start should be <= end")
+ }
+ var resp getEntriesResponse
+ err := c.fetchAndParse(fmt.Sprintf("%s%s?start=%d&end=%d", c.uri, GetEntriesPath, start, end), &resp)
+ if err != nil {
+ return nil, err
+ }
+ entries := make([]ct.LogEntry, len(resp.Entries))
+ for index, entry := range resp.Entries {
+ leafBytes, err := base64.StdEncoding.DecodeString(entry.LeafInput)
+ if err != nil {
+ return nil, err
+ }
+ leaf, err := ct.ReadMerkleTreeLeaf(bytes.NewBuffer(leafBytes))
+ if err != nil {
+ return nil, err
+ }
+ entries[index].Leaf = *leaf
+ chainBytes, err := base64.StdEncoding.DecodeString(entry.ExtraData)
+ if err != nil {
+ return nil, err
+ }
+
+ var chain []ct.ASN1Cert
+ switch leaf.TimestampedEntry.EntryType {
+ case ct.X509LogEntryType:
+ chain, err = ct.UnmarshalX509ChainArray(chainBytes)
+
+ case ct.PrecertLogEntryType:
+ chain, err = ct.UnmarshalPrecertChainArray(chainBytes)
+
+ default:
+ return nil, fmt.Errorf("saw unknown entry type: %v", leaf.TimestampedEntry.EntryType)
+ }
+ if err != nil {
+ return nil, err
+ }
+ entries[index].Chain = chain
+ entries[index].Index = start + int64(index)
+ }
+ return entries, nil
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/serialization.go b/vendor/src/github.com/google/certificate-transparency/go/serialization.go
new file mode 100644
index 0000000000..aab3a3f20c
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/serialization.go
@@ -0,0 +1,512 @@
+package ct
+
+import (
+ "bytes"
+ "container/list"
+ "crypto"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Variable size structure prefix-header byte lengths
+const (
+ CertificateLengthBytes = 3
+ PreCertificateLengthBytes = 3
+ ExtensionsLengthBytes = 2
+ CertificateChainLengthBytes = 3
+ SignatureLengthBytes = 2
+)
+
+// Max lengths
+const (
+ MaxCertificateLength = (1 << 24) - 1
+ MaxExtensionsLength = (1 << 16) - 1
+)
+
+func writeUint(w io.Writer, value uint64, numBytes int) error {
+ buf := make([]uint8, numBytes)
+ for i := 0; i < numBytes; i++ {
+ buf[numBytes-i-1] = uint8(value & 0xff)
+ value >>= 8
+ }
+ if value != 0 {
+ return errors.New("numBytes was insufficiently large to represent value")
+ }
+ if _, err := w.Write(buf); err != nil {
+ return err
+ }
+ return nil
+}
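+
+// For example, writeUint(w, 0x0102, 3) emits {0x00, 0x01, 0x02}, whereas
+// writeUint(w, 0x010203, 2) fails because the value does not fit in two
+// bytes.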
+
+func writeVarBytes(w io.Writer, value []byte, numLenBytes int) error {
+ if err := writeUint(w, uint64(len(value)), numLenBytes); err != nil {
+ return err
+ }
+ if _, err := w.Write(value); err != nil {
+ return err
+ }
+ return nil
+}
+
+func readUint(r io.Reader, numBytes int) (uint64, error) {
+ var l uint64
+ for i := 0; i < numBytes; i++ {
+ l <<= 8
+ var t uint8
+ if err := binary.Read(r, binary.BigEndian, &t); err != nil {
+ return 0, err
+ }
+ l |= uint64(t)
+ }
+ return l, nil
+}
+
+// Reads a variable length array of bytes from |r|. |numLenBytes| specifies the
+// number of (BigEndian) prefix-bytes which contain the length of the actual
+// array data bytes that follow.
+// Allocates an array to hold the contents and returns a slice view into it if
+// the read was successful, or an error otherwise.
+func readVarBytes(r io.Reader, numLenBytes int) ([]byte, error) {
+ switch {
+ case numLenBytes > 8:
+ return nil, fmt.Errorf("numLenBytes too large (%d)", numLenBytes)
+ case numLenBytes == 0:
+ return nil, errors.New("numLenBytes should be > 0")
+ }
+ l, err := readUint(r, numLenBytes)
+ if err != nil {
+ return nil, err
+ }
+ data := make([]byte, l)
+ n, err := r.Read(data)
+ if err != nil {
+ return nil, err
+ }
+ if n != int(l) {
+ return nil, fmt.Errorf("short read: expected %d but got %d", l, n)
+ }
+ return data, nil
+}
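+
+// For instance, with numLenBytes = 2 the stream {0x00, 0x03, 'a', 'b', 'c'}
+// yields the three bytes "abc".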
+
+// Reads a list of ASN1Cert types from |r|
+func readASN1CertList(r io.Reader, totalLenBytes int, elementLenBytes int) ([]ASN1Cert, error) {
+ listBytes, err := readVarBytes(r, totalLenBytes)
+ if err != nil {
+ return []ASN1Cert{}, err
+ }
+ list := list.New()
+ listReader := bytes.NewReader(listBytes)
+ var entry []byte
+ for err == nil {
+ entry, err = readVarBytes(listReader, elementLenBytes)
+ if err != nil {
+ if err != io.EOF {
+ return []ASN1Cert{}, err
+ }
+ } else {
+ list.PushBack(entry)
+ }
+ }
+ ret := make([]ASN1Cert, list.Len())
+ i := 0
+ for e := list.Front(); e != nil; e = e.Next() {
+ ret[i] = e.Value.([]byte)
+ i++
+ }
+ return ret, nil
+}
+
+// ReadTimestampedEntryInto parses the byte-stream representation of a
+// TimestampedEntry from |r| and populates the struct |t| with the data. See
+// RFC section 3.4 for details on the format.
+// Returns a non-nil error if there was a problem.
+func ReadTimestampedEntryInto(r io.Reader, t *TimestampedEntry) error {
+ var err error
+ if err = binary.Read(r, binary.BigEndian, &t.Timestamp); err != nil {
+ return err
+ }
+ if err = binary.Read(r, binary.BigEndian, &t.EntryType); err != nil {
+ return err
+ }
+ switch t.EntryType {
+ case X509LogEntryType:
+ if t.X509Entry, err = readVarBytes(r, CertificateLengthBytes); err != nil {
+ return err
+ }
+ case PrecertLogEntryType:
+ if err := binary.Read(r, binary.BigEndian, &t.PrecertEntry.IssuerKeyHash); err != nil {
+ return err
+ }
+ if t.PrecertEntry.TBSCertificate, err = readVarBytes(r, PreCertificateLengthBytes); err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("unknown EntryType: %d", t.EntryType)
+ }
+ t.Extensions, err = readVarBytes(r, ExtensionsLengthBytes)
+ return err
+}
+
+// ReadMerkleTreeLeaf parses the byte-stream representation of a MerkleTreeLeaf
+// and returns a pointer to a new MerkleTreeLeaf structure containing the
+// parsed data. See RFC section 3.4 for details on the format.
+// Returns a non-nil error if there was a problem.
+func ReadMerkleTreeLeaf(r io.Reader) (*MerkleTreeLeaf, error) {
+ var m MerkleTreeLeaf
+ if err := binary.Read(r, binary.BigEndian, &m.Version); err != nil {
+ return nil, err
+ }
+ if m.Version != V1 {
+ return nil, fmt.Errorf("unknown Version %d", m.Version)
+ }
+ if err := binary.Read(r, binary.BigEndian, &m.LeafType); err != nil {
+ return nil, err
+ }
+ if m.LeafType != TimestampedEntryLeafType {
+ return nil, fmt.Errorf("unknown LeafType %d", m.LeafType)
+ }
+ if err := ReadTimestampedEntryInto(r, &m.TimestampedEntry); err != nil {
+ return nil, err
+ }
+ return &m, nil
+}
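+
+// Parsing sketch, assuming leafBytes holds a serialized MerkleTreeLeaf (for
+// example, the base64-decoded "leaf_input" field of a get-entries response):
+//
+//	leaf, err := ReadMerkleTreeLeaf(bytes.NewReader(leafBytes))
+//	if err == nil && leaf.TimestampedEntry.EntryType == X509LogEntryType {
+//		cert, err := leaf.X509Certificate()
+//		// cert is the parsed *x509.Certificate when err is nil
+//	}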
+
+// UnmarshalX509ChainArray unmarshals the contents of the "chain:" entry in a
+// GetEntries response in the case where the entry refers to an X509 leaf.
+func UnmarshalX509ChainArray(b []byte) ([]ASN1Cert, error) {
+ return readASN1CertList(bytes.NewReader(b), CertificateChainLengthBytes, CertificateLengthBytes)
+}
+
+// UnmarshalPrecertChainArray unmarshals the contents of the "chain:" entry in
+// a GetEntries response in the case where the entry refers to a Precertificate
+// leaf.
+func UnmarshalPrecertChainArray(b []byte) ([]ASN1Cert, error) {
+ var chain []ASN1Cert
+
+ reader := bytes.NewReader(b)
+ // read the pre-cert entry:
+ precert, err := readVarBytes(reader, CertificateLengthBytes)
+ if err != nil {
+ return chain, err
+ }
+ chain = append(chain, precert)
+ // and then read and return the chain up to the root:
+ remainingChain, err := readASN1CertList(reader, CertificateChainLengthBytes, CertificateLengthBytes)
+ if err != nil {
+ return chain, err
+ }
+ chain = append(chain, remainingChain...)
+ return chain, nil
+}
+
+// UnmarshalDigitallySigned reconstructs a DigitallySigned structure from a Reader
+func UnmarshalDigitallySigned(r io.Reader) (*DigitallySigned, error) {
+ var h byte
+ if err := binary.Read(r, binary.BigEndian, &h); err != nil {
+ return nil, fmt.Errorf("failed to read HashAlgorithm: %v", err)
+ }
+
+ var s byte
+ if err := binary.Read(r, binary.BigEndian, &s); err != nil {
+ return nil, fmt.Errorf("failed to read SignatureAlgorithm: %v", err)
+ }
+
+ sig, err := readVarBytes(r, SignatureLengthBytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read Signature bytes: %v", err)
+ }
+
+ return &DigitallySigned{
+ HashAlgorithm: HashAlgorithm(h),
+ SignatureAlgorithm: SignatureAlgorithm(s),
+ Signature: sig,
+ }, nil
+}
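+
+// MarshalDigitallySigned (below) and UnmarshalDigitallySigned are inverses;
+// a round-trip sketch, with ds a hypothetical populated value:
+//
+//	b, err := MarshalDigitallySigned(ds)
+//	if err == nil {
+//		ds2, err := UnmarshalDigitallySigned(bytes.NewReader(b))
+//		// *ds2 mirrors ds when err is nil
+//	}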
+
+func marshalDigitallySignedHere(ds DigitallySigned, here []byte) ([]byte, error) {
+ sigLen := len(ds.Signature)
+ dsOutLen := 2 + SignatureLengthBytes + sigLen
+ if here == nil {
+ here = make([]byte, dsOutLen)
+ }
+ if len(here) < dsOutLen {
+ return nil, ErrNotEnoughBuffer
+ }
+ here = here[0:dsOutLen]
+
+ here[0] = byte(ds.HashAlgorithm)
+ here[1] = byte(ds.SignatureAlgorithm)
+ binary.BigEndian.PutUint16(here[2:4], uint16(sigLen))
+ copy(here[4:], ds.Signature)
+
+ return here, nil
+}
+
+// MarshalDigitallySigned marshals a DigitallySigned structure into a byte slice
+func MarshalDigitallySigned(ds DigitallySigned) ([]byte, error) {
+ return marshalDigitallySignedHere(ds, nil)
+}
+
+func checkCertificateFormat(cert ASN1Cert) error {
+ if len(cert) == 0 {
+ return errors.New("certificate is zero length")
+ }
+ if len(cert) > MaxCertificateLength {
+ return errors.New("certificate too large")
+ }
+ return nil
+}
+
+func checkExtensionsFormat(ext CTExtensions) error {
+ if len(ext) > MaxExtensionsLength {
+ return errors.New("extensions too large")
+ }
+ return nil
+}
+
+func serializeV1CertSCTSignatureInput(timestamp uint64, cert ASN1Cert, ext CTExtensions) ([]byte, error) {
+ if err := checkCertificateFormat(cert); err != nil {
+ return nil, err
+ }
+ if err := checkExtensionsFormat(ext); err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&buf, binary.BigEndian, CertificateTimestampSignatureType); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&buf, binary.BigEndian, timestamp); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&buf, binary.BigEndian, X509LogEntryType); err != nil {
+ return nil, err
+ }
+ if err := writeVarBytes(&buf, cert, CertificateLengthBytes); err != nil {
+ return nil, err
+ }
+ if err := writeVarBytes(&buf, ext, ExtensionsLengthBytes); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func serializeV1PrecertSCTSignatureInput(timestamp uint64, issuerKeyHash [issuerKeyHashLength]byte, tbs []byte, ext CTExtensions) ([]byte, error) {
+ if err := checkCertificateFormat(tbs); err != nil {
+ return nil, err
+ }
+ if err := checkExtensionsFormat(ext); err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&buf, binary.BigEndian, CertificateTimestampSignatureType); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&buf, binary.BigEndian, timestamp); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&buf, binary.BigEndian, PrecertLogEntryType); err != nil {
+ return nil, err
+ }
+ if _, err := buf.Write(issuerKeyHash[:]); err != nil {
+ return nil, err
+ }
+ if err := writeVarBytes(&buf, tbs, CertificateLengthBytes); err != nil {
+ return nil, err
+ }
+ if err := writeVarBytes(&buf, ext, ExtensionsLengthBytes); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func serializeV1SCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
+ if sct.SCTVersion != V1 {
+ return nil, fmt.Errorf("unsupported SCT version, expected V1, but got %s", sct.SCTVersion)
+ }
+ if entry.Leaf.LeafType != TimestampedEntryLeafType {
+		return nil, fmt.Errorf("unsupported leaf type %s", entry.Leaf.LeafType)
+ }
+ switch entry.Leaf.TimestampedEntry.EntryType {
+ case X509LogEntryType:
+ return serializeV1CertSCTSignatureInput(sct.Timestamp, entry.Leaf.TimestampedEntry.X509Entry, entry.Leaf.TimestampedEntry.Extensions)
+ case PrecertLogEntryType:
+ return serializeV1PrecertSCTSignatureInput(sct.Timestamp, entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
+ entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate,
+ entry.Leaf.TimestampedEntry.Extensions)
+ default:
+		return nil, fmt.Errorf("unknown TimestampedEntry EntryType %s", entry.Leaf.TimestampedEntry.EntryType)
+ }
+}
+
+// SerializeSCTSignatureInput serializes the passed in sct and log entry into
+// the correct format for signing.
+func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
+ switch sct.SCTVersion {
+ case V1:
+ return serializeV1SCTSignatureInput(sct, entry)
+ default:
+ return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
+ }
+}
+
+// SerializedLength will return the space (in bytes) needed to store the SCT
+// in its serialized form.
+func (sct SignedCertificateTimestamp) SerializedLength() (int, error) {
+ switch sct.SCTVersion {
+ case V1:
+ extLen := len(sct.Extensions)
+ sigLen := len(sct.Signature.Signature)
+ return 1 + 32 + 8 + 2 + extLen + 2 + 2 + sigLen, nil
+ default:
+ return 0, ErrInvalidVersion
+ }
+}
+
+func serializeV1SCTHere(sct SignedCertificateTimestamp, here []byte) ([]byte, error) {
+ if sct.SCTVersion != V1 {
+ return nil, ErrInvalidVersion
+ }
+ sctLen, err := sct.SerializedLength()
+ if err != nil {
+ return nil, err
+ }
+ if here == nil {
+ here = make([]byte, sctLen)
+ }
+ if len(here) < sctLen {
+ return nil, ErrNotEnoughBuffer
+ }
+ if err := checkExtensionsFormat(sct.Extensions); err != nil {
+ return nil, err
+ }
+
+ here = here[0:sctLen]
+
+ // Write Version
+ here[0] = byte(sct.SCTVersion)
+
+ // Write LogID
+ copy(here[1:33], sct.LogID[:])
+
+ // Write Timestamp
+ binary.BigEndian.PutUint64(here[33:41], sct.Timestamp)
+
+ // Write Extensions
+ extLen := len(sct.Extensions)
+ binary.BigEndian.PutUint16(here[41:43], uint16(extLen))
+ n := 43 + extLen
+ copy(here[43:n], sct.Extensions)
+
+ // Write Signature
+ _, err = marshalDigitallySignedHere(sct.Signature, here[n:])
+ if err != nil {
+ return nil, err
+ }
+ return here, nil
+}
+
+// SerializeSCTHere serializes the passed in sct into the format specified
+// by RFC6962 section 3.2.
+// If a byte slice |here| is provided, serialization will be attempted into
+// that slice; ErrNotEnoughBuffer is returned if the buffer is too small.
+// If a nil byte slice is provided, a buffer will be allocated for you.
+// The returned slice will be sliced to the correct length.
+func SerializeSCTHere(sct SignedCertificateTimestamp, here []byte) ([]byte, error) {
+ switch sct.SCTVersion {
+ case V1:
+ return serializeV1SCTHere(sct, here)
+ default:
+ return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
+ }
+}
+
+// SerializeSCT serializes the passed in sct into the format specified
+// by RFC6962 section 3.2
+// Equivalent to SerializeSCTHere(sct, nil)
+func SerializeSCT(sct SignedCertificateTimestamp) ([]byte, error) {
+ return SerializeSCTHere(sct, nil)
+}
+
+func deserializeSCTV1(r io.Reader, sct *SignedCertificateTimestamp) error {
+ if err := binary.Read(r, binary.BigEndian, &sct.LogID); err != nil {
+ return err
+ }
+ if err := binary.Read(r, binary.BigEndian, &sct.Timestamp); err != nil {
+ return err
+ }
+ ext, err := readVarBytes(r, ExtensionsLengthBytes)
+ if err != nil {
+ return err
+ }
+ sct.Extensions = ext
+ ds, err := UnmarshalDigitallySigned(r)
+ if err != nil {
+ return err
+ }
+ sct.Signature = *ds
+ return nil
+}
+
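+// DeserializeSCT reads a SignedCertificateTimestamp from the supplied Reader.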
+func DeserializeSCT(r io.Reader) (*SignedCertificateTimestamp, error) {
+ var sct SignedCertificateTimestamp
+ if err := binary.Read(r, binary.BigEndian, &sct.SCTVersion); err != nil {
+ return nil, err
+ }
+ switch sct.SCTVersion {
+ case V1:
+ return &sct, deserializeSCTV1(r, &sct)
+ default:
+ return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
+ }
+}
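+
+// Round-trip sketch for SerializeSCT/DeserializeSCT, with sct a hypothetical
+// fully-populated V1 SignedCertificateTimestamp:
+//
+//	b, err := SerializeSCT(sct)
+//	if err == nil {
+//		sct2, err := DeserializeSCT(bytes.NewReader(b))
+//		// when err is nil, *sct2 equals sct field-for-field
+//	}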
+
+func serializeV1STHSignatureInput(sth SignedTreeHead) ([]byte, error) {
+ if sth.Version != V1 {
+ return nil, fmt.Errorf("invalid STH version %d", sth.Version)
+ }
+ if len(sth.SHA256RootHash) != crypto.SHA256.Size() {
+ return nil, fmt.Errorf("invalid TreeHash length, got %d expected %d", len(sth.SHA256RootHash), crypto.SHA256.Size())
+ }
+
+ var buf bytes.Buffer
+ if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&buf, binary.BigEndian, TreeHashSignatureType); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&buf, binary.BigEndian, sth.Timestamp); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&buf, binary.BigEndian, sth.TreeSize); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&buf, binary.BigEndian, sth.SHA256RootHash); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// SerializeSTHSignatureInput serializes the passed in sth into the correct
+// format for signing.
+func SerializeSTHSignatureInput(sth SignedTreeHead) ([]byte, error) {
+ switch sth.Version {
+ case V1:
+ return serializeV1STHSignatureInput(sth)
+ default:
+ return nil, fmt.Errorf("unsupported STH version %d", sth.Version)
+ }
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/signatures.go b/vendor/src/github.com/google/certificate-transparency/go/signatures.go
new file mode 100644
index 0000000000..600db24549
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/signatures.go
@@ -0,0 +1,131 @@
+package ct
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/pem"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "math/big"
+)
+
+var allowVerificationWithNonCompliantKeys = flag.Bool("allow_verification_with_non_compliant_keys", false,
+ "Allow a SignatureVerifier to use keys which are technically non-compliant with RFC6962.")
+
+// PublicKeyFromPEM parses a PEM-formatted block and returns the public key
+// contained within, the SHA-256 hash of its DER encoding, and any remaining
+// unread bytes, or an error.
+func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
+ p, rest := pem.Decode(b)
+ if p == nil {
+ return nil, [sha256.Size]byte{}, rest, fmt.Errorf("no PEM block found in %s", string(b))
+ }
+ k, err := x509.ParsePKIXPublicKey(p.Bytes)
+ return k, sha256.Sum256(p.Bytes), rest, err
+}
+
+// SignatureVerifier can verify signatures on SCTs and STHs
+type SignatureVerifier struct {
+ pubKey crypto.PublicKey
+}
+
+// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey.
+func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
+ switch pkType := pk.(type) {
+ case *rsa.PublicKey:
+ if pkType.N.BitLen() < 2048 {
+ e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen())
+ if !(*allowVerificationWithNonCompliantKeys) {
+ return nil, e
+ }
+ log.Printf("WARNING: %v", e)
+ }
+ case *ecdsa.PublicKey:
+		params := *(pkType.Params())
+		if params != *elliptic.P256().Params() {
+			e := fmt.Errorf("public key is ECDSA, but not on the P256 curve")
+			if !(*allowVerificationWithNonCompliantKeys) {
+				return nil, e
+			}
+			log.Printf("WARNING: %v", e)
+		}
+ default:
+		return nil, fmt.Errorf("unsupported public key type %v", pkType)
+ }
+
+ return &SignatureVerifier{
+ pubKey: pk,
+ }, nil
+}
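+
+// Typical wiring of PublicKeyFromPEM and NewSignatureVerifier, assuming
+// pemBytes holds the log's PEM-encoded public key:
+//
+//	pk, _, _, err := PublicKeyFromPEM(pemBytes)
+//	if err != nil {
+//		// handle error
+//	}
+//	v, err := NewSignatureVerifier(pk)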
+
+// verifySignature verifies that the passed in signature over data was created by our PublicKey.
+// Currently, only SHA256 is supported as a HashAlgorithm, and only ECDSA and RSA signatures are supported.
+func (s SignatureVerifier) verifySignature(data []byte, sig DigitallySigned) error {
+ if sig.HashAlgorithm != SHA256 {
+ return fmt.Errorf("unsupported HashAlgorithm in signature: %v", sig.HashAlgorithm)
+ }
+
+ hasherType := crypto.SHA256
+ hasher := hasherType.New()
+ if _, err := hasher.Write(data); err != nil {
+ return fmt.Errorf("failed to write to hasher: %v", err)
+ }
+	hash := hasher.Sum(nil)
+
+ switch sig.SignatureAlgorithm {
+ case RSA:
+ rsaKey, ok := s.pubKey.(*rsa.PublicKey)
+ if !ok {
+ return fmt.Errorf("cannot verify RSA signature with %T key", s.pubKey)
+ }
+ if err := rsa.VerifyPKCS1v15(rsaKey, hasherType, hash, sig.Signature); err != nil {
+ return fmt.Errorf("failed to verify rsa signature: %v", err)
+ }
+ case ECDSA:
+ ecdsaKey, ok := s.pubKey.(*ecdsa.PublicKey)
+ if !ok {
+ return fmt.Errorf("cannot verify ECDSA signature with %T key", s.pubKey)
+ }
+ var ecdsaSig struct {
+ R, S *big.Int
+ }
+ rest, err := asn1.Unmarshal(sig.Signature, &ecdsaSig)
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal ECDSA signature: %v", err)
+ }
+ if len(rest) != 0 {
+ log.Printf("Garbage following signature %v", rest)
+ }
+
+ if !ecdsa.Verify(ecdsaKey, hash, ecdsaSig.R, ecdsaSig.S) {
+ return errors.New("failed to verify ecdsa signature")
+ }
+ default:
+ return fmt.Errorf("unsupported signature type %v", sig.SignatureAlgorithm)
+ }
+ return nil
+}
+
+// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry
+func (s SignatureVerifier) VerifySCTSignature(sct SignedCertificateTimestamp, entry LogEntry) error {
+ sctData, err := SerializeSCTSignatureInput(sct, entry)
+ if err != nil {
+ return err
+ }
+ return s.verifySignature(sctData, sct.Signature)
+}
+
+// VerifySTHSignature verifies that the STH's signature is valid.
+func (s SignatureVerifier) VerifySTHSignature(sth SignedTreeHead) error {
+ sthData, err := SerializeSTHSignatureInput(sth)
+ if err != nil {
+ return err
+ }
+ return s.verifySignature(sthData, sth.TreeHeadSignature)
+}
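+
+// Verification sketch, assuming sth was decoded from a log's get-sth
+// response and v is a SignatureVerifier built for that log's public key:
+//
+//	if err := v.VerifySTHSignature(sth); err != nil {
+//		// the tree head signature did not verify
+//	}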
diff --git a/vendor/src/github.com/google/certificate-transparency/go/types.go b/vendor/src/github.com/google/certificate-transparency/go/types.go
new file mode 100644
index 0000000000..8a63e98e60
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/types.go
@@ -0,0 +1,363 @@
+package ct
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+
+ "github.com/google/certificate-transparency/go/x509"
+)
+
+const (
+ issuerKeyHashLength = 32
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// The following structures represent those outlined in the RFC6962 document:
+///////////////////////////////////////////////////////////////////////////////
+
+// LogEntryType represents the LogEntryType enum from section 3.1 of the RFC:
+// enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType;
+type LogEntryType uint16
+
+func (e LogEntryType) String() string {
+ switch e {
+ case X509LogEntryType:
+ return "X509LogEntryType"
+ case PrecertLogEntryType:
+ return "PrecertLogEntryType"
+ }
+ panic(fmt.Sprintf("No string defined for LogEntryType constant value %d", e))
+}
+
+// LogEntryType constants, see section 3.1 of RFC6962.
+const (
+ X509LogEntryType LogEntryType = 0
+ PrecertLogEntryType LogEntryType = 1
+)
+
+// MerkleLeafType represents the MerkleLeafType enum from section 3.4 of the
+// RFC: enum { timestamped_entry(0), (255) } MerkleLeafType;
+type MerkleLeafType uint8
+
+func (m MerkleLeafType) String() string {
+ switch m {
+ case TimestampedEntryLeafType:
+ return "TimestampedEntryLeafType"
+ default:
+ return fmt.Sprintf("UnknownLeafType(%d)", m)
+ }
+}
+
+// MerkleLeafType constants, see section 3.4 of the RFC.
+const (
+ TimestampedEntryLeafType MerkleLeafType = 0 // Entry type for an SCT
+)
+
+// Version represents the Version enum from section 3.2 of the RFC:
+// enum { v1(0), (255) } Version;
+type Version uint8
+
+func (v Version) String() string {
+ switch v {
+ case V1:
+ return "V1"
+ default:
+ return fmt.Sprintf("UnknownVersion(%d)", v)
+ }
+}
+
+// CT Version constants, see section 3.2 of the RFC.
+const (
+ V1 Version = 0
+)
+
+// SignatureType differentiates STH signatures from SCT signatures, see RFC
+// section 3.2
+type SignatureType uint8
+
+func (st SignatureType) String() string {
+ switch st {
+ case CertificateTimestampSignatureType:
+ return "CertificateTimestamp"
+ case TreeHashSignatureType:
+ return "TreeHash"
+ default:
+ return fmt.Sprintf("UnknownSignatureType(%d)", st)
+ }
+}
+
+// SignatureType constants, see RFC section 3.2
+const (
+ CertificateTimestampSignatureType SignatureType = 0
+ TreeHashSignatureType SignatureType = 1
+)
+
+// ASN1Cert type for holding the raw DER bytes of an ASN.1 Certificate
+// (section 3.1)
+type ASN1Cert []byte
+
+// PreCert represents a Precertificate (section 3.2)
+type PreCert struct {
+ IssuerKeyHash [issuerKeyHashLength]byte
+ TBSCertificate []byte
+}
+
+// CTExtensions is a representation of the raw bytes of any CtExtension
+// structure (see section 3.2)
+type CTExtensions []byte
+
+// MerkleTreeNode represents an internal node in the CT tree
+type MerkleTreeNode []byte
+
+// ConsistencyProof represents a CT consistency proof (see sections 2.1.2 and
+// 4.4)
+type ConsistencyProof []MerkleTreeNode
+
+// AuditPath represents a CT inclusion proof (see sections 2.1.1 and 4.5)
+type AuditPath []MerkleTreeNode
+
+// LeafInput represents a serialized MerkleTreeLeaf structure
+type LeafInput []byte
+
+// HashAlgorithm from the DigitallySigned struct
+type HashAlgorithm byte
+
+// HashAlgorithm constants
+const (
+ None HashAlgorithm = 0
+ MD5 HashAlgorithm = 1
+ SHA1 HashAlgorithm = 2
+ SHA224 HashAlgorithm = 3
+ SHA256 HashAlgorithm = 4
+ SHA384 HashAlgorithm = 5
+ SHA512 HashAlgorithm = 6
+)
+
+func (h HashAlgorithm) String() string {
+ switch h {
+ case None:
+ return "None"
+ case MD5:
+ return "MD5"
+ case SHA1:
+ return "SHA1"
+ case SHA224:
+ return "SHA224"
+ case SHA256:
+ return "SHA256"
+ case SHA384:
+ return "SHA384"
+ case SHA512:
+ return "SHA512"
+ default:
+ return fmt.Sprintf("UNKNOWN(%d)", h)
+ }
+}
+
+// SignatureAlgorithm from the DigitallySigned struct
+type SignatureAlgorithm byte
+
+// SignatureAlgorithm constants
+const (
+ Anonymous SignatureAlgorithm = 0
+ RSA SignatureAlgorithm = 1
+ DSA SignatureAlgorithm = 2
+ ECDSA SignatureAlgorithm = 3
+)
+
+func (s SignatureAlgorithm) String() string {
+ switch s {
+ case Anonymous:
+ return "Anonymous"
+ case RSA:
+ return "RSA"
+ case DSA:
+ return "DSA"
+ case ECDSA:
+ return "ECDSA"
+ default:
+ return fmt.Sprintf("UNKNOWN(%d)", s)
+ }
+}
+
+// DigitallySigned represents an RFC5246 DigitallySigned structure
+type DigitallySigned struct {
+ HashAlgorithm HashAlgorithm
+ SignatureAlgorithm SignatureAlgorithm
+ Signature []byte
+}
+
+// FromBase64String populates the DigitallySigned structure from the base64 data passed in.
+// Returns an error if the base64 data is invalid.
+func (d *DigitallySigned) FromBase64String(b64 string) error {
+ raw, err := base64.StdEncoding.DecodeString(b64)
+ if err != nil {
+ return fmt.Errorf("failed to unbase64 DigitallySigned: %v", err)
+ }
+ ds, err := UnmarshalDigitallySigned(bytes.NewReader(raw))
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
+ }
+ *d = *ds
+ return nil
+}
+
+// Base64String returns the base64 representation of the DigitallySigned struct.
+func (d DigitallySigned) Base64String() (string, error) {
+ b, err := MarshalDigitallySigned(d)
+ if err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(b), nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d DigitallySigned) MarshalJSON() ([]byte, error) {
+ b64, err := d.Base64String()
+ if err != nil {
+ return []byte{}, err
+ }
+ return []byte(`"` + b64 + `"`), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (d *DigitallySigned) UnmarshalJSON(b []byte) error {
+ var content string
+ if err := json.Unmarshal(b, &content); err != nil {
+ return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
+ }
+ return d.FromBase64String(content)
+}
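+
+// JSON round-trip sketch for DigitallySigned, with d a hypothetical value;
+// the wire form is a single base64 string:
+//
+//	raw, err := json.Marshal(d)
+//	var d2 DigitallySigned
+//	if err == nil {
+//		err = json.Unmarshal(raw, &d2)
+//	}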
+
+// LogEntry represents the contents of an entry in a CT log, see section 3.1.
+type LogEntry struct {
+ Index int64
+ Leaf MerkleTreeLeaf
+ X509Cert *x509.Certificate
+ Precert *Precertificate
+ Chain []ASN1Cert
+}
+
+// SHA256Hash represents the output from the SHA256 hash function.
+type SHA256Hash [sha256.Size]byte
+
+// FromBase64String populates the SHA256Hash struct with the contents of the base64 data passed in.
+func (s *SHA256Hash) FromBase64String(b64 string) error {
+ bs, err := base64.StdEncoding.DecodeString(b64)
+ if err != nil {
+ return fmt.Errorf("failed to unbase64 LogID: %v", err)
+ }
+ if len(bs) != sha256.Size {
+ return fmt.Errorf("invalid SHA256 length, expected 32 but got %d", len(bs))
+ }
+ copy(s[:], bs)
+ return nil
+}
+
+// Base64String returns the base64 representation of this SHA256Hash.
+func (s SHA256Hash) Base64String() string {
+ return base64.StdEncoding.EncodeToString(s[:])
+}
+
+// MarshalJSON implements the json.Marshaler interface for SHA256Hash.
+func (s SHA256Hash) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + s.Base64String() + `"`), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (s *SHA256Hash) UnmarshalJSON(b []byte) error {
+ var content string
+ if err := json.Unmarshal(b, &content); err != nil {
+ return fmt.Errorf("failed to unmarshal SHA256Hash: %v", err)
+ }
+ return s.FromBase64String(content)
+}
+
+// SignedTreeHead represents the structure returned by the get-sth CT method
+// after base64 decoding. (See sections 3.5 and 4.3 in the RFC.)
+type SignedTreeHead struct {
+ Version Version `json:"sth_version"` // The version of the protocol to which the STH conforms
+ TreeSize uint64 `json:"tree_size"` // The number of entries in the new tree
+ Timestamp uint64 `json:"timestamp"` // The time at which the STH was created
+ SHA256RootHash SHA256Hash `json:"sha256_root_hash"` // The root hash of the log's Merkle tree
+ TreeHeadSignature DigitallySigned `json:"tree_head_signature"` // The Log's signature for this STH (see RFC section 3.5)
+ LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key
+}
+
+// SignedCertificateTimestamp represents the structure returned by the
+// add-chain and add-pre-chain methods after base64 decoding. (see RFC sections
+// 3.2, 4.1 and 4.2.)
+type SignedCertificateTimestamp struct {
+ SCTVersion Version // The version of the protocol to which the SCT conforms
+ LogID SHA256Hash // the SHA-256 hash of the log's public key, calculated over
+ // the DER encoding of the key represented as SubjectPublicKeyInfo.
+	Timestamp uint64 // Timestamp (in ms since the Unix epoch) at which the SCT was issued
+ Extensions CTExtensions // For future extensions to the protocol
+ Signature DigitallySigned // The Log's signature for this SCT
+}
+
+func (s SignedCertificateTimestamp) String() string {
+ return fmt.Sprintf("{Version:%d LogId:%s Timestamp:%d Extensions:'%s' Signature:%v}", s.SCTVersion,
+ base64.StdEncoding.EncodeToString(s.LogID[:]),
+ s.Timestamp,
+ s.Extensions,
+ s.Signature)
+}
+
+// TimestampedEntry is part of the MerkleTreeLeaf structure.
+// See RFC section 3.4
+type TimestampedEntry struct {
+ Timestamp uint64
+ EntryType LogEntryType
+ X509Entry ASN1Cert
+ PrecertEntry PreCert
+ Extensions CTExtensions
+}
+
+// MerkleTreeLeaf represents the deserialized structure of the hash input for
+// the leaves of a log's Merkle tree. See RFC section 3.4.
+type MerkleTreeLeaf struct {
+ Version Version // the version of the protocol to which the MerkleTreeLeaf corresponds
+ LeafType MerkleLeafType // The type of the leaf input, currently only TimestampedEntry can exist
+ TimestampedEntry TimestampedEntry // The entry data itself
+}
+
+// Precertificate represents the parsed CT Precertificate structure.
+type Precertificate struct {
+ // Raw DER bytes of the precert
+ Raw []byte
+ // SHA256 hash of the issuing key
+ IssuerKeyHash [issuerKeyHashLength]byte
+ // Parsed TBSCertificate structure (held in an x509.Certificate for ease of
+	// access).
+ TBSCertificate x509.Certificate
+}
+
+// X509Certificate returns the X.509 Certificate contained within the
+// MerkleTreeLeaf.
+// Returns a pointer to an x509.Certificate or a non-nil error.
+func (m *MerkleTreeLeaf) X509Certificate() (*x509.Certificate, error) {
+ return x509.ParseCertificate(m.TimestampedEntry.X509Entry)
+}
+
+type sctError int
+
+// Preallocate errors for performance
+var (
+ ErrInvalidVersion error = sctError(1)
+ ErrNotEnoughBuffer error = sctError(2)
+)
+
+func (e sctError) Error() string {
+ switch e {
+ case ErrInvalidVersion:
+ return "invalid SCT version detected"
+ case ErrNotEnoughBuffer:
+ return "provided buffer was too small"
+ default:
+ return "unknown error"
+ }
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/cert_pool.go b/vendor/src/github.com/google/certificate-transparency/go/x509/cert_pool.go
new file mode 100755
index 0000000000..babe94d41c
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/cert_pool.go
@@ -0,0 +1,116 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "encoding/pem"
+)
+
+// CertPool is a set of certificates.
+type CertPool struct {
+ bySubjectKeyId map[string][]int
+ byName map[string][]int
+ certs []*Certificate
+}
+
+// NewCertPool returns a new, empty CertPool.
+func NewCertPool() *CertPool {
+ return &CertPool{
+ make(map[string][]int),
+ make(map[string][]int),
+ nil,
+ }
+}
+
+// findVerifiedParents attempts to find certificates in s which have signed the
+// given certificate. If any candidates were rejected then errCert will be set
+// to one of them, arbitrarily, and err will contain the reason that it was
+// rejected.
+func (s *CertPool) findVerifiedParents(cert *Certificate) (parents []int, errCert *Certificate, err error) {
+ if s == nil {
+ return
+ }
+ var candidates []int
+
+ if len(cert.AuthorityKeyId) > 0 {
+ candidates = s.bySubjectKeyId[string(cert.AuthorityKeyId)]
+ }
+ if len(candidates) == 0 {
+ candidates = s.byName[string(cert.RawIssuer)]
+ }
+
+ for _, c := range candidates {
+ if err = cert.CheckSignatureFrom(s.certs[c]); err == nil {
+ parents = append(parents, c)
+ } else {
+ errCert = s.certs[c]
+ }
+ }
+
+ return
+}
+
+// AddCert adds a certificate to a pool.
+func (s *CertPool) AddCert(cert *Certificate) {
+ if cert == nil {
+ panic("adding nil Certificate to CertPool")
+ }
+
+ // Check that the certificate isn't being added twice.
+ for _, c := range s.certs {
+ if c.Equal(cert) {
+ return
+ }
+ }
+
+ n := len(s.certs)
+ s.certs = append(s.certs, cert)
+
+ if len(cert.SubjectKeyId) > 0 {
+ keyId := string(cert.SubjectKeyId)
+ s.bySubjectKeyId[keyId] = append(s.bySubjectKeyId[keyId], n)
+ }
+ name := string(cert.RawSubject)
+ s.byName[name] = append(s.byName[name], n)
+}
+
+// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates.
+// It appends any certificates found to s and returns true if any certificates
+// were successfully parsed.
+//
+// On many Linux systems, /etc/ssl/cert.pem will contain the system wide set
+// of root CAs in a format suitable for this function.
+func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
+ for len(pemCerts) > 0 {
+ var block *pem.Block
+ block, pemCerts = pem.Decode(pemCerts)
+ if block == nil {
+ break
+ }
+ if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+ continue
+ }
+
+ cert, err := ParseCertificate(block.Bytes)
+ if err != nil {
+ continue
+ }
+
+ s.AddCert(cert)
+ ok = true
+ }
+
+ return
+}
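+
+// Usage sketch, assuming caBytes holds one or more PEM "CERTIFICATE"
+// blocks (for example, the contents of /etc/ssl/certs/ca-certificates.crt):
+//
+//	pool := NewCertPool()
+//	if !pool.AppendCertsFromPEM(caBytes) {
+//		// no certificate could be parsed from caBytes
+//	}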
+
+// Subjects returns a list of the DER-encoded subjects of
+// all of the certificates in the pool.
+func (s *CertPool) Subjects() (res [][]byte) {
+ res = make([][]byte, len(s.certs))
+ for i, c := range s.certs {
+ res[i] = c.RawSubject
+ }
+ return
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/pem_decrypt.go b/vendor/src/github.com/google/certificate-transparency/go/x509/pem_decrypt.go
new file mode 100755
index 0000000000..194c81bf68
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/pem_decrypt.go
@@ -0,0 +1,233 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+// RFC 1423 describes the encryption of PEM blocks. The algorithm used to
+// generate a key from the password was derived by looking at the OpenSSL
+// implementation.
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/des"
+ "crypto/md5"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "io"
+ "strings"
+)
+
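+// PEMCipher is an enumeration of the ciphers supported for PEM block encryption.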
+type PEMCipher int
+
+// Possible values for the EncryptPEMBlock encryption algorithm.
+const (
+ _ PEMCipher = iota
+ PEMCipherDES
+ PEMCipher3DES
+ PEMCipherAES128
+ PEMCipherAES192
+ PEMCipherAES256
+)
+
+// rfc1423Algo holds a method for enciphering a PEM block.
+type rfc1423Algo struct {
+ cipher PEMCipher
+ name string
+ cipherFunc func(key []byte) (cipher.Block, error)
+ keySize int
+ blockSize int
+}
+
+// rfc1423Algos holds a slice of the possible ways to encrypt a PEM
+// block. The ivSize numbers were taken from the OpenSSL source.
+var rfc1423Algos = []rfc1423Algo{{
+ cipher: PEMCipherDES,
+ name: "DES-CBC",
+ cipherFunc: des.NewCipher,
+ keySize: 8,
+ blockSize: des.BlockSize,
+}, {
+ cipher: PEMCipher3DES,
+ name: "DES-EDE3-CBC",
+ cipherFunc: des.NewTripleDESCipher,
+ keySize: 24,
+ blockSize: des.BlockSize,
+}, {
+ cipher: PEMCipherAES128,
+ name: "AES-128-CBC",
+ cipherFunc: aes.NewCipher,
+ keySize: 16,
+ blockSize: aes.BlockSize,
+}, {
+ cipher: PEMCipherAES192,
+ name: "AES-192-CBC",
+ cipherFunc: aes.NewCipher,
+ keySize: 24,
+ blockSize: aes.BlockSize,
+}, {
+ cipher: PEMCipherAES256,
+ name: "AES-256-CBC",
+ cipherFunc: aes.NewCipher,
+ keySize: 32,
+ blockSize: aes.BlockSize,
+},
+}
+
+// deriveKey uses a key derivation function to stretch the password into a key
+// with the number of bits our cipher requires. This algorithm was derived from
+// the OpenSSL source.
+func (c rfc1423Algo) deriveKey(password, salt []byte) []byte {
+ hash := md5.New()
+ out := make([]byte, c.keySize)
+ var digest []byte
+
+ for i := 0; i < len(out); i += len(digest) {
+ hash.Reset()
+ hash.Write(digest)
+ hash.Write(password)
+ hash.Write(salt)
+ digest = hash.Sum(digest[:0])
+ copy(out[i:], digest)
+ }
+ return out
+}
+
+// IsEncryptedPEMBlock returns whether the PEM block is password encrypted.
+func IsEncryptedPEMBlock(b *pem.Block) bool {
+ _, ok := b.Headers["DEK-Info"]
+ return ok
+}
+
+// IncorrectPasswordError is returned when an incorrect password is detected.
+var IncorrectPasswordError = errors.New("x509: decryption password incorrect")
+
+// DecryptPEMBlock takes a password encrypted PEM block and the password used to
+// encrypt it and returns a slice of decrypted DER encoded bytes. It inspects
+// the DEK-Info header to determine the algorithm used for decryption. If no
+// DEK-Info header is present, an error is returned. If an incorrect password
+// is detected an IncorrectPasswordError is returned.
+func DecryptPEMBlock(b *pem.Block, password []byte) ([]byte, error) {
+ dek, ok := b.Headers["DEK-Info"]
+ if !ok {
+ return nil, errors.New("x509: no DEK-Info header in block")
+ }
+
+ idx := strings.Index(dek, ",")
+ if idx == -1 {
+ return nil, errors.New("x509: malformed DEK-Info header")
+ }
+
+ mode, hexIV := dek[:idx], dek[idx+1:]
+ ciph := cipherByName(mode)
+ if ciph == nil {
+ return nil, errors.New("x509: unknown encryption mode")
+ }
+ iv, err := hex.DecodeString(hexIV)
+ if err != nil {
+ return nil, err
+ }
+ if len(iv) != ciph.blockSize {
+ return nil, errors.New("x509: incorrect IV size")
+ }
+
+ // Based on the OpenSSL implementation. The salt is the first 8 bytes
+ // of the initialization vector.
+ key := ciph.deriveKey(password, iv[:8])
+ block, err := ciph.cipherFunc(key)
+ if err != nil {
+ return nil, err
+ }
+
+ data := make([]byte, len(b.Bytes))
+ dec := cipher.NewCBCDecrypter(block, iv)
+ dec.CryptBlocks(data, b.Bytes)
+
+ // Blocks are padded using a scheme where the last n bytes of padding are all
+ // equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423.
+ // For example:
+ // [x y z 2 2]
+ // [x y 7 7 7 7 7 7 7]
+ // If we detect a bad padding, we assume it is an invalid password.
+ dlen := len(data)
+ if dlen == 0 || dlen%ciph.blockSize != 0 {
+ return nil, errors.New("x509: invalid padding")
+ }
+ last := int(data[dlen-1])
+ if dlen < last {
+ return nil, IncorrectPasswordError
+ }
+ if last == 0 || last > ciph.blockSize {
+ return nil, IncorrectPasswordError
+ }
+ for _, val := range data[dlen-last:] {
+ if int(val) != last {
+ return nil, IncorrectPasswordError
+ }
+ }
+ return data[:dlen-last], nil
+}
+
+// EncryptPEMBlock returns a PEM block of the specified type holding the
+// given DER-encoded data encrypted with the specified algorithm and
+// password.
+func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, alg PEMCipher) (*pem.Block, error) {
+ ciph := cipherByKey(alg)
+ if ciph == nil {
+ return nil, errors.New("x509: unknown encryption mode")
+ }
+ iv := make([]byte, ciph.blockSize)
+ if _, err := io.ReadFull(rand, iv); err != nil {
+ return nil, errors.New("x509: cannot generate IV: " + err.Error())
+ }
+ // The salt is the first 8 bytes of the initialization vector,
+ // matching the key derivation in DecryptPEMBlock.
+ key := ciph.deriveKey(password, iv[:8])
+ block, err := ciph.cipherFunc(key)
+ if err != nil {
+ return nil, err
+ }
+ enc := cipher.NewCBCEncrypter(block, iv)
+ pad := ciph.blockSize - len(data)%ciph.blockSize
+ encrypted := make([]byte, len(data), len(data)+pad)
+ // We could save this copy by encrypting all the whole blocks in
+ // the data separately, but it doesn't seem worth the additional
+ // code.
+ copy(encrypted, data)
+ // See RFC 1423, section 1.1
+ for i := 0; i < pad; i++ {
+ encrypted = append(encrypted, byte(pad))
+ }
+ enc.CryptBlocks(encrypted, encrypted)
+
+ return &pem.Block{
+ Type: blockType,
+ Headers: map[string]string{
+ "Proc-Type": "4,ENCRYPTED",
+ "DEK-Info": ciph.name + "," + hex.EncodeToString(iv),
+ },
+ Bytes: encrypted,
+ }, nil
+}
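+
+// Round-trip sketch for EncryptPEMBlock/DecryptPEMBlock; der and password
+// are hypothetical byte slices, and crypto/rand supplies the IV:
+//
+//	blk, err := EncryptPEMBlock(rand.Reader, "RSA PRIVATE KEY", der, password, PEMCipherAES256)
+//	if err == nil {
+//		plain, err := DecryptPEMBlock(blk, password) // plain == der
+//	}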
+
+func cipherByName(name string) *rfc1423Algo {
+ for i := range rfc1423Algos {
+ alg := &rfc1423Algos[i]
+ if alg.name == name {
+ return alg
+ }
+ }
+ return nil
+}
+
+func cipherByKey(key PEMCipher) *rfc1423Algo {
+ for i := range rfc1423Algos {
+ alg := &rfc1423Algos[i]
+ if alg.cipher == key {
+ return alg
+ }
+ }
+ return nil
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/pkcs1.go b/vendor/src/github.com/google/certificate-transparency/go/x509/pkcs1.go
new file mode 100755
index 0000000000..638bc67146
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/pkcs1.go
@@ -0,0 +1,124 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "crypto/rsa"
+ // START CT CHANGES
+ "github.com/google/certificate-transparency/go/asn1"
+ // END CT CHANGES
+ "errors"
+ "math/big"
+)
+
+// pkcs1PrivateKey is a structure which mirrors the PKCS#1 ASN.1 for an RSA private key.
+type pkcs1PrivateKey struct {
+ Version int
+ N *big.Int
+ E int
+ D *big.Int
+ P *big.Int
+ Q *big.Int
+ // We ignore these values, if present, because rsa will calculate them.
+ Dp *big.Int `asn1:"optional"`
+ Dq *big.Int `asn1:"optional"`
+ Qinv *big.Int `asn1:"optional"`
+
+ AdditionalPrimes []pkcs1AdditionalRSAPrime `asn1:"optional,omitempty"`
+}
+
+type pkcs1AdditionalRSAPrime struct {
+ Prime *big.Int
+
+ // We ignore these values because rsa will calculate them.
+ Exp *big.Int
+ Coeff *big.Int
+}
+
+// ParsePKCS1PrivateKey returns an RSA private key from its ASN.1 PKCS#1 DER encoded form.
+func ParsePKCS1PrivateKey(der []byte) (key *rsa.PrivateKey, err error) {
+ var priv pkcs1PrivateKey
+ rest, err := asn1.Unmarshal(der, &priv)
+ if len(rest) > 0 {
+ err = asn1.SyntaxError{Msg: "trailing data"}
+ return
+ }
+ if err != nil {
+ return
+ }
+
+ if priv.Version > 1 {
+ return nil, errors.New("x509: unsupported private key version")
+ }
+
+ if priv.N.Sign() <= 0 || priv.D.Sign() <= 0 || priv.P.Sign() <= 0 || priv.Q.Sign() <= 0 {
+ return nil, errors.New("x509: private key contains zero or negative value")
+ }
+
+ key = new(rsa.PrivateKey)
+ key.PublicKey = rsa.PublicKey{
+ E: priv.E,
+ N: priv.N,
+ }
+
+ key.D = priv.D
+ key.Primes = make([]*big.Int, 2+len(priv.AdditionalPrimes))
+ key.Primes[0] = priv.P
+ key.Primes[1] = priv.Q
+ for i, a := range priv.AdditionalPrimes {
+ if a.Prime.Sign() <= 0 {
+ return nil, errors.New("x509: private key contains zero or negative prime")
+ }
+ key.Primes[i+2] = a.Prime
+ // We ignore the other two values because rsa will calculate
+ // them as needed.
+ }
+
+ err = key.Validate()
+ if err != nil {
+ return nil, err
+ }
+ key.Precompute()
+
+ return
+}
+
+// MarshalPKCS1PrivateKey converts a private key to ASN.1 DER encoded form.
+func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte {
+ key.Precompute()
+
+ version := 0
+ if len(key.Primes) > 2 {
+ version = 1
+ }
+
+ priv := pkcs1PrivateKey{
+ Version: version,
+ N: key.N,
+ E: key.PublicKey.E,
+ D: key.D,
+ P: key.Primes[0],
+ Q: key.Primes[1],
+ Dp: key.Precomputed.Dp,
+ Dq: key.Precomputed.Dq,
+ Qinv: key.Precomputed.Qinv,
+ }
+
+ priv.AdditionalPrimes = make([]pkcs1AdditionalRSAPrime, len(key.Precomputed.CRTValues))
+ for i, values := range key.Precomputed.CRTValues {
+ priv.AdditionalPrimes[i].Prime = key.Primes[2+i]
+ priv.AdditionalPrimes[i].Exp = values.Exp
+ priv.AdditionalPrimes[i].Coeff = values.Coeff
+ }
+
+ b, _ := asn1.Marshal(priv)
+ return b
+}
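+
+// Round-trip sketch; the key is generated with crypto/rsa and crypto/rand
+// purely for illustration:
+//
+//	key, _ := rsa.GenerateKey(rand.Reader, 2048)
+//	der := MarshalPKCS1PrivateKey(key)
+//	key2, err := ParsePKCS1PrivateKey(der) // key2 matches key when err is nil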
+
+// rsaPublicKey reflects the ASN.1 structure of a PKCS#1 public key.
+type rsaPublicKey struct {
+ N *big.Int
+ E int
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/pkcs8.go b/vendor/src/github.com/google/certificate-transparency/go/x509/pkcs8.go
new file mode 100755
index 0000000000..c6ad70b3e6
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/pkcs8.go
@@ -0,0 +1,56 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ // START CT CHANGES
+ "github.com/google/certificate-transparency/go/asn1"
+ "github.com/google/certificate-transparency/go/x509/pkix"
+ // END CT CHANGES
+ "errors"
+ "fmt"
+)
+
+// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See
+// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn
+// and RFC5208.
+type pkcs8 struct {
+ Version int
+ Algo pkix.AlgorithmIdentifier
+ PrivateKey []byte
+ // optional attributes omitted.
+}
+
+// ParsePKCS8PrivateKey parses an unencrypted, PKCS#8 private key. See
+// http://www.rsa.com/rsalabs/node.asp?id=2130 and RFC5208.
+func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) {
+ var privKey pkcs8
+ if _, err := asn1.Unmarshal(der, &privKey); err != nil {
+ return nil, err
+ }
+ switch {
+ case privKey.Algo.Algorithm.Equal(oidPublicKeyRSA):
+ key, err = ParsePKCS1PrivateKey(privKey.PrivateKey)
+ if err != nil {
+ return nil, errors.New("x509: failed to parse RSA private key embedded in PKCS#8: " + err.Error())
+ }
+ return key, nil
+
+ case privKey.Algo.Algorithm.Equal(oidPublicKeyECDSA):
+ bytes := privKey.Algo.Parameters.FullBytes
+ namedCurveOID := new(asn1.ObjectIdentifier)
+ if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil {
+ namedCurveOID = nil
+ }
+ key, err = parseECPrivateKey(namedCurveOID, privKey.PrivateKey)
+ if err != nil {
+ return nil, errors.New("x509: failed to parse EC private key embedded in PKCS#8: " + err.Error())
+ }
+ return key, nil
+
+ default:
+ return nil, fmt.Errorf("x509: PKCS#8 wrapping contained private key with unknown algorithm: %v", privKey.Algo.Algorithm)
+ }
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/pkix/pkix.go b/vendor/src/github.com/google/certificate-transparency/go/x509/pkix/pkix.go
new file mode 100755
index 0000000000..67db6ae5e3
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/pkix/pkix.go
@@ -0,0 +1,173 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkix contains shared, low level structures used for ASN.1 parsing
+// and serialization of X.509 certificates, CRL and OCSP.
+package pkix
+
+import (
+ // START CT CHANGES
+ "github.com/google/certificate-transparency/go/asn1"
+ // END CT CHANGES
+ "math/big"
+ "time"
+)
+
+// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
+// 5280, section 4.1.1.2.
+type AlgorithmIdentifier struct {
+ Algorithm asn1.ObjectIdentifier
+ Parameters asn1.RawValue `asn1:"optional"`
+}
+
+type RDNSequence []RelativeDistinguishedNameSET
+
+type RelativeDistinguishedNameSET []AttributeTypeAndValue
+
+// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
+// http://tools.ietf.org/html/rfc5280#section-4.1.2.4
+type AttributeTypeAndValue struct {
+ Type asn1.ObjectIdentifier
+ Value interface{}
+}
+
+// Extension represents the ASN.1 structure of the same name. See RFC
+// 5280, section 4.2.
+type Extension struct {
+ Id asn1.ObjectIdentifier
+ Critical bool `asn1:"optional"`
+ Value []byte
+}
+
+// Name represents an X.509 distinguished name. This only includes the common
+// elements of a DN. Additional elements in the name are ignored.
+type Name struct {
+ Country, Organization, OrganizationalUnit []string
+ Locality, Province []string
+ StreetAddress, PostalCode []string
+ SerialNumber, CommonName string
+
+ Names []AttributeTypeAndValue
+}
+
+func (n *Name) FillFromRDNSequence(rdns *RDNSequence) {
+ for _, rdn := range *rdns {
+ if len(rdn) == 0 {
+ continue
+ }
+ atv := rdn[0]
+ n.Names = append(n.Names, atv)
+ value, ok := atv.Value.(string)
+ if !ok {
+ continue
+ }
+
+ t := atv.Type
+ if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 {
+ switch t[3] {
+ case 3:
+ n.CommonName = value
+ case 5:
+ n.SerialNumber = value
+ case 6:
+ n.Country = append(n.Country, value)
+ case 7:
+ n.Locality = append(n.Locality, value)
+ case 8:
+ n.Province = append(n.Province, value)
+ case 9:
+ n.StreetAddress = append(n.StreetAddress, value)
+ case 10:
+ n.Organization = append(n.Organization, value)
+ case 11:
+ n.OrganizationalUnit = append(n.OrganizationalUnit, value)
+ case 17:
+ n.PostalCode = append(n.PostalCode, value)
+ }
+ }
+ }
+}
+
+var (
+ oidCountry = []int{2, 5, 4, 6}
+ oidOrganization = []int{2, 5, 4, 10}
+ oidOrganizationalUnit = []int{2, 5, 4, 11}
+ oidCommonName = []int{2, 5, 4, 3}
+ oidSerialNumber = []int{2, 5, 4, 5}
+ oidLocality = []int{2, 5, 4, 7}
+ oidProvince = []int{2, 5, 4, 8}
+ oidStreetAddress = []int{2, 5, 4, 9}
+ oidPostalCode = []int{2, 5, 4, 17}
+)
+
+// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence
+// and returns the new value. The relativeDistinguishedNameSET contains an
+// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and
+// search for AttributeTypeAndValue.
+func appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence {
+ if len(values) == 0 {
+ return in
+ }
+
+ s := make([]AttributeTypeAndValue, len(values))
+ for i, value := range values {
+ s[i].Type = oid
+ s[i].Value = value
+ }
+
+ return append(in, s)
+}
+
+func (n Name) ToRDNSequence() (ret RDNSequence) {
+ ret = appendRDNs(ret, n.Country, oidCountry)
+ ret = appendRDNs(ret, n.Organization, oidOrganization)
+ ret = appendRDNs(ret, n.OrganizationalUnit, oidOrganizationalUnit)
+ ret = appendRDNs(ret, n.Locality, oidLocality)
+ ret = appendRDNs(ret, n.Province, oidProvince)
+ ret = appendRDNs(ret, n.StreetAddress, oidStreetAddress)
+ ret = appendRDNs(ret, n.PostalCode, oidPostalCode)
+ if len(n.CommonName) > 0 {
+ ret = appendRDNs(ret, []string{n.CommonName}, oidCommonName)
+ }
+ if len(n.SerialNumber) > 0 {
+ ret = appendRDNs(ret, []string{n.SerialNumber}, oidSerialNumber)
+ }
+
+ return ret
+}
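+
+// Sketch: building a Name and flattening it for ASN.1 marshaling (values
+// are illustrative):
+//
+//	n := Name{CommonName: "example.com", Organization: []string{"Example Org"}}
+//	rdns := n.ToRDNSequence()
+//	der, err := asn1.Marshal(rdns)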
+
+// CertificateList represents the ASN.1 structure of the same name. See RFC
+// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the
+// signature.
+type CertificateList struct {
+ TBSCertList TBSCertificateList
+ SignatureAlgorithm AlgorithmIdentifier
+ SignatureValue asn1.BitString
+}
+
+// HasExpired reports whether now is past the expiry time of certList.
+func (certList *CertificateList) HasExpired(now time.Time) bool {
+ return now.After(certList.TBSCertList.NextUpdate)
+}
+
+// TBSCertificateList represents the ASN.1 structure of the same name. See RFC
+// 5280, section 5.1.
+type TBSCertificateList struct {
+ Raw asn1.RawContent
+ Version int `asn1:"optional,default:2"`
+ Signature AlgorithmIdentifier
+ Issuer RDNSequence
+ ThisUpdate time.Time
+ NextUpdate time.Time
+ RevokedCertificates []RevokedCertificate `asn1:"optional"`
+ Extensions []Extension `asn1:"tag:0,optional,explicit"`
+}
+
+// RevokedCertificate represents the ASN.1 structure of the same name. See RFC
+// 5280, section 5.1.
+type RevokedCertificate struct {
+ SerialNumber *big.Int
+ RevocationTime time.Time
+ Extensions []Extension `asn1:"optional"`
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/root.go b/vendor/src/github.com/google/certificate-transparency/go/x509/root.go
new file mode 100755
index 0000000000..8aae14e09e
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/root.go
@@ -0,0 +1,17 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import "sync"
+
+var (
+ once sync.Once
+ systemRoots *CertPool
+)
+
+func systemRootsPool() *CertPool {
+ once.Do(initSystemRoots)
+ return systemRoots
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/root_darwin.go b/vendor/src/github.com/google/certificate-transparency/go/x509/root_darwin.go
new file mode 100755
index 0000000000..3d3fa4e4d1
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/root_darwin.go
@@ -0,0 +1,83 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin,cgo
+
+package x509
+
+/*
+#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1060
+#cgo LDFLAGS: -framework CoreFoundation -framework Security
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <Security/Security.h>
+
+// FetchPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates.
+//
+// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root
+// certificates of the system. On failure, the function returns -1.
+//
+// Note: The CFDataRef returned in pemRoots must be released (using CFRelease) after
+// we've consumed its content.
+int FetchPEMRootsCTX509(CFDataRef *pemRoots) {
+ if (pemRoots == NULL) {
+ return -1;
+ }
+
+ CFArrayRef certs = NULL;
+ OSStatus err = SecTrustCopyAnchorCertificates(&certs);
+ if (err != noErr) {
+ return -1;
+ }
+
+ CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
+ int i, ncerts = CFArrayGetCount(certs);
+ for (i = 0; i < ncerts; i++) {
+ CFDataRef data = NULL;
+ SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, i);
+ if (cert == NULL) {
+ continue;
+ }
+
+ // Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport.
+ // Once we support weak imports via cgo we should prefer that, and fall back to this
+ // for older systems.
+ err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
+ if (err != noErr) {
+ continue;
+ }
+
+ if (data != NULL) {
+ CFDataAppendBytes(combinedData, CFDataGetBytePtr(data), CFDataGetLength(data));
+ CFRelease(data);
+ }
+ }
+
+ CFRelease(certs);
+
+ *pemRoots = combinedData;
+ return 0;
+}
+*/
+import "C"
+import "unsafe"
+
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ return nil, nil
+}
+
+func initSystemRoots() {
+ roots := NewCertPool()
+
+ var data C.CFDataRef = nil
+ err := C.FetchPEMRootsCTX509(&data)
+ if err == -1 {
+ return
+ }
+
+ defer C.CFRelease(C.CFTypeRef(data))
+ buf := C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(data)), C.int(C.CFDataGetLength(data)))
+ roots.AppendCertsFromPEM(buf)
+ systemRoots = roots
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/root_plan9.go b/vendor/src/github.com/google/certificate-transparency/go/x509/root_plan9.go
new file mode 100755
index 0000000000..9965caadee
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/root_plan9.go
@@ -0,0 +1,33 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build plan9
+
+package x509
+
+import "io/ioutil"
+
+// Possible certificate files; stop after finding one.
+var certFiles = []string{
+ "/sys/lib/tls/ca.pem",
+}
+
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ return nil, nil
+}
+
+func initSystemRoots() {
+ roots := NewCertPool()
+ for _, file := range certFiles {
+ data, err := ioutil.ReadFile(file)
+ if err == nil {
+ roots.AppendCertsFromPEM(data)
+ systemRoots = roots
+ return
+ }
+ }
+
+ // All of the files failed to load. systemRoots will be nil which will
+ // trigger a specific error at verification time.
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/root_stub.go b/vendor/src/github.com/google/certificate-transparency/go/x509/root_stub.go
new file mode 100755
index 0000000000..4c742ccc37
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/root_stub.go
@@ -0,0 +1,14 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin,!cgo
+
+package x509
+
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ return nil, nil
+}
+
+func initSystemRoots() {
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/root_unix.go b/vendor/src/github.com/google/certificate-transparency/go/x509/root_unix.go
new file mode 100755
index 0000000000..324f855b13
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/root_unix.go
@@ -0,0 +1,37 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly freebsd linux openbsd netbsd
+
+package x509
+
+import "io/ioutil"
+
+// Possible certificate files; stop after finding one.
+var certFiles = []string{
+ "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
+ "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
+ "/etc/ssl/ca-bundle.pem", // OpenSUSE
+ "/etc/ssl/cert.pem", // OpenBSD
+ "/usr/local/share/certs/ca-root-nss.crt", // FreeBSD/DragonFly
+}
+
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ return nil, nil
+}
+
+func initSystemRoots() {
+ roots := NewCertPool()
+ for _, file := range certFiles {
+ data, err := ioutil.ReadFile(file)
+ if err == nil {
+ roots.AppendCertsFromPEM(data)
+ systemRoots = roots
+ return
+ }
+ }
+
+ // All of the files failed to load. systemRoots will be nil which will
+ // trigger a specific error at verification time.
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/root_windows.go b/vendor/src/github.com/google/certificate-transparency/go/x509/root_windows.go
new file mode 100755
index 0000000000..81018b78fe
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/root_windows.go
@@ -0,0 +1,229 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "errors"
+ "syscall"
+ "unsafe"
+)
+
+// Creates a new *syscall.CertContext representing the leaf certificate in an in-memory
+// certificate store containing itself and all of the intermediate certificates specified
+// in the opts.Intermediates CertPool.
+//
+// A pointer to the in-memory store is available in the returned CertContext's Store field.
+// The store is automatically freed when the CertContext is freed using
+// syscall.CertFreeCertificateContext.
+func createStoreContext(leaf *Certificate, opts *VerifyOptions) (*syscall.CertContext, error) {
+ var storeCtx *syscall.CertContext
+
+ leafCtx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &leaf.Raw[0], uint32(len(leaf.Raw)))
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CertFreeCertificateContext(leafCtx)
+
+ handle, err := syscall.CertOpenStore(syscall.CERT_STORE_PROV_MEMORY, 0, 0, syscall.CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CertCloseStore(handle, 0)
+
+ err = syscall.CertAddCertificateContextToStore(handle, leafCtx, syscall.CERT_STORE_ADD_ALWAYS, &storeCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ if opts.Intermediates != nil {
+ for _, intermediate := range opts.Intermediates.certs {
+ ctx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &intermediate.Raw[0], uint32(len(intermediate.Raw)))
+ if err != nil {
+ return nil, err
+ }
+
+ err = syscall.CertAddCertificateContextToStore(handle, ctx, syscall.CERT_STORE_ADD_ALWAYS, nil)
+ syscall.CertFreeCertificateContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return storeCtx, nil
+}
+
+// extractSimpleChain extracts the final certificate chain from a CertSimpleChain.
+func extractSimpleChain(simpleChain **syscall.CertSimpleChain, count int) (chain []*Certificate, err error) {
+ if simpleChain == nil || count == 0 {
+ return nil, errors.New("x509: invalid simple chain")
+ }
+
+ simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:]
+ lastChain := simpleChains[count-1]
+ elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:]
+ for i := 0; i < int(lastChain.NumElements); i++ {
+ // Copy the buf, since ParseCertificate does not create its own copy.
+ cert := elements[i].CertContext
+ encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:]
+ buf := make([]byte, cert.Length)
+ copy(buf, encodedCert[:])
+ parsedCert, err := ParseCertificate(buf)
+ if err != nil {
+ return nil, err
+ }
+ chain = append(chain, parsedCert)
+ }
+
+ return chain, nil
+}
+
+// checkChainTrustStatus checks the trust status of the certificate chain, translating
+// any errors it finds into Go errors in the process.
+func checkChainTrustStatus(c *Certificate, chainCtx *syscall.CertChainContext) error {
+ if chainCtx.TrustStatus.ErrorStatus != syscall.CERT_TRUST_NO_ERROR {
+ status := chainCtx.TrustStatus.ErrorStatus
+ switch status {
+ case syscall.CERT_TRUST_IS_NOT_TIME_VALID:
+ return CertificateInvalidError{c, Expired}
+ default:
+ return UnknownAuthorityError{c, nil, nil}
+ }
+ }
+ return nil
+}
+
+// checkChainSSLServerPolicy checks that the certificate chain in chainCtx is valid for
+// use as a certificate chain for a SSL/TLS server.
+func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContext, opts *VerifyOptions) error {
+ servernamep, err := syscall.UTF16PtrFromString(opts.DNSName)
+ if err != nil {
+ return err
+ }
+ sslPara := &syscall.SSLExtraCertChainPolicyPara{
+ AuthType: syscall.AUTHTYPE_SERVER,
+ ServerName: servernamep,
+ }
+ sslPara.Size = uint32(unsafe.Sizeof(*sslPara))
+
+ para := &syscall.CertChainPolicyPara{
+ ExtraPolicyPara: uintptr(unsafe.Pointer(sslPara)),
+ }
+ para.Size = uint32(unsafe.Sizeof(*para))
+
+ status := syscall.CertChainPolicyStatus{}
+ err = syscall.CertVerifyCertificateChainPolicy(syscall.CERT_CHAIN_POLICY_SSL, chainCtx, para, &status)
+ if err != nil {
+ return err
+ }
+
+ // TODO(mkrautz): use the lChainIndex and lElementIndex fields
+ // of the CertChainPolicyStatus to provide proper context, instead
+ // of using c.
+ if status.Error != 0 {
+ switch status.Error {
+ case syscall.CERT_E_EXPIRED:
+ return CertificateInvalidError{c, Expired}
+ case syscall.CERT_E_CN_NO_MATCH:
+ return HostnameError{c, opts.DNSName}
+ case syscall.CERT_E_UNTRUSTEDROOT:
+ return UnknownAuthorityError{c, nil, nil}
+ default:
+ return UnknownAuthorityError{c, nil, nil}
+ }
+ }
+
+ return nil
+}
+
+// systemVerify is like Verify, except that it uses CryptoAPI calls
+// to build certificate chains and verify them.
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ hasDNSName := opts != nil && len(opts.DNSName) > 0
+
+ storeCtx, err := createStoreContext(c, opts)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CertFreeCertificateContext(storeCtx)
+
+ para := new(syscall.CertChainPara)
+ para.Size = uint32(unsafe.Sizeof(*para))
+
+ // If there's a DNSName set in opts, assume we're verifying
+ // a certificate from a TLS server.
+ if hasDNSName {
+ oids := []*byte{
+ &syscall.OID_PKIX_KP_SERVER_AUTH[0],
+ // Both IE and Chrome allow certificates with
+ // Server Gated Crypto as well. Some certificates
+ // in the wild require them.
+ &syscall.OID_SERVER_GATED_CRYPTO[0],
+ &syscall.OID_SGC_NETSCAPE[0],
+ }
+ para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_OR
+ para.RequestedUsage.Usage.Length = uint32(len(oids))
+ para.RequestedUsage.Usage.UsageIdentifiers = &oids[0]
+ } else {
+ para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_AND
+ para.RequestedUsage.Usage.Length = 0
+ para.RequestedUsage.Usage.UsageIdentifiers = nil
+ }
+
+ var verifyTime *syscall.Filetime
+ if opts != nil && !opts.CurrentTime.IsZero() {
+ ft := syscall.NsecToFiletime(opts.CurrentTime.UnixNano())
+ verifyTime = &ft
+ }
+
+ // CertGetCertificateChain will traverse Windows's root stores
+ // in an attempt to build a verified certificate chain. Once
+ // it has found a verified chain, it stops. MSDN docs on
+ // CERT_CHAIN_CONTEXT:
+ //
+ // When a CERT_CHAIN_CONTEXT is built, the first simple chain
+ // begins with an end certificate and ends with a self-signed
+ // certificate. If that self-signed certificate is not a root
+ // or otherwise trusted certificate, an attempt is made to
+ // build a new chain. CTLs are used to create the new chain
+ // beginning with the self-signed certificate from the original
+ // chain as the end certificate of the new chain. This process
+ // continues building additional simple chains until the first
+ // self-signed certificate is a trusted certificate or until
+ // an additional simple chain cannot be built.
+ //
+ // The result is that we'll only get a single trusted chain to
+ // return to our caller.
+ var chainCtx *syscall.CertChainContext
+ err = syscall.CertGetCertificateChain(syscall.Handle(0), storeCtx, verifyTime, storeCtx.Store, para, 0, 0, &chainCtx)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CertFreeCertificateChain(chainCtx)
+
+ err = checkChainTrustStatus(c, chainCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ if hasDNSName {
+ err = checkChainSSLServerPolicy(c, chainCtx, opts)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ chain, err := extractSimpleChain(chainCtx.Chains, int(chainCtx.ChainCount))
+ if err != nil {
+ return nil, err
+ }
+
+ chains = append(chains, chain)
+
+ return chains, nil
+}
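+
+// Illustrative note (not upstream code): callers reach systemVerify through
+// Verify in verify.go, which delegates here on Windows whenever opts.Roots is
+// nil, letting CryptoAPI supply and check the roots:
+//
+//	// with runtime.GOOS == "windows" and no Roots set
+//	chains, err := cert.Verify(VerifyOptions{DNSName: "example.com"})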
+
+// initSystemRoots is a no-op on Windows: root certificates live in the
+// system's certificate stores and are consulted by CryptoAPI in systemVerify.
+func initSystemRoots() {
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/sec1.go b/vendor/src/github.com/google/certificate-transparency/go/x509/sec1.go
new file mode 100755
index 0000000000..dbc255f0ca
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/sec1.go
@@ -0,0 +1,85 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ // START CT CHANGES
+ "github.com/google/certificate-transparency/go/asn1"
+ // END CT CHANGES
+ "errors"
+ "fmt"
+ "math/big"
+)
+
+const ecPrivKeyVersion = 1
+
+// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure.
+// References:
+// RFC5915
+// SEC1 - http://www.secg.org/download/aid-780/sec1-v2.pdf
+// Per RFC5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, however in
+// most cases it is not.
+type ecPrivateKey struct {
+ Version int
+ PrivateKey []byte
+ NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"`
+ PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"`
+}
+
+// ParseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
+func ParseECPrivateKey(der []byte) (key *ecdsa.PrivateKey, err error) {
+ return parseECPrivateKey(nil, der)
+}
+
+// MarshalECPrivateKey marshals an EC private key into ASN.1, DER format.
+func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
+ oid, ok := oidFromNamedCurve(key.Curve)
+ if !ok {
+ return nil, errors.New("x509: unknown elliptic curve")
+ }
+ return asn1.Marshal(ecPrivateKey{
+ Version: 1,
+ PrivateKey: key.D.Bytes(),
+ NamedCurveOID: oid,
+ PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)},
+ })
+}
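+
+// Illustrative caller-side sketch (not upstream code): the DER output is
+// usually wrapped in a PEM block. Here w is an assumed io.Writer, key an
+// assumed *ecdsa.PrivateKey, and encoding/pem is imported by the caller.
+//
+//	der, err := MarshalECPrivateKey(key)
+//	if err != nil {
+//		// the curve is not one of the supported named curves
+//	}
+//	pem.Encode(w, &pem.Block{Type: "EC PRIVATE KEY", Bytes: der})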
+
+// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
+// The OID for the named curve may be provided from another source (such as
+// the PKCS8 container) - if it is provided then use this instead of the OID
+// that may exist in the EC private key structure.
+func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) {
+ var privKey ecPrivateKey
+ if _, err := asn1.Unmarshal(der, &privKey); err != nil {
+ return nil, errors.New("x509: failed to parse EC private key: " + err.Error())
+ }
+ if privKey.Version != ecPrivKeyVersion {
+ return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
+ }
+
+ var curve elliptic.Curve
+ if namedCurveOID != nil {
+ curve = namedCurveFromOID(*namedCurveOID)
+ } else {
+ curve = namedCurveFromOID(privKey.NamedCurveOID)
+ }
+ if curve == nil {
+ return nil, errors.New("x509: unknown elliptic curve")
+ }
+
+ k := new(big.Int).SetBytes(privKey.PrivateKey)
+ if k.Cmp(curve.Params().N) >= 0 {
+ return nil, errors.New("x509: invalid elliptic curve private key value")
+ }
+ priv := new(ecdsa.PrivateKey)
+ priv.Curve = curve
+ priv.D = k
+ priv.X, priv.Y = curve.ScalarBaseMult(privKey.PrivateKey)
+
+ return priv, nil
+}
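+
+// Illustrative caller-side sketch (not upstream code): pemBytes is assumed to
+// hold an "EC PRIVATE KEY" block, decoded with encoding/pem by the caller.
+//
+//	block, _ := pem.Decode(pemBytes)
+//	key, err := ParseECPrivateKey(block.Bytes)
+//	if err != nil {
+//		// malformed structure, bad version, or unknown curve
+//	}
+//	_ = key.PublicKey // Curve, X and Y are recomputed from the scalar above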
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/verify.go b/vendor/src/github.com/google/certificate-transparency/go/x509/verify.go
new file mode 100755
index 0000000000..e82dbc12b0
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/verify.go
@@ -0,0 +1,476 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "fmt"
+ "net"
+ "runtime"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+type InvalidReason int
+
+const (
+ // NotAuthorizedToSign results when a certificate is signed by another
+ // which isn't marked as a CA certificate.
+ NotAuthorizedToSign InvalidReason = iota
+ // Expired results when a certificate has expired, based on the time
+ // given in the VerifyOptions.
+ Expired
+ // CANotAuthorizedForThisName results when an intermediate or root
+ // certificate has a name constraint which doesn't include the name
+ // being checked.
+ CANotAuthorizedForThisName
+ // TooManyIntermediates results when a path length constraint is
+ // violated.
+ TooManyIntermediates
+ // IncompatibleUsage results when the certificate's key usage indicates
+ // that it may only be used for a different purpose.
+ IncompatibleUsage
+)
+
+// CertificateInvalidError results when an odd error occurs. Users of this
+// library probably want to handle all these errors uniformly.
+type CertificateInvalidError struct {
+ Cert *Certificate
+ Reason InvalidReason
+}
+
+func (e CertificateInvalidError) Error() string {
+ switch e.Reason {
+ case NotAuthorizedToSign:
+ return "x509: certificate is not authorized to sign other certificates"
+ case Expired:
+ return "x509: certificate has expired or is not yet valid"
+ case CANotAuthorizedForThisName:
+ return "x509: a root or intermediate certificate is not authorized to sign in this domain"
+ case TooManyIntermediates:
+ return "x509: too many intermediates for path length constraint"
+ case IncompatibleUsage:
+ return "x509: certificate specifies an incompatible key usage"
+ }
+ return "x509: unknown error"
+}
+
+// HostnameError results when the set of authorized names doesn't match the
+// requested name.
+type HostnameError struct {
+ Certificate *Certificate
+ Host string
+}
+
+func (h HostnameError) Error() string {
+ c := h.Certificate
+
+ var valid string
+ if ip := net.ParseIP(h.Host); ip != nil {
+ // Trying to validate an IP
+ if len(c.IPAddresses) == 0 {
+ return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs"
+ }
+ for _, san := range c.IPAddresses {
+ if len(valid) > 0 {
+ valid += ", "
+ }
+ valid += san.String()
+ }
+ } else {
+ if len(c.DNSNames) > 0 {
+ valid = strings.Join(c.DNSNames, ", ")
+ } else {
+ valid = c.Subject.CommonName
+ }
+ }
+ return "x509: certificate is valid for " + valid + ", not " + h.Host
+}
+
+// UnknownAuthorityError results when the certificate issuer is unknown
+type UnknownAuthorityError struct {
+ cert *Certificate
+ // hintErr contains an error that may be helpful in determining why an
+ // authority wasn't found.
+ hintErr error
+ // hintCert contains a possible authority certificate that was rejected
+ // because of the error in hintErr.
+ hintCert *Certificate
+}
+
+func (e UnknownAuthorityError) Error() string {
+ s := "x509: certificate signed by unknown authority"
+ if e.hintErr != nil {
+ certName := e.hintCert.Subject.CommonName
+ if len(certName) == 0 {
+ if len(e.hintCert.Subject.Organization) > 0 {
+ certName = e.hintCert.Subject.Organization[0]
+ } else {
+ certName = "serial:" + e.hintCert.SerialNumber.String()
+ }
+ }
+ s += fmt.Sprintf(" (possibly because of %q while trying to verify candidate authority certificate %q)", e.hintErr, certName)
+ }
+ return s
+}
+
+// SystemRootsError results when we fail to load the system root certificates.
+type SystemRootsError struct {
+}
+
+func (e SystemRootsError) Error() string {
+ return "x509: failed to load system roots and no roots provided"
+}
+
+// VerifyOptions contains parameters for Certificate.Verify. It's a structure
+// because other PKIX verification APIs have ended up needing many options.
+type VerifyOptions struct {
+ DNSName string
+ Intermediates *CertPool
+ Roots *CertPool // if nil, the system roots are used
+ CurrentTime time.Time // if zero, the current time is used
+ DisableTimeChecks bool
+ // KeyUsage specifies which Extended Key Usage values are acceptable.
+ // An empty list means ExtKeyUsageServerAuth. Key usage is considered a
+ // constraint down the chain which mirrors Windows CryptoAPI behaviour,
+ // but not the spec. To accept any key usage, include ExtKeyUsageAny.
+ KeyUsages []ExtKeyUsage
+}
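+
+// Illustrative construction (not upstream code): a typical TLS-server check,
+// where rootPEM and intermediatePEM are assumed caller-side inputs.
+//
+//	roots := NewCertPool()
+//	roots.AppendCertsFromPEM(rootPEM)
+//	inters := NewCertPool()
+//	inters.AppendCertsFromPEM(intermediatePEM)
+//	opts := VerifyOptions{
+//		DNSName:       "example.com",
+//		Roots:         roots,
+//		Intermediates: inters,
+//		// an empty KeyUsages is treated as ExtKeyUsageServerAuth
+//	}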
+
+const (
+ leafCertificate = iota
+ intermediateCertificate
+ rootCertificate
+)
+
+// isValid performs validity checks on the c.
+func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error {
+ if !opts.DisableTimeChecks {
+ now := opts.CurrentTime
+ if now.IsZero() {
+ now = time.Now()
+ }
+ if now.Before(c.NotBefore) || now.After(c.NotAfter) {
+ return CertificateInvalidError{c, Expired}
+ }
+ }
+
+ if len(c.PermittedDNSDomains) > 0 {
+ ok := false
+ for _, domain := range c.PermittedDNSDomains {
+ if opts.DNSName == domain ||
+ (strings.HasSuffix(opts.DNSName, domain) &&
+ len(opts.DNSName) >= 1+len(domain) &&
+ opts.DNSName[len(opts.DNSName)-len(domain)-1] == '.') {
+ ok = true
+ break
+ }
+ }
+
+ if !ok {
+ return CertificateInvalidError{c, CANotAuthorizedForThisName}
+ }
+ }
+
+ // KeyUsage status flags are ignored. From Engineering Security, Peter
+ // Gutmann: A European government CA marked its signing certificates as
+ // being valid for encryption only, but no-one noticed. Another
+ // European CA marked its signature keys as not being valid for
+ // signatures. A different CA marked its own trusted root certificate
+ // as being invalid for certificate signing. Another national CA
+ // distributed a certificate to be used to encrypt data for the
+ // country’s tax authority that was marked as only being usable for
+ // digital signatures but not for encryption. Yet another CA reversed
+ // the order of the bit flags in the keyUsage due to confusion over
+ // encoding endianness, essentially setting a random keyUsage in
+ // certificates that it issued. Another CA created a self-invalidating
+ // certificate by adding a certificate policy statement stipulating
+ // that the certificate had to be used strictly as specified in the
+ // keyUsage, and a keyUsage containing a flag indicating that the RSA
+ // encryption key could only be used for Diffie-Hellman key agreement.
+
+ if certType == intermediateCertificate && (!c.BasicConstraintsValid || !c.IsCA) {
+ return CertificateInvalidError{c, NotAuthorizedToSign}
+ }
+
+ if c.BasicConstraintsValid && c.MaxPathLen >= 0 {
+ numIntermediates := len(currentChain) - 1
+ if numIntermediates > c.MaxPathLen {
+ return CertificateInvalidError{c, TooManyIntermediates}
+ }
+ }
+
+ return nil
+}
+
+// Verify attempts to verify c by building one or more chains from c to a
+// certificate in opts.Roots, using certificates in opts.Intermediates if
+// needed. If successful, it returns one or more chains where the first
+// element of the chain is c and the last element is from opts.Roots.
+//
+// WARNING: this doesn't do any revocation checking.
+func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {
+ // Use Windows's own verification and chain building.
+ if opts.Roots == nil && runtime.GOOS == "windows" {
+ return c.systemVerify(&opts)
+ }
+
+ if opts.Roots == nil {
+ opts.Roots = systemRootsPool()
+ if opts.Roots == nil {
+ return nil, SystemRootsError{}
+ }
+ }
+
+ err = c.isValid(leafCertificate, nil, &opts)
+ if err != nil {
+ return
+ }
+
+ if len(opts.DNSName) > 0 {
+ err = c.VerifyHostname(opts.DNSName)
+ if err != nil {
+ return
+ }
+ }
+
+ candidateChains, err := c.buildChains(make(map[int][][]*Certificate), []*Certificate{c}, &opts)
+ if err != nil {
+ return
+ }
+
+ keyUsages := opts.KeyUsages
+ if len(keyUsages) == 0 {
+ keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
+ }
+
+ // If any key usage is acceptable then we're done.
+ for _, usage := range keyUsages {
+ if usage == ExtKeyUsageAny {
+ chains = candidateChains
+ return
+ }
+ }
+
+ for _, candidate := range candidateChains {
+ if checkChainForKeyUsage(candidate, keyUsages) {
+ chains = append(chains, candidate)
+ }
+ }
+
+ if len(chains) == 0 {
+ err = CertificateInvalidError{c, IncompatibleUsage}
+ }
+
+ return
+}
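+
+// Illustrative caller-side sketch (not upstream code), reusing the opts shown
+// above VerifyOptions; cert is an assumed parsed *Certificate:
+//
+//	chains, err := cert.Verify(opts)
+//	if err != nil {
+//		// e.g. CertificateInvalidError, HostnameError, UnknownAuthorityError
+//	}
+//	for _, chain := range chains {
+//		// chain[0] == cert; the last element comes from opts.Roots
+//	}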
+
+func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
+ n := make([]*Certificate, len(chain)+1)
+ copy(n, chain)
+ n[len(chain)] = cert
+ return n
+}
+
+func (c *Certificate) buildChains(cache map[int][][]*Certificate, currentChain []*Certificate, opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ possibleRoots, failedRoot, rootErr := opts.Roots.findVerifiedParents(c)
+ for _, rootNum := range possibleRoots {
+ root := opts.Roots.certs[rootNum]
+ err = root.isValid(rootCertificate, currentChain, opts)
+ if err != nil {
+ continue
+ }
+ chains = append(chains, appendToFreshChain(currentChain, root))
+ }
+
+ possibleIntermediates, failedIntermediate, intermediateErr := opts.Intermediates.findVerifiedParents(c)
+nextIntermediate:
+ for _, intermediateNum := range possibleIntermediates {
+ intermediate := opts.Intermediates.certs[intermediateNum]
+ for _, cert := range currentChain {
+ if cert == intermediate {
+ continue nextIntermediate
+ }
+ }
+ err = intermediate.isValid(intermediateCertificate, currentChain, opts)
+ if err != nil {
+ continue
+ }
+ var childChains [][]*Certificate
+ childChains, ok := cache[intermediateNum]
+ if !ok {
+ childChains, err = intermediate.buildChains(cache, appendToFreshChain(currentChain, intermediate), opts)
+ cache[intermediateNum] = childChains
+ }
+ chains = append(chains, childChains...)
+ }
+
+ if len(chains) > 0 {
+ err = nil
+ }
+
+ if len(chains) == 0 && err == nil {
+ hintErr := rootErr
+ hintCert := failedRoot
+ if hintErr == nil {
+ hintErr = intermediateErr
+ hintCert = failedIntermediate
+ }
+ err = UnknownAuthorityError{c, hintErr, hintCert}
+ }
+
+ return
+}
+
+func matchHostnames(pattern, host string) bool {
+ if len(pattern) == 0 || len(host) == 0 {
+ return false
+ }
+
+ patternParts := strings.Split(pattern, ".")
+ hostParts := strings.Split(host, ".")
+
+ if len(patternParts) != len(hostParts) {
+ return false
+ }
+
+ for i, patternPart := range patternParts {
+ if patternPart == "*" {
+ continue
+ }
+ if patternPart != hostParts[i] {
+ return false
+ }
+ }
+
+ return true
+}
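+
+// Worked examples of the rules above (illustrative, not upstream): a "*"
+// pattern label matches exactly one host label, never zero or several.
+//
+//	matchHostnames("*.example.com", "www.example.com") // true
+//	matchHostnames("*.example.com", "a.b.example.com") // false: label counts differ
+//	matchHostnames("*.example.com", "example.com")     // false
+//	matchHostnames("example.com", "example.com")       // true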
+
+// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use
+// an explicitly ASCII function to avoid any sharp corners resulting from
+// performing Unicode operations on DNS labels.
+func toLowerCaseASCII(in string) string {
+ // If the string is already lower-case then there's nothing to do.
+ isAlreadyLowerCase := true
+ for _, c := range in {
+ if c == utf8.RuneError {
+ // If we get a UTF-8 error then there might be
+ // upper-case ASCII bytes in the invalid sequence.
+ isAlreadyLowerCase = false
+ break
+ }
+ if 'A' <= c && c <= 'Z' {
+ isAlreadyLowerCase = false
+ break
+ }
+ }
+
+ if isAlreadyLowerCase {
+ return in
+ }
+
+ out := []byte(in)
+ for i, c := range out {
+ if 'A' <= c && c <= 'Z' {
+ out[i] += 'a' - 'A'
+ }
+ }
+ return string(out)
+}
+
+// VerifyHostname returns nil if c is a valid certificate for the named host.
+// Otherwise it returns an error describing the mismatch.
+func (c *Certificate) VerifyHostname(h string) error {
+ // IP addresses may be written in [ ].
+ candidateIP := h
+ if len(h) >= 3 && h[0] == '[' && h[len(h)-1] == ']' {
+ candidateIP = h[1 : len(h)-1]
+ }
+ if ip := net.ParseIP(candidateIP); ip != nil {
+ // We only match IP addresses against IP SANs.
+ // https://tools.ietf.org/html/rfc6125#appendix-B.2
+ for _, candidate := range c.IPAddresses {
+ if ip.Equal(candidate) {
+ return nil
+ }
+ }
+ return HostnameError{c, candidateIP}
+ }
+
+ lowered := toLowerCaseASCII(h)
+
+ if len(c.DNSNames) > 0 {
+ for _, match := range c.DNSNames {
+ if matchHostnames(toLowerCaseASCII(match), lowered) {
+ return nil
+ }
+ }
+ // If Subject Alt Name is given, we ignore the common name.
+ } else if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
+ return nil
+ }
+
+ return HostnameError{c, h}
+}
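+
+// Illustrative calls (not upstream code): IP literals may be bracketed and are
+// matched only against IP SANs, per RFC 6125:
+//
+//	_ = cert.VerifyHostname("www.example.com")
+//	_ = cert.VerifyHostname("[2001:db8::1]")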
+
+func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool {
+ usages := make([]ExtKeyUsage, len(keyUsages))
+ copy(usages, keyUsages)
+
+ if len(chain) == 0 {
+ return false
+ }
+
+ usagesRemaining := len(usages)
+
+ // We walk down the list and cross out any usages that aren't supported
+ // by each certificate. If we cross out all the usages, then the chain
+ // is unacceptable.
+
+ for i := len(chain) - 1; i >= 0; i-- {
+ cert := chain[i]
+ if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 {
+ // The certificate doesn't have any extended key usage specified.
+ continue
+ }
+
+ // If the certificate is explicitly good for any usage, it places no
+ // constraint on the remaining usages, so move on to the next certificate.
+ anyUsage := false
+ for _, usage := range cert.ExtKeyUsage {
+ if usage == ExtKeyUsageAny {
+ anyUsage = true
+ break
+ }
+ }
+ if anyUsage {
+ continue
+ }
+
+ const invalidUsage ExtKeyUsage = -1
+
+ NextRequestedUsage:
+ for i, requestedUsage := range usages {
+ if requestedUsage == invalidUsage {
+ continue
+ }
+
+ for _, usage := range cert.ExtKeyUsage {
+ if requestedUsage == usage {
+ continue NextRequestedUsage
+ } else if requestedUsage == ExtKeyUsageServerAuth &&
+ (usage == ExtKeyUsageNetscapeServerGatedCrypto ||
+ usage == ExtKeyUsageMicrosoftServerGatedCrypto) {
+ // In order to support COMODO
+ // certificate chains, we have to
+ // accept Netscape or Microsoft SGC
+ // usages as equal to ServerAuth.
+ continue NextRequestedUsage
+ }
+ }
+
+ usages[i] = invalidUsage
+ usagesRemaining--
+ if usagesRemaining == 0 {
+ return false
+ }
+ }
+ }
+
+ return true
+}
diff --git a/vendor/src/github.com/google/certificate-transparency/go/x509/x509.go b/vendor/src/github.com/google/certificate-transparency/go/x509/x509.go
new file mode 100755
index 0000000000..cda72200f8
--- /dev/null
+++ b/vendor/src/github.com/google/certificate-transparency/go/x509/x509.go
@@ -0,0 +1,1622 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package x509 parses X.509-encoded keys and certificates.
+//
+// START CT CHANGES
+// This is a fork of the Go standard library crypto/x509 package. It is more
+// relaxed about which certificates it will accept, and it exports the
+// TBSCertificate structure.
+// END CT CHANGES
+package x509
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/sha1"
+ // START CT CHANGES
+ "github.com/google/certificate-transparency/go/asn1"
+ "github.com/google/certificate-transparency/go/x509/pkix"
+ // END CT CHANGES
+ "encoding/pem"
+ "errors"
+ // START CT CHANGES
+ "fmt"
+ // END CT CHANGES
+ "io"
+ "math/big"
+ "net"
+ "time"
+)
+
+// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo
+// in RFC 3280.
+type pkixPublicKey struct {
+ Algo pkix.AlgorithmIdentifier
+ BitString asn1.BitString
+}
+
+// ParsePKIXPublicKey parses a DER encoded public key. These values are
+// typically found in PEM blocks with "BEGIN PUBLIC KEY".
+func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) {
+ var pki publicKeyInfo
+ if _, err = asn1.Unmarshal(derBytes, &pki); err != nil {
+ return
+ }
+ algo := getPublicKeyAlgorithmFromOID(pki.Algorithm.Algorithm)
+ if algo == UnknownPublicKeyAlgorithm {
+ return nil, errors.New("x509: unknown public key algorithm")
+ }
+ return parsePublicKey(algo, &pki)
+}
+
+func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) {
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ publicKeyBytes, err = asn1.Marshal(rsaPublicKey{
+ N: pub.N,
+ E: pub.E,
+ })
+ publicKeyAlgorithm.Algorithm = oidPublicKeyRSA
+ // This is a NULL parameters value which is technically
+ // superfluous, but most other code includes it and, by
+ // doing this, we match their public key hashes.
+ publicKeyAlgorithm.Parameters = asn1.RawValue{
+ Tag: 5,
+ }
+ case *ecdsa.PublicKey:
+ publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
+ oid, ok := oidFromNamedCurve(pub.Curve)
+ if !ok {
+ return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: unsupported elliptic curve")
+ }
+ publicKeyAlgorithm.Algorithm = oidPublicKeyECDSA
+ var paramBytes []byte
+ paramBytes, err = asn1.Marshal(oid)
+ if err != nil {
+ return
+ }
+ publicKeyAlgorithm.Parameters.FullBytes = paramBytes
+ default:
+ return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: only RSA and ECDSA public keys supported")
+ }
+
+ return publicKeyBytes, publicKeyAlgorithm, nil
+}
+
+// MarshalPKIXPublicKey serialises a public key to DER-encoded PKIX format.
+func MarshalPKIXPublicKey(pub interface{}) ([]byte, error) {
+ var publicKeyBytes []byte
+ var publicKeyAlgorithm pkix.AlgorithmIdentifier
+ var err error
+
+ if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil {
+ return nil, err
+ }
+
+ pkix := pkixPublicKey{
+ Algo: publicKeyAlgorithm,
+ BitString: asn1.BitString{
+ Bytes: publicKeyBytes,
+ BitLength: 8 * len(publicKeyBytes),
+ },
+ }
+
+ ret, _ := asn1.Marshal(pkix)
+ return ret, nil
+}
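+
+// Illustrative round trip (not upstream code); pub is an assumed
+// *rsa.PublicKey or *ecdsa.PublicKey:
+//
+//	der, err := MarshalPKIXPublicKey(pub)
+//	if err != nil {
+//		// unsupported key type or curve
+//	}
+//	parsed, err := ParsePKIXPublicKey(der)
+//	// parsed has static type interface{}; assert back to the concrete type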
+
+// These structures reflect the ASN.1 structure of X.509 certificates:
+
+type certificate struct {
+ Raw asn1.RawContent
+ TBSCertificate tbsCertificate
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ SignatureValue asn1.BitString
+}
+
+type tbsCertificate struct {
+ Raw asn1.RawContent
+ Version int `asn1:"optional,explicit,default:1,tag:0"`
+ SerialNumber *big.Int
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ Issuer asn1.RawValue
+ Validity validity
+ Subject asn1.RawValue
+ PublicKey publicKeyInfo
+ UniqueId asn1.BitString `asn1:"optional,tag:1"`
+ SubjectUniqueId asn1.BitString `asn1:"optional,tag:2"`
+ Extensions []pkix.Extension `asn1:"optional,explicit,tag:3"`
+}
+
+type dsaAlgorithmParameters struct {
+ P, Q, G *big.Int
+}
+
+type dsaSignature struct {
+ R, S *big.Int
+}
+
+type ecdsaSignature dsaSignature
+
+type validity struct {
+ NotBefore, NotAfter time.Time
+}
+
+type publicKeyInfo struct {
+ Raw asn1.RawContent
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+}
+
+// RFC 5280, 4.2.1.1
+type authKeyId struct {
+ Id []byte `asn1:"optional,tag:0"`
+}
+
+type SignatureAlgorithm int
+
+const (
+ UnknownSignatureAlgorithm SignatureAlgorithm = iota
+ MD2WithRSA
+ MD5WithRSA
+ SHA1WithRSA
+ SHA256WithRSA
+ SHA384WithRSA
+ SHA512WithRSA
+ DSAWithSHA1
+ DSAWithSHA256
+ ECDSAWithSHA1
+ ECDSAWithSHA256
+ ECDSAWithSHA384
+ ECDSAWithSHA512
+)
+
+type PublicKeyAlgorithm int
+
+const (
+ UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota
+ RSA
+ DSA
+ ECDSA
+)
+
+// OIDs for signature algorithms
+//
+// pkcs-1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 }
+//
+//
+// RFC 3279 2.2.1 RSA Signature Algorithms
+//
+// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 }
+//
+// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 }
+//
+// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 }
+//
+// dsaWithSha1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 }
+//
+// RFC 3279 2.2.3 ECDSA Signature Algorithm
+//
+// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-x962(10045)
+// signatures(4) ecdsa-with-SHA1(1)}
+//
+//
+// RFC 4055 5 PKCS #1 Version 1.5
+//
+// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 }
+//
+// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 }
+//
+// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 }
+//
+//
+// RFC 5758 3.1 DSA Signature Algorithms
+//
+// dsaWithSha256 OBJECT IDENTIFIER ::= {
+// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
+// csor(3) algorithms(4) id-dsa-with-sha2(3) 2}
+//
+// RFC 5758 3.2 ECDSA Signature Algorithm
+//
+// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
+// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 }
+//
+// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
+// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 }
+//
+// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
+// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 }
+
+var (
+ oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
+ oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
+ oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+ oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+ oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+ oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+ oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+ oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2}
+ oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+ oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+ oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+ oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+)
+
+func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) SignatureAlgorithm {
+ switch {
+ case oid.Equal(oidSignatureMD2WithRSA):
+ return MD2WithRSA
+ case oid.Equal(oidSignatureMD5WithRSA):
+ return MD5WithRSA
+ case oid.Equal(oidSignatureSHA1WithRSA):
+ return SHA1WithRSA
+ case oid.Equal(oidSignatureSHA256WithRSA):
+ return SHA256WithRSA
+ case oid.Equal(oidSignatureSHA384WithRSA):
+ return SHA384WithRSA
+ case oid.Equal(oidSignatureSHA512WithRSA):
+ return SHA512WithRSA
+ case oid.Equal(oidSignatureDSAWithSHA1):
+ return DSAWithSHA1
+ case oid.Equal(oidSignatureDSAWithSHA256):
+ return DSAWithSHA256
+ case oid.Equal(oidSignatureECDSAWithSHA1):
+ return ECDSAWithSHA1
+ case oid.Equal(oidSignatureECDSAWithSHA256):
+ return ECDSAWithSHA256
+ case oid.Equal(oidSignatureECDSAWithSHA384):
+ return ECDSAWithSHA384
+ case oid.Equal(oidSignatureECDSAWithSHA512):
+ return ECDSAWithSHA512
+ }
+ return UnknownSignatureAlgorithm
+}
+
+// RFC 3279, 2.3 Public Key Algorithms
+//
+// pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
+// rsadsi(113549) pkcs(1) 1 }
+//
+// rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 }
+//
+// id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
+// x9-57(10040) x9cm(4) 1 }
+//
+// RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters
+//
+// id-ecPublicKey OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
+var (
+ oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
+ oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
+ oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
+)
+
+func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm {
+ switch {
+ case oid.Equal(oidPublicKeyRSA):
+ return RSA
+ case oid.Equal(oidPublicKeyDSA):
+ return DSA
+ case oid.Equal(oidPublicKeyECDSA):
+ return ECDSA
+ }
+ return UnknownPublicKeyAlgorithm
+}
+
+// RFC 5480, 2.1.1.1. Named Curve
+//
+// secp224r1 OBJECT IDENTIFIER ::= {
+// iso(1) identified-organization(3) certicom(132) curve(0) 33 }
+//
+// secp256r1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
+// prime(1) 7 }
+//
+// secp384r1 OBJECT IDENTIFIER ::= {
+// iso(1) identified-organization(3) certicom(132) curve(0) 34 }
+//
+// secp521r1 OBJECT IDENTIFIER ::= {
+// iso(1) identified-organization(3) certicom(132) curve(0) 35 }
+//
+// NB: secp256r1 is equivalent to prime256v1
+var (
+ oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
+ oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
+ oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
+ oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
+)
+
+func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve {
+ switch {
+ case oid.Equal(oidNamedCurveP224):
+ return elliptic.P224()
+ case oid.Equal(oidNamedCurveP256):
+ return elliptic.P256()
+ case oid.Equal(oidNamedCurveP384):
+ return elliptic.P384()
+ case oid.Equal(oidNamedCurveP521):
+ return elliptic.P521()
+ }
+ return nil
+}
+
+func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
+ switch curve {
+ case elliptic.P224():
+ return oidNamedCurveP224, true
+ case elliptic.P256():
+ return oidNamedCurveP256, true
+ case elliptic.P384():
+ return oidNamedCurveP384, true
+ case elliptic.P521():
+ return oidNamedCurveP521, true
+ }
+
+ return nil, false
+}
+
+// KeyUsage represents the set of actions that are valid for a given key. It's
+// a bitmap of the KeyUsage* constants.
+type KeyUsage int
+
+const (
+ KeyUsageDigitalSignature KeyUsage = 1 << iota
+ KeyUsageContentCommitment
+ KeyUsageKeyEncipherment
+ KeyUsageDataEncipherment
+ KeyUsageKeyAgreement
+ KeyUsageCertSign
+ KeyUsageCRLSign
+ KeyUsageEncipherOnly
+ KeyUsageDecipherOnly
+)
+
+// RFC 5280, 4.2.1.12 Extended Key Usage
+//
+// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 }
+//
+// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 }
+//
+// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 }
+// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 }
+// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 }
+// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 }
+// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 }
+// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 }
+var (
+ oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0}
+ oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1}
+ oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2}
+ oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3}
+ oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4}
+ oidExtKeyUsageIPSECEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5}
+ oidExtKeyUsageIPSECTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6}
+ oidExtKeyUsageIPSECUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7}
+ oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8}
+ oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9}
+ oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3}
+ oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1}
+)
+
+// ExtKeyUsage represents an extended set of actions that are valid for a given key.
+// Each of the ExtKeyUsage* constants define a unique action.
+type ExtKeyUsage int
+
+const (
+ ExtKeyUsageAny ExtKeyUsage = iota
+ ExtKeyUsageServerAuth
+ ExtKeyUsageClientAuth
+ ExtKeyUsageCodeSigning
+ ExtKeyUsageEmailProtection
+ ExtKeyUsageIPSECEndSystem
+ ExtKeyUsageIPSECTunnel
+ ExtKeyUsageIPSECUser
+ ExtKeyUsageTimeStamping
+ ExtKeyUsageOCSPSigning
+ ExtKeyUsageMicrosoftServerGatedCrypto
+ ExtKeyUsageNetscapeServerGatedCrypto
+)
+
+// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID.
+var extKeyUsageOIDs = []struct {
+ extKeyUsage ExtKeyUsage
+ oid asn1.ObjectIdentifier
+}{
+ {ExtKeyUsageAny, oidExtKeyUsageAny},
+ {ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth},
+ {ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth},
+ {ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning},
+ {ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection},
+ {ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem},
+ {ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel},
+ {ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser},
+ {ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping},
+ {ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning},
+ {ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto},
+ {ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto},
+}
+
+func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) {
+ for _, pair := range extKeyUsageOIDs {
+ if oid.Equal(pair.oid) {
+ return pair.extKeyUsage, true
+ }
+ }
+ return
+}
+
+func oidFromExtKeyUsage(eku ExtKeyUsage) (oid asn1.ObjectIdentifier, ok bool) {
+ for _, pair := range extKeyUsageOIDs {
+ if eku == pair.extKeyUsage {
+ return pair.oid, true
+ }
+ }
+ return
+}
+
+// A Certificate represents an X.509 certificate.
+type Certificate struct {
+ Raw []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature).
+ RawTBSCertificate []byte // Certificate part of raw ASN.1 DER content.
+ RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
+ RawSubject []byte // DER encoded Subject
+ RawIssuer []byte // DER encoded Issuer
+
+ Signature []byte
+ SignatureAlgorithm SignatureAlgorithm
+
+ PublicKeyAlgorithm PublicKeyAlgorithm
+ PublicKey interface{}
+
+ Version int
+ SerialNumber *big.Int
+ Issuer pkix.Name
+ Subject pkix.Name
+ NotBefore, NotAfter time.Time // Validity bounds.
+ KeyUsage KeyUsage
+
+ // Extensions contains raw X.509 extensions. When parsing certificates,
+ // this can be used to extract non-critical extensions that are not
+ // parsed by this package. When marshaling certificates, the Extensions
+ // field is ignored, see ExtraExtensions.
+ Extensions []pkix.Extension
+
+ // ExtraExtensions contains extensions to be copied, raw, into any
+ // marshaled certificates. Values override any extensions that would
+ // otherwise be produced based on the other fields. The ExtraExtensions
+ // field is not populated when parsing certificates, see Extensions.
+ ExtraExtensions []pkix.Extension
+
+ ExtKeyUsage []ExtKeyUsage // Sequence of extended key usages.
+ UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package.
+
+ BasicConstraintsValid bool // if true then the next two fields are valid.
+ IsCA bool
+ MaxPathLen int
+
+ SubjectKeyId []byte
+ AuthorityKeyId []byte
+
+ // RFC 5280, 4.2.2.1 (Authority Information Access)
+ OCSPServer []string
+ IssuingCertificateURL []string
+
+ // Subject Alternate Name values
+ DNSNames []string
+ EmailAddresses []string
+ IPAddresses []net.IP
+
+ // Name constraints
+ PermittedDNSDomainsCritical bool // if true then the name constraints are marked critical.
+ PermittedDNSDomains []string
+
+ // CRL Distribution Points
+ CRLDistributionPoints []string
+
+ PolicyIdentifiers []asn1.ObjectIdentifier
+}
+
+// ErrUnsupportedAlgorithm results from attempting to perform an operation that
+// involves algorithms that are not currently implemented.
+var ErrUnsupportedAlgorithm = errors.New("x509: cannot verify signature: algorithm unimplemented")
+
+// ConstraintViolationError results when a requested usage is not permitted by
+// a certificate. For example: checking a signature when the public key isn't a
+// certificate signing key.
+type ConstraintViolationError struct{}
+
+func (ConstraintViolationError) Error() string {
+ return "x509: invalid signature: parent certificate cannot sign this kind of certificate"
+}
+
+func (c *Certificate) Equal(other *Certificate) bool {
+ return bytes.Equal(c.Raw, other.Raw)
+}
+
+// Entrust have a broken root certificate (CN=Entrust.net Certification
+// Authority (2048)) which isn't marked as a CA certificate and is thus invalid
+// according to PKIX.
+// We recognise this certificate by its SubjectPublicKeyInfo and exempt it
+// from the Basic Constraints requirement.
+// See http://www.entrust.net/knowledge-base/technote.cfm?tn=7869
+//
+// TODO(agl): remove this hack once their reissued root is sufficiently
+// widespread.
+var entrustBrokenSPKI = []byte{
+ 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09,
+ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
+ 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00,
+ 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01,
+ 0x00, 0x97, 0xa3, 0x2d, 0x3c, 0x9e, 0xde, 0x05,
+ 0xda, 0x13, 0xc2, 0x11, 0x8d, 0x9d, 0x8e, 0xe3,
+ 0x7f, 0xc7, 0x4b, 0x7e, 0x5a, 0x9f, 0xb3, 0xff,
+ 0x62, 0xab, 0x73, 0xc8, 0x28, 0x6b, 0xba, 0x10,
+ 0x64, 0x82, 0x87, 0x13, 0xcd, 0x57, 0x18, 0xff,
+ 0x28, 0xce, 0xc0, 0xe6, 0x0e, 0x06, 0x91, 0x50,
+ 0x29, 0x83, 0xd1, 0xf2, 0xc3, 0x2a, 0xdb, 0xd8,
+ 0xdb, 0x4e, 0x04, 0xcc, 0x00, 0xeb, 0x8b, 0xb6,
+ 0x96, 0xdc, 0xbc, 0xaa, 0xfa, 0x52, 0x77, 0x04,
+ 0xc1, 0xdb, 0x19, 0xe4, 0xae, 0x9c, 0xfd, 0x3c,
+ 0x8b, 0x03, 0xef, 0x4d, 0xbc, 0x1a, 0x03, 0x65,
+ 0xf9, 0xc1, 0xb1, 0x3f, 0x72, 0x86, 0xf2, 0x38,
+ 0xaa, 0x19, 0xae, 0x10, 0x88, 0x78, 0x28, 0xda,
+ 0x75, 0xc3, 0x3d, 0x02, 0x82, 0x02, 0x9c, 0xb9,
+ 0xc1, 0x65, 0x77, 0x76, 0x24, 0x4c, 0x98, 0xf7,
+ 0x6d, 0x31, 0x38, 0xfb, 0xdb, 0xfe, 0xdb, 0x37,
+ 0x02, 0x76, 0xa1, 0x18, 0x97, 0xa6, 0xcc, 0xde,
+ 0x20, 0x09, 0x49, 0x36, 0x24, 0x69, 0x42, 0xf6,
+ 0xe4, 0x37, 0x62, 0xf1, 0x59, 0x6d, 0xa9, 0x3c,
+ 0xed, 0x34, 0x9c, 0xa3, 0x8e, 0xdb, 0xdc, 0x3a,
+ 0xd7, 0xf7, 0x0a, 0x6f, 0xef, 0x2e, 0xd8, 0xd5,
+ 0x93, 0x5a, 0x7a, 0xed, 0x08, 0x49, 0x68, 0xe2,
+ 0x41, 0xe3, 0x5a, 0x90, 0xc1, 0x86, 0x55, 0xfc,
+ 0x51, 0x43, 0x9d, 0xe0, 0xb2, 0xc4, 0x67, 0xb4,
+ 0xcb, 0x32, 0x31, 0x25, 0xf0, 0x54, 0x9f, 0x4b,
+ 0xd1, 0x6f, 0xdb, 0xd4, 0xdd, 0xfc, 0xaf, 0x5e,
+ 0x6c, 0x78, 0x90, 0x95, 0xde, 0xca, 0x3a, 0x48,
+ 0xb9, 0x79, 0x3c, 0x9b, 0x19, 0xd6, 0x75, 0x05,
+ 0xa0, 0xf9, 0x88, 0xd7, 0xc1, 0xe8, 0xa5, 0x09,
+ 0xe4, 0x1a, 0x15, 0xdc, 0x87, 0x23, 0xaa, 0xb2,
+ 0x75, 0x8c, 0x63, 0x25, 0x87, 0xd8, 0xf8, 0x3d,
+ 0xa6, 0xc2, 0xcc, 0x66, 0xff, 0xa5, 0x66, 0x68,
+ 0x55, 0x02, 0x03, 0x01, 0x00, 0x01,
+}
+
+// CheckSignatureFrom verifies that the signature on c is a valid signature
+// from parent.
+func (c *Certificate) CheckSignatureFrom(parent *Certificate) (err error) {
+ // RFC 5280, 4.2.1.9:
+ // "If the basic constraints extension is not present in a version 3
+ // certificate, or the extension is present but the cA boolean is not
+ // asserted, then the certified public key MUST NOT be used to verify
+ // certificate signatures."
+ // (except for Entrust, see comment above entrustBrokenSPKI)
+ if (parent.Version == 3 && !parent.BasicConstraintsValid ||
+ parent.BasicConstraintsValid && !parent.IsCA) &&
+ !bytes.Equal(c.RawSubjectPublicKeyInfo, entrustBrokenSPKI) {
+ return ConstraintViolationError{}
+ }
+
+ if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCertSign == 0 {
+ return ConstraintViolationError{}
+ }
+
+ if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm {
+ return ErrUnsupportedAlgorithm
+ }
+
+ // TODO(agl): don't ignore the path length constraint.
+
+ return parent.CheckSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature)
+}
+
+// CheckSignature verifies that signature is a valid signature over signed from
+// c's public key.
+func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) (err error) {
+ var hashType crypto.Hash
+
+ switch algo {
+ case SHA1WithRSA, DSAWithSHA1, ECDSAWithSHA1:
+ hashType = crypto.SHA1
+ case SHA256WithRSA, DSAWithSHA256, ECDSAWithSHA256:
+ hashType = crypto.SHA256
+ case SHA384WithRSA, ECDSAWithSHA384:
+ hashType = crypto.SHA384
+ case SHA512WithRSA, ECDSAWithSHA512:
+ hashType = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ if !hashType.Available() {
+ return ErrUnsupportedAlgorithm
+ }
+ h := hashType.New()
+
+ h.Write(signed)
+ digest := h.Sum(nil)
+
+ switch pub := c.PublicKey.(type) {
+ case *rsa.PublicKey:
+ return rsa.VerifyPKCS1v15(pub, hashType, digest, signature)
+ case *dsa.PublicKey:
+ dsaSig := new(dsaSignature)
+ if _, err := asn1.Unmarshal(signature, dsaSig); err != nil {
+ return err
+ }
+ if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
+ return errors.New("x509: DSA signature contained zero or negative values")
+ }
+ if !dsa.Verify(pub, digest, dsaSig.R, dsaSig.S) {
+ return errors.New("x509: DSA verification failure")
+ }
+ return
+ case *ecdsa.PublicKey:
+ ecdsaSig := new(ecdsaSignature)
+ if _, err := asn1.Unmarshal(signature, ecdsaSig); err != nil {
+ return err
+ }
+ if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
+ return errors.New("x509: ECDSA signature contained zero or negative values")
+ }
+ if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) {
+ return errors.New("x509: ECDSA verification failure")
+ }
+ return
+ }
+ return ErrUnsupportedAlgorithm
+}
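+
+// Illustrative direct use (not upstream code): CheckSignature can also verify
+// a detached signature made with the certificate's key; data and sig are
+// assumed caller-side inputs.
+//
+//	if err := cert.CheckSignature(SHA256WithRSA, data, sig); err != nil {
+//		// the signature does not verify under cert's public key
+//	}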
+
+// CheckCRLSignature checks that the signature in crl is from c.
+func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) (err error) {
+ algo := getSignatureAlgorithmFromOID(crl.SignatureAlgorithm.Algorithm)
+ return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
+}
+
+// START CT CHANGES
+type UnhandledCriticalExtension struct {
+ ID asn1.ObjectIdentifier
+}
+
+func (h UnhandledCriticalExtension) Error() string {
+ return fmt.Sprintf("x509: unhandled critical extension (%v)", h.ID)
+}
+
+// END CT CHANGES
+
+type basicConstraints struct {
+ IsCA bool `asn1:"optional"`
+ MaxPathLen int `asn1:"optional,default:-1"`
+}
+
+// RFC 5280 4.2.1.4
+type policyInformation struct {
+ Policy asn1.ObjectIdentifier
+ // policyQualifiers omitted
+}
+
+// RFC 5280, 4.2.1.10
+type nameConstraints struct {
+ Permitted []generalSubtree `asn1:"optional,tag:0"`
+ Excluded []generalSubtree `asn1:"optional,tag:1"`
+}
+
+type generalSubtree struct {
+ Name string `asn1:"tag:2,optional,ia5"`
+}
+
+// RFC 5280, 4.2.2.1
+type authorityInfoAccess struct {
+ Method asn1.ObjectIdentifier
+ Location asn1.RawValue
+}
+
+// RFC 5280, 4.2.1.14
+type distributionPoint struct {
+ DistributionPoint distributionPointName `asn1:"optional,tag:0"`
+ Reason asn1.BitString `asn1:"optional,tag:1"`
+ CRLIssuer asn1.RawValue `asn1:"optional,tag:2"`
+}
+
+type distributionPointName struct {
+ FullName asn1.RawValue `asn1:"optional,tag:0"`
+ RelativeName pkix.RDNSequence `asn1:"optional,tag:1"`
+}
+
+func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, error) {
+ asn1Data := keyData.PublicKey.RightAlign()
+ switch algo {
+ case RSA:
+ p := new(rsaPublicKey)
+ _, err := asn1.Unmarshal(asn1Data, p)
+ if err != nil {
+ return nil, err
+ }
+
+ if p.N.Sign() <= 0 {
+ return nil, errors.New("x509: RSA modulus is not a positive number")
+ }
+ if p.E <= 0 {
+ return nil, errors.New("x509: RSA public exponent is not a positive number")
+ }
+
+ pub := &rsa.PublicKey{
+ E: p.E,
+ N: p.N,
+ }
+ return pub, nil
+ case DSA:
+ var p *big.Int
+ _, err := asn1.Unmarshal(asn1Data, &p)
+ if err != nil {
+ return nil, err
+ }
+ paramsData := keyData.Algorithm.Parameters.FullBytes
+ params := new(dsaAlgorithmParameters)
+ _, err = asn1.Unmarshal(paramsData, params)
+ if err != nil {
+ return nil, err
+ }
+ if p.Sign() <= 0 || params.P.Sign() <= 0 || params.Q.Sign() <= 0 || params.G.Sign() <= 0 {
+ return nil, errors.New("x509: zero or negative DSA parameter")
+ }
+ pub := &dsa.PublicKey{
+ Parameters: dsa.Parameters{
+ P: params.P,
+ Q: params.Q,
+ G: params.G,
+ },
+ Y: p,
+ }
+ return pub, nil
+ case ECDSA:
+ paramsData := keyData.Algorithm.Parameters.FullBytes
+ namedCurveOID := new(asn1.ObjectIdentifier)
+ _, err := asn1.Unmarshal(paramsData, namedCurveOID)
+ if err != nil {
+ return nil, err
+ }
+ namedCurve := namedCurveFromOID(*namedCurveOID)
+ if namedCurve == nil {
+ return nil, errors.New("x509: unsupported elliptic curve")
+ }
+ x, y := elliptic.Unmarshal(namedCurve, asn1Data)
+ if x == nil {
+ return nil, errors.New("x509: failed to unmarshal elliptic curve point")
+ }
+ pub := &ecdsa.PublicKey{
+ Curve: namedCurve,
+ X: x,
+ Y: y,
+ }
+ return pub, nil
+ default:
+ return nil, nil
+ }
+}
+
+// START CT CHANGES
+
+// NonFatalErrors is an error type which can hold a number of other errors.
+// It's used to collect a range of non-fatal errors which occur while parsing
+// a certificate, so that we can still match on certs which are technically
+// invalid.
+type NonFatalErrors struct {
+ Errors []error
+}
+
+// AddError adds an error to the list of errors contained by NonFatalErrors.
+func (e *NonFatalErrors) AddError(err error) {
+ e.Errors = append(e.Errors, err)
+}
+
+// Error returns a string consisting of the values of Error() from all of the
+// errors contained in e.
+func (e NonFatalErrors) Error() string {
+ r := "NonFatalErrors: "
+ for _, err := range e.Errors {
+ r += err.Error() + "; "
+ }
+ return r
+}
+
+// HasError returns true if e contains at least one error.
+func (e *NonFatalErrors) HasError() bool {
+ return len(e.Errors) > 0
+}
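+
+// Illustrative caller-side pattern (not upstream code): unlike crypto/x509,
+// this fork's parse functions can return a populated certificate together
+// with a NonFatalErrors value, which callers may choose to tolerate.
+//
+//	cert, err := ParseCertificate(der)
+//	if _, ok := err.(NonFatalErrors); ok {
+//		// cert is usable; record err and continue
+//	} else if err != nil {
+//		// fatal parse error: cert is nil
+//	}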
+
+// END CT CHANGES
+
+func parseCertificate(in *certificate) (*Certificate, error) {
+ // START CT CHANGES
+ var nfe NonFatalErrors
+ // END CT CHANGES
+
+ out := new(Certificate)
+ out.Raw = in.Raw
+ out.RawTBSCertificate = in.TBSCertificate.Raw
+ out.RawSubjectPublicKeyInfo = in.TBSCertificate.PublicKey.Raw
+ out.RawSubject = in.TBSCertificate.Subject.FullBytes
+ out.RawIssuer = in.TBSCertificate.Issuer.FullBytes
+
+ out.Signature = in.SignatureValue.RightAlign()
+ out.SignatureAlgorithm =
+ getSignatureAlgorithmFromOID(in.TBSCertificate.SignatureAlgorithm.Algorithm)
+
+ out.PublicKeyAlgorithm =
+ getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm)
+ var err error
+ out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ if in.TBSCertificate.SerialNumber.Sign() < 0 {
+ // START CT CHANGES
+ nfe.AddError(errors.New("x509: negative serial number"))
+ // END CT CHANGES
+ }
+
+ out.Version = in.TBSCertificate.Version + 1
+ out.SerialNumber = in.TBSCertificate.SerialNumber
+
+ var issuer, subject pkix.RDNSequence
+ if _, err := asn1.Unmarshal(in.TBSCertificate.Subject.FullBytes, &subject); err != nil {
+ return nil, err
+ }
+ if _, err := asn1.Unmarshal(in.TBSCertificate.Issuer.FullBytes, &issuer); err != nil {
+ return nil, err
+ }
+
+ out.Issuer.FillFromRDNSequence(&issuer)
+ out.Subject.FillFromRDNSequence(&subject)
+
+ out.NotBefore = in.TBSCertificate.Validity.NotBefore
+ out.NotAfter = in.TBSCertificate.Validity.NotAfter
+
+ for _, e := range in.TBSCertificate.Extensions {
+ out.Extensions = append(out.Extensions, e)
+
+ if len(e.Id) == 4 && e.Id[0] == 2 && e.Id[1] == 5 && e.Id[2] == 29 {
+ switch e.Id[3] {
+ case 15:
+ // RFC 5280, 4.2.1.3
+ var usageBits asn1.BitString
+ _, err := asn1.Unmarshal(e.Value, &usageBits)
+
+ if err == nil {
+ var usage int
+ for i := 0; i < 9; i++ {
+ if usageBits.At(i) != 0 {
+ usage |= 1 << uint(i)
+ }
+ }
+ out.KeyUsage = KeyUsage(usage)
+ continue
+ }
+ case 19:
+ // RFC 5280, 4.2.1.9
+ var constraints basicConstraints
+ _, err := asn1.Unmarshal(e.Value, &constraints)
+
+ if err == nil {
+ out.BasicConstraintsValid = true
+ out.IsCA = constraints.IsCA
+ out.MaxPathLen = constraints.MaxPathLen
+ continue
+ }
+ case 17:
+ // RFC 5280, 4.2.1.6
+
+ // SubjectAltName ::= GeneralNames
+ //
+ // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
+ //
+ // GeneralName ::= CHOICE {
+ // otherName [0] OtherName,
+ // rfc822Name [1] IA5String,
+ // dNSName [2] IA5String,
+ // x400Address [3] ORAddress,
+ // directoryName [4] Name,
+ // ediPartyName [5] EDIPartyName,
+ // uniformResourceIdentifier [6] IA5String,
+ // iPAddress [7] OCTET STRING,
+ // registeredID [8] OBJECT IDENTIFIER }
+ var seq asn1.RawValue
+ _, err := asn1.Unmarshal(e.Value, &seq)
+ if err != nil {
+ return nil, err
+ }
+ if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 {
+ return nil, asn1.StructuralError{Msg: "bad SAN sequence"}
+ }
+
+ parsedName := false
+
+ rest := seq.Bytes
+ for len(rest) > 0 {
+ var v asn1.RawValue
+ rest, err = asn1.Unmarshal(rest, &v)
+ if err != nil {
+ return nil, err
+ }
+ switch v.Tag {
+ case 1:
+ out.EmailAddresses = append(out.EmailAddresses, string(v.Bytes))
+ parsedName = true
+ case 2:
+ out.DNSNames = append(out.DNSNames, string(v.Bytes))
+ parsedName = true
+ case 7:
+ switch len(v.Bytes) {
+ case net.IPv4len, net.IPv6len:
+ out.IPAddresses = append(out.IPAddresses, v.Bytes)
+ default:
+ // START CT CHANGES
+ nfe.AddError(fmt.Errorf("x509: certificate contained IP address of length %d: %v", len(v.Bytes), v.Bytes))
+ // END CT CHANGES
+ }
+ }
+ }
+
+ if parsedName {
+ continue
+ }
+ // If we didn't parse any of the names then we
+ // fall through to the critical check below.
+
+ case 30:
+ // RFC 5280, 4.2.1.10
+
+ // NameConstraints ::= SEQUENCE {
+ // permittedSubtrees [0] GeneralSubtrees OPTIONAL,
+ // excludedSubtrees [1] GeneralSubtrees OPTIONAL }
+ //
+ // GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree
+ //
+ // GeneralSubtree ::= SEQUENCE {
+ // base GeneralName,
+ // minimum [0] BaseDistance DEFAULT 0,
+ // maximum [1] BaseDistance OPTIONAL }
+ //
+ // BaseDistance ::= INTEGER (0..MAX)
+
+ var constraints nameConstraints
+ _, err := asn1.Unmarshal(e.Value, &constraints)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(constraints.Excluded) > 0 && e.Critical {
+ // START CT CHANGES
+ nfe.AddError(UnhandledCriticalExtension{e.Id})
+ // END CT CHANGES
+ }
+
+ for _, subtree := range constraints.Permitted {
+ if len(subtree.Name) == 0 {
+ if e.Critical {
+ // START CT CHANGES
+ nfe.AddError(UnhandledCriticalExtension{e.Id})
+ // END CT CHANGES
+ }
+ continue
+ }
+ out.PermittedDNSDomains = append(out.PermittedDNSDomains, subtree.Name)
+ }
+ continue
+
+ case 31:
+ // RFC 5280, 4.2.1.14
+
+ // CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
+ //
+ // DistributionPoint ::= SEQUENCE {
+ // distributionPoint [0] DistributionPointName OPTIONAL,
+ // reasons [1] ReasonFlags OPTIONAL,
+ // cRLIssuer [2] GeneralNames OPTIONAL }
+ //
+ // DistributionPointName ::= CHOICE {
+ // fullName [0] GeneralNames,
+ // nameRelativeToCRLIssuer [1] RelativeDistinguishedName }
+
+ var cdp []distributionPoint
+ _, err := asn1.Unmarshal(e.Value, &cdp)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, dp := range cdp {
+ var n asn1.RawValue
+ _, err = asn1.Unmarshal(dp.DistributionPoint.FullName.Bytes, &n)
+ if err != nil {
+ return nil, err
+ }
+
+ if n.Tag == 6 {
+ out.CRLDistributionPoints = append(out.CRLDistributionPoints, string(n.Bytes))
+ }
+ }
+ continue
+
+ case 35:
+ // RFC 5280, 4.2.1.1
+ var a authKeyId
+ _, err = asn1.Unmarshal(e.Value, &a)
+ if err != nil {
+ return nil, err
+ }
+ out.AuthorityKeyId = a.Id
+ continue
+
+ case 37:
+ // RFC 5280, 4.2.1.12. Extended Key Usage
+
+ // id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 }
+ //
+ // ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId
+ //
+ // KeyPurposeId ::= OBJECT IDENTIFIER
+
+ var keyUsage []asn1.ObjectIdentifier
+ _, err = asn1.Unmarshal(e.Value, &keyUsage)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, u := range keyUsage {
+ if extKeyUsage, ok := extKeyUsageFromOID(u); ok {
+ out.ExtKeyUsage = append(out.ExtKeyUsage, extKeyUsage)
+ } else {
+ out.UnknownExtKeyUsage = append(out.UnknownExtKeyUsage, u)
+ }
+ }
+
+ continue
+
+ case 14:
+ // RFC 5280, 4.2.1.2
+ var keyid []byte
+ _, err = asn1.Unmarshal(e.Value, &keyid)
+ if err != nil {
+ return nil, err
+ }
+ out.SubjectKeyId = keyid
+ continue
+
+ case 32:
+ // RFC 5280 4.2.1.4: Certificate Policies
+ var policies []policyInformation
+ if _, err = asn1.Unmarshal(e.Value, &policies); err != nil {
+ return nil, err
+ }
+ out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, len(policies))
+ for i, policy := range policies {
+ out.PolicyIdentifiers[i] = policy.Policy
+ }
+ }
+ } else if e.Id.Equal(oidExtensionAuthorityInfoAccess) {
+ // RFC 5280 4.2.2.1: Authority Information Access
+ var aia []authorityInfoAccess
+ if _, err = asn1.Unmarshal(e.Value, &aia); err != nil {
+ return nil, err
+ }
+
+ for _, v := range aia {
+ // GeneralName: uniformResourceIdentifier [6] IA5String
+ if v.Location.Tag != 6 {
+ continue
+ }
+ if v.Method.Equal(oidAuthorityInfoAccessOcsp) {
+ out.OCSPServer = append(out.OCSPServer, string(v.Location.Bytes))
+ } else if v.Method.Equal(oidAuthorityInfoAccessIssuers) {
+ out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(v.Location.Bytes))
+ }
+ }
+ }
+
+ if e.Critical {
+ // START CT CHANGES
+ nfe.AddError(UnhandledCriticalExtension{e.Id})
+ // END CT CHANGES
+ }
+ }
+ // START CT CHANGES
+ if nfe.HasError() {
+ return out, nfe
+ }
+ // END CT CHANGES
+ return out, nil
+}
+
+// START CT CHANGES
+
+// ParseTBSCertificate parses a single TBSCertificate from the given ASN.1 DER data.
+// The parsed data is returned in a Certificate struct for ease of access.
+func ParseTBSCertificate(asn1Data []byte) (*Certificate, error) {
+ var tbsCert tbsCertificate
+ rest, err := asn1.Unmarshal(asn1Data, &tbsCert)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, asn1.SyntaxError{Msg: "trailing data"}
+ }
+ return parseCertificate(&certificate{
+ Raw: tbsCert.Raw,
+ TBSCertificate: tbsCert})
+}
+
+// END CT CHANGES
+
+// ParseCertificate parses a single certificate from the given ASN.1 DER data.
+func ParseCertificate(asn1Data []byte) (*Certificate, error) {
+ var cert certificate
+ rest, err := asn1.Unmarshal(asn1Data, &cert)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, asn1.SyntaxError{Msg: "trailing data"}
+ }
+
+ return parseCertificate(&cert)
+}
+
+// ParseCertificates parses one or more certificates from the given ASN.1 DER
+// data. The certificates must be concatenated with no intermediate padding.
+func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
+ var v []*certificate
+
+ for len(asn1Data) > 0 {
+ cert := new(certificate)
+ var err error
+ asn1Data, err = asn1.Unmarshal(asn1Data, cert)
+ if err != nil {
+ return nil, err
+ }
+ v = append(v, cert)
+ }
+
+ ret := make([]*Certificate, len(v))
+ for i, ci := range v {
+ cert, err := parseCertificate(ci)
+ if err != nil {
+ return nil, err
+ }
+ ret[i] = cert
+ }
+
+ return ret, nil
+}
+
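+// reverseBitsInAByte mirrors the bit order of a byte. Go numbers KeyUsage
+// flags from the least significant bit, while an ASN.1 BIT STRING stores bit
+// zero in the most significant position of its first byte, so the bits must
+// be reversed before marshaling.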
+func reverseBitsInAByte(in byte) byte {
+ b1 := in>>4 | in<<4
+ b2 := b1>>2&0x33 | b1<<2&0xcc
+ b3 := b2>>1&0x55 | b2<<1&0xaa
+ return b3
+}
+
+var (
+ oidExtensionSubjectKeyId = []int{2, 5, 29, 14}
+ oidExtensionKeyUsage = []int{2, 5, 29, 15}
+ oidExtensionExtendedKeyUsage = []int{2, 5, 29, 37}
+ oidExtensionAuthorityKeyId = []int{2, 5, 29, 35}
+ oidExtensionBasicConstraints = []int{2, 5, 29, 19}
+ oidExtensionSubjectAltName = []int{2, 5, 29, 17}
+ oidExtensionCertificatePolicies = []int{2, 5, 29, 32}
+ oidExtensionNameConstraints = []int{2, 5, 29, 30}
+ oidExtensionCRLDistributionPoints = []int{2, 5, 29, 31}
+ oidExtensionAuthorityInfoAccess = []int{1, 3, 6, 1, 5, 5, 7, 1, 1}
+)
+
+var (
+ oidAuthorityInfoAccessOcsp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1}
+ oidAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2}
+)
+
+// oidInExtensions returns whether an extension with the given oid exists in
+// extensions.
+func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool {
+ for _, e := range extensions {
+ if e.Id.Equal(oid) {
+ return true
+ }
+ }
+ return false
+}
+
+func buildExtensions(template *Certificate) (ret []pkix.Extension, err error) {
+ ret = make([]pkix.Extension, 10 /* maximum number of elements. */)
+ n := 0
+
+ if template.KeyUsage != 0 &&
+ !oidInExtensions(oidExtensionKeyUsage, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionKeyUsage
+ ret[n].Critical = true
+
+ var a [2]byte
+ a[0] = reverseBitsInAByte(byte(template.KeyUsage))
+ a[1] = reverseBitsInAByte(byte(template.KeyUsage >> 8))
+
+ l := 1
+ if a[1] != 0 {
+ l = 2
+ }
+
+ ret[n].Value, err = asn1.Marshal(asn1.BitString{Bytes: a[0:l], BitLength: l * 8})
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.ExtKeyUsage) > 0 || len(template.UnknownExtKeyUsage) > 0) &&
+ !oidInExtensions(oidExtensionExtendedKeyUsage, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionExtendedKeyUsage
+
+ var oids []asn1.ObjectIdentifier
+ for _, u := range template.ExtKeyUsage {
+ if oid, ok := oidFromExtKeyUsage(u); ok {
+ oids = append(oids, oid)
+ } else {
+ panic("internal error")
+ }
+ }
+
+ oids = append(oids, template.UnknownExtKeyUsage...)
+
+ ret[n].Value, err = asn1.Marshal(oids)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if template.BasicConstraintsValid && !oidInExtensions(oidExtensionBasicConstraints, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionBasicConstraints
+ ret[n].Value, err = asn1.Marshal(basicConstraints{template.IsCA, template.MaxPathLen})
+ ret[n].Critical = true
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if len(template.SubjectKeyId) > 0 && !oidInExtensions(oidExtensionSubjectKeyId, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionSubjectKeyId
+ ret[n].Value, err = asn1.Marshal(template.SubjectKeyId)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if len(template.AuthorityKeyId) > 0 && !oidInExtensions(oidExtensionAuthorityKeyId, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionAuthorityKeyId
+ ret[n].Value, err = asn1.Marshal(authKeyId{template.AuthorityKeyId})
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) &&
+ !oidInExtensions(oidExtensionAuthorityInfoAccess, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionAuthorityInfoAccess
+ var aiaValues []authorityInfoAccess
+ for _, name := range template.OCSPServer {
+ aiaValues = append(aiaValues, authorityInfoAccess{
+ Method: oidAuthorityInfoAccessOcsp,
+ Location: asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)},
+ })
+ }
+ for _, name := range template.IssuingCertificateURL {
+ aiaValues = append(aiaValues, authorityInfoAccess{
+ Method: oidAuthorityInfoAccessIssuers,
+ Location: asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)},
+ })
+ }
+ ret[n].Value, err = asn1.Marshal(aiaValues)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0) &&
+ !oidInExtensions(oidExtensionSubjectAltName, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionSubjectAltName
+ var rawValues []asn1.RawValue
+ for _, name := range template.DNSNames {
+ rawValues = append(rawValues, asn1.RawValue{Tag: 2, Class: 2, Bytes: []byte(name)})
+ }
+ for _, email := range template.EmailAddresses {
+ rawValues = append(rawValues, asn1.RawValue{Tag: 1, Class: 2, Bytes: []byte(email)})
+ }
+ for _, rawIP := range template.IPAddresses {
+ // If possible, we always want to encode IPv4 addresses in 4 bytes.
+ ip := rawIP.To4()
+ if ip == nil {
+ ip = rawIP
+ }
+ rawValues = append(rawValues, asn1.RawValue{Tag: 7, Class: 2, Bytes: ip})
+ }
+ ret[n].Value, err = asn1.Marshal(rawValues)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if len(template.PolicyIdentifiers) > 0 &&
+ !oidInExtensions(oidExtensionCertificatePolicies, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionCertificatePolicies
+ policies := make([]policyInformation, len(template.PolicyIdentifiers))
+ for i, policy := range template.PolicyIdentifiers {
+ policies[i].Policy = policy
+ }
+ ret[n].Value, err = asn1.Marshal(policies)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if len(template.PermittedDNSDomains) > 0 &&
+ !oidInExtensions(oidExtensionNameConstraints, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionNameConstraints
+ ret[n].Critical = template.PermittedDNSDomainsCritical
+
+ var out nameConstraints
+ out.Permitted = make([]generalSubtree, len(template.PermittedDNSDomains))
+ for i, permitted := range template.PermittedDNSDomains {
+ out.Permitted[i] = generalSubtree{Name: permitted}
+ }
+ ret[n].Value, err = asn1.Marshal(out)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if len(template.CRLDistributionPoints) > 0 &&
+ !oidInExtensions(oidExtensionCRLDistributionPoints, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionCRLDistributionPoints
+
+ var crlDp []distributionPoint
+ for _, name := range template.CRLDistributionPoints {
+ rawFullName, _ := asn1.Marshal(asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)})
+
+ dp := distributionPoint{
+ DistributionPoint: distributionPointName{
+ FullName: asn1.RawValue{Tag: 0, Class: 2, Bytes: rawFullName},
+ },
+ }
+ crlDp = append(crlDp, dp)
+ }
+
+ ret[n].Value, err = asn1.Marshal(crlDp)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ // Adding another extension here? Remember to update the maximum number
+ // of elements in the make() at the top of the function.
+
+ return append(ret[:n], template.ExtraExtensions...), nil
+}
+
+func subjectBytes(cert *Certificate) ([]byte, error) {
+ if len(cert.RawSubject) > 0 {
+ return cert.RawSubject, nil
+ }
+
+ return asn1.Marshal(cert.Subject.ToRDNSequence())
+}
+
+// CreateCertificate creates a new certificate based on a template. The
+// following members of template are used: SerialNumber, Subject, NotBefore,
+// NotAfter, KeyUsage, ExtKeyUsage, UnknownExtKeyUsage, BasicConstraintsValid,
+// IsCA, MaxPathLen, SubjectKeyId, DNSNames, PermittedDNSDomainsCritical,
+// PermittedDNSDomains.
+//
+// The certificate is signed by parent. If parent is equal to template then the
+// certificate is self-signed. The parameter pub is the public key of the
+// signee and priv is the private key of the signer.
+//
+// The returned slice is the certificate in DER encoding.
+//
+// The only supported key types are RSA and ECDSA (*rsa.PublicKey or
+// *ecdsa.PublicKey for pub, *rsa.PrivateKey or *ecdsa.PrivateKey for priv).
+func CreateCertificate(rand io.Reader, template, parent *Certificate, pub interface{}, priv interface{}) (cert []byte, err error) {
+ var publicKeyBytes []byte
+ var publicKeyAlgorithm pkix.AlgorithmIdentifier
+
+ if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil {
+ return nil, err
+ }
+
+ var signatureAlgorithm pkix.AlgorithmIdentifier
+ var hashFunc crypto.Hash
+
+ switch priv := priv.(type) {
+ case *rsa.PrivateKey:
+ signatureAlgorithm.Algorithm = oidSignatureSHA1WithRSA
+ hashFunc = crypto.SHA1
+ case *ecdsa.PrivateKey:
+ switch priv.Curve {
+ case elliptic.P224(), elliptic.P256():
+ hashFunc = crypto.SHA256
+ signatureAlgorithm.Algorithm = oidSignatureECDSAWithSHA256
+ case elliptic.P384():
+ hashFunc = crypto.SHA384
+ signatureAlgorithm.Algorithm = oidSignatureECDSAWithSHA384
+ case elliptic.P521():
+ hashFunc = crypto.SHA512
+ signatureAlgorithm.Algorithm = oidSignatureECDSAWithSHA512
+ default:
+ return nil, errors.New("x509: unknown elliptic curve")
+ }
+ default:
+ return nil, errors.New("x509: only RSA and ECDSA private keys supported")
+ }
+
+ if err != nil {
+ return
+ }
+
+ if len(parent.SubjectKeyId) > 0 {
+ template.AuthorityKeyId = parent.SubjectKeyId
+ }
+
+ extensions, err := buildExtensions(template)
+ if err != nil {
+ return
+ }
+
+ asn1Issuer, err := subjectBytes(parent)
+ if err != nil {
+ return
+ }
+
+ asn1Subject, err := subjectBytes(template)
+ if err != nil {
+ return
+ }
+
+ encodedPublicKey := asn1.BitString{BitLength: len(publicKeyBytes) * 8, Bytes: publicKeyBytes}
+ c := tbsCertificate{
+ Version: 2,
+ SerialNumber: template.SerialNumber,
+ SignatureAlgorithm: signatureAlgorithm,
+ Issuer: asn1.RawValue{FullBytes: asn1Issuer},
+ Validity: validity{template.NotBefore.UTC(), template.NotAfter.UTC()},
+ Subject: asn1.RawValue{FullBytes: asn1Subject},
+ PublicKey: publicKeyInfo{nil, publicKeyAlgorithm, encodedPublicKey},
+ Extensions: extensions,
+ }
+
+ tbsCertContents, err := asn1.Marshal(c)
+ if err != nil {
+ return
+ }
+
+ c.Raw = tbsCertContents
+
+ h := hashFunc.New()
+ h.Write(tbsCertContents)
+ digest := h.Sum(nil)
+
+ var signature []byte
+
+ switch priv := priv.(type) {
+ case *rsa.PrivateKey:
+ signature, err = rsa.SignPKCS1v15(rand, priv, hashFunc, digest)
+ case *ecdsa.PrivateKey:
+ var r, s *big.Int
+ if r, s, err = ecdsa.Sign(rand, priv, digest); err == nil {
+ signature, err = asn1.Marshal(ecdsaSignature{r, s})
+ }
+ default:
+ panic("internal error")
+ }
+
+ if err != nil {
+ return
+ }
+
+ cert, err = asn1.Marshal(certificate{
+ nil,
+ c,
+ signatureAlgorithm,
+ asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
+ })
+ return
+}
+
+// pemCRLPrefix is the magic string that indicates that we have a PEM encoded
+// CRL.
+var pemCRLPrefix = []byte("-----BEGIN X509 CRL")
+
+// pemType is the type of a PEM encoded CRL.
+var pemType = "X509 CRL"
+
+// ParseCRL parses a CRL from the given bytes. It's often the case that PEM
+// encoded CRLs will appear where they should be DER encoded, so this function
+// will transparently handle PEM encoding as long as there isn't any leading
+// garbage.
+func ParseCRL(crlBytes []byte) (certList *pkix.CertificateList, err error) {
+ if bytes.HasPrefix(crlBytes, pemCRLPrefix) {
+ block, _ := pem.Decode(crlBytes)
+ if block != nil && block.Type == pemType {
+ crlBytes = block.Bytes
+ }
+ }
+ return ParseDERCRL(crlBytes)
+}
+
+// ParseDERCRL parses a DER encoded CRL from the given bytes.
+func ParseDERCRL(derBytes []byte) (certList *pkix.CertificateList, err error) {
+ certList = new(pkix.CertificateList)
+ _, err = asn1.Unmarshal(derBytes, certList)
+ if err != nil {
+ certList = nil
+ }
+ return
+}
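+
+// A CRL round-trip sketch, assuming issuer is a *Certificate whose signing
+// key is the *rsa.PrivateKey priv, and revoked is a []pkix.RevokedCertificate:
+//
+//	crlDER, err := issuer.CreateCRL(rand.Reader, priv, revoked, time.Now(), time.Now().Add(24*time.Hour))
+//	if err != nil {
+//		// handle the error
+//	}
+//	list, err := ParseCRL(crlDER)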
+
+// CreateCRL returns a DER encoded CRL, signed by this Certificate, that
+// contains the given list of revoked certificates.
+//
+// The only supported key type is RSA (*rsa.PrivateKey for priv).
+func (c *Certificate) CreateCRL(rand io.Reader, priv interface{}, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
+ rsaPriv, ok := priv.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("x509: non-RSA private keys not supported")
+ }
+ tbsCertList := pkix.TBSCertificateList{
+ Version: 2,
+ Signature: pkix.AlgorithmIdentifier{
+ Algorithm: oidSignatureSHA1WithRSA,
+ },
+ Issuer: c.Subject.ToRDNSequence(),
+ ThisUpdate: now.UTC(),
+ NextUpdate: expiry.UTC(),
+ RevokedCertificates: revokedCerts,
+ }
+
+ tbsCertListContents, err := asn1.Marshal(tbsCertList)
+ if err != nil {
+ return
+ }
+
+ h := sha1.New()
+ h.Write(tbsCertListContents)
+ digest := h.Sum(nil)
+
+ signature, err := rsa.SignPKCS1v15(rand, rsaPriv, crypto.SHA1, digest)
+ if err != nil {
+ return
+ }
+
+ return asn1.Marshal(pkix.CertificateList{
+ TBSCertList: tbsCertList,
+ SignatureAlgorithm: pkix.AlgorithmIdentifier{
+ Algorithm: oidSignatureSHA1WithRSA,
+ },
+ SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
+ })
+}
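+
+// A minimal end-to-end sketch, assuming this package is imported as "x509"
+// and priv is an *rsa.PrivateKey; the field values are illustrative only:
+//
+//	template := &x509.Certificate{
+//		SerialNumber:          big.NewInt(1),
+//		Subject:               pkix.Name{CommonName: "example"},
+//		NotBefore:             time.Now(),
+//		NotAfter:              time.Now().Add(24 * time.Hour),
+//		KeyUsage:              x509.KeyUsageCertSign,
+//		IsCA:                  true,
+//		BasicConstraintsValid: true,
+//	}
+//	der, err := x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv)
+//	if err != nil {
+//		// handle the error
+//	}
+//	cert, err := x509.ParseCertificate(der)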
diff --git a/vendor/src/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/src/github.com/hashicorp/go-immutable-radix/.gitignore
new file mode 100644
index 0000000000..daf913b1b3
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-immutable-radix/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/src/github.com/hashicorp/go-immutable-radix/.travis.yml b/vendor/src/github.com/hashicorp/go-immutable-radix/.travis.yml
new file mode 100644
index 0000000000..1a0bbea6c7
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-immutable-radix/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+go:
+ - tip
diff --git a/vendor/src/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/src/github.com/hashicorp/go-immutable-radix/LICENSE
new file mode 100644
index 0000000000..e87a115e46
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-immutable-radix/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/src/github.com/hashicorp/go-immutable-radix/README.md b/vendor/src/github.com/hashicorp/go-immutable-radix/README.md
new file mode 100644
index 0000000000..8910fcc035
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-immutable-radix/README.md
@@ -0,0 +1,41 @@
+go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix)
+=========
+
+Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
+The package only provides a single `Tree` implementation, optimized for sparse nodes.
+
+As a radix tree, it provides the following:
+ * O(k) operations. In many cases, this can be faster than a hash table since
+ the hash function is an O(k) operation, and hash tables have very poor cache locality.
+ * Minimum / Maximum value lookups
+ * Ordered iteration
+
+A tree supports using a transaction to batch multiple updates (insert, delete)
+in a more efficient manner than performing each operation one at a time.
+
+For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
+
+Example
+=======
+
+Below is a simple example of usage:
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("foo"), 1)
+r, _, _ = r.Insert([]byte("bar"), 2)
+r, _, _ = r.Insert([]byte("foobar"), 2)
+
+// Find the longest prefix match
+m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
+if string(m) != "foo" {
+ panic("should be foo")
+}
+```
+
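+Multiple updates can also be batched in a transaction and committed as a
+single new tree, along the lines of:
+
+```go
+// Batch several updates and commit them atomically
+txn := r.Txn()
+txn.Insert([]byte("baz"), 3)
+txn.Delete([]byte("bar"))
+r = txn.Commit()
+```
+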
diff --git a/vendor/src/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/src/github.com/hashicorp/go-immutable-radix/edges.go
new file mode 100644
index 0000000000..a63674775f
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-immutable-radix/edges.go
@@ -0,0 +1,21 @@
+package iradix
+
+import "sort"
+
+type edges []edge
+
+func (e edges) Len() int {
+ return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+ return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+ e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+ sort.Sort(e)
+}
diff --git a/vendor/src/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/src/github.com/hashicorp/go-immutable-radix/iradix.go
new file mode 100644
index 0000000000..b25558388f
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-immutable-radix/iradix.go
@@ -0,0 +1,333 @@
+package iradix
+
+import (
+ "bytes"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+ // defaultModifiedCache is the default size of the modified node
+ // cache used per transaction. This is used to cache the updates
+ // to the nodes near the root, while the leaves do not need to be
+ // cached. This is important for very large transactions to prevent
+ // the modified cache from growing to be enormous.
+ defaultModifiedCache = 8192
+)
+
+// Tree implements an immutable radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over a standard
+// hash map is prefix-based lookups and ordered iteration. The immutability
+// means that it is safe to concurrently read from a Tree without any
+// coordination.
+type Tree struct {
+ root *Node
+ size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+ t := &Tree{root: &Node{}}
+ return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+ return t.size
+}
+
+// Txn is a transaction on the tree. This transaction is applied
+// atomically and returns a new tree when committed. A transaction
+// is not thread safe, and should only be used by a single goroutine.
+type Txn struct {
+ root *Node
+ size int
+ modified *simplelru.LRU
+}
+
+// Txn starts a new transaction that can be used to mutate the tree
+func (t *Tree) Txn() *Txn {
+ txn := &Txn{
+ root: t.root,
+ size: t.size,
+ }
+ return txn
+}
+
+// writeNode returns a node to be modified; if the current
+// node has already been modified during the course of
+// the transaction, it is reused in-place.
+func (t *Txn) writeNode(n *Node) *Node {
+ // Ensure the modified set exists
+ if t.modified == nil {
+ lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
+ if err != nil {
+ panic(err)
+ }
+ t.modified = lru
+ }
+
+ // If this node has already been modified, we can
+ // continue to use it during this transaction.
+ if _, ok := t.modified.Get(n); ok {
+ return n
+ }
+
+ // Copy the existing node
+ nc := new(Node)
+ if n.prefix != nil {
+ nc.prefix = make([]byte, len(n.prefix))
+ copy(nc.prefix, n.prefix)
+ }
+ if n.leaf != nil {
+ nc.leaf = new(leafNode)
+ *nc.leaf = *n.leaf
+ }
+ if len(n.edges) != 0 {
+ nc.edges = make([]edge, len(n.edges))
+ copy(nc.edges, n.edges)
+ }
+
+ // Mark this node as modified
+ t.modified.Add(n, nil)
+ return nc
+}
+
+// insert does a recursive insertion
+func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
+ // Handle key exhaustion
+ if len(search) == 0 {
+ nc := t.writeNode(n)
+ if n.isLeaf() {
+ old := nc.leaf.val
+ nc.leaf.val = v
+ return nc, old, true
+ } else {
+ nc.leaf = &leafNode{
+ key: k,
+ val: v,
+ }
+ return nc, nil, false
+ }
+ }
+
+ // Look for the edge
+ idx, child := n.getEdge(search[0])
+
+ // No edge, create one
+ if child == nil {
+ e := edge{
+ label: search[0],
+ node: &Node{
+ leaf: &leafNode{
+ key: k,
+ val: v,
+ },
+ prefix: search,
+ },
+ }
+ nc := t.writeNode(n)
+ nc.addEdge(e)
+ return nc, nil, false
+ }
+
+ // Determine the longest common prefix of the search key and the child's prefix
+ commonPrefix := longestPrefix(search, child.prefix)
+ if commonPrefix == len(child.prefix) {
+ search = search[commonPrefix:]
+ newChild, oldVal, didUpdate := t.insert(child, k, search, v)
+ if newChild != nil {
+ nc := t.writeNode(n)
+ nc.edges[idx].node = newChild
+ return nc, oldVal, didUpdate
+ }
+ return nil, oldVal, didUpdate
+ }
+
+ // Split the node
+ nc := t.writeNode(n)
+ splitNode := &Node{
+ prefix: search[:commonPrefix],
+ }
+ nc.replaceEdge(edge{
+ label: search[0],
+ node: splitNode,
+ })
+
+ // Restore the existing child node
+ modChild := t.writeNode(child)
+ splitNode.addEdge(edge{
+ label: modChild.prefix[commonPrefix],
+ node: modChild,
+ })
+ modChild.prefix = modChild.prefix[commonPrefix:]
+
+ // Create a new leaf node
+ leaf := &leafNode{
+ key: k,
+ val: v,
+ }
+
+ // If the new key is a subset, add it to this node
+ search = search[commonPrefix:]
+ if len(search) == 0 {
+ splitNode.leaf = leaf
+ return nc, nil, false
+ }
+
+ // Create a new edge for the node
+ splitNode.addEdge(edge{
+ label: search[0],
+ node: &Node{
+ leaf: leaf,
+ prefix: search,
+ },
+ })
+ return nc, nil, false
+}
+
+// delete does a recursive deletion
+func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ if !n.isLeaf() {
+ return nil, nil
+ }
+
+ // Remove the leaf node
+ nc := t.writeNode(n)
+ nc.leaf = nil
+
+ // Check if this node should be merged
+ if n != t.root && len(nc.edges) == 1 {
+ nc.mergeChild()
+ }
+ return nc, n.leaf
+ }
+
+ // Look for an edge
+ label := search[0]
+ idx, child := n.getEdge(label)
+ if child == nil || !bytes.HasPrefix(search, child.prefix) {
+ return nil, nil
+ }
+
+ // Consume the search prefix
+ search = search[len(child.prefix):]
+ newChild, leaf := t.delete(n, child, search)
+ if newChild == nil {
+ return nil, nil
+ }
+
+ // Copy this node
+ nc := t.writeNode(n)
+
+ // Delete the edge if the node has no edges
+ if newChild.leaf == nil && len(newChild.edges) == 0 {
+ nc.delEdge(label)
+ if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+ nc.mergeChild()
+ }
+ } else {
+ nc.edges[idx].node = newChild
+ }
+ return nc, leaf
+}
+
+// Insert is used to add or update a given key. The return provides
+// the previous value and a bool indicating if any was set.
+func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
+ newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
+ if newRoot != nil {
+ t.root = newRoot
+ }
+ if !didUpdate {
+ t.size++
+ }
+ return oldVal, didUpdate
+}
+
+// Delete is used to delete a given key. Returns the old value if any,
+// and a bool indicating if the key was set.
+func (t *Txn) Delete(k []byte) (interface{}, bool) {
+ newRoot, leaf := t.delete(nil, t.root, k)
+ if newRoot != nil {
+ t.root = newRoot
+ }
+ if leaf != nil {
+ t.size--
+ return leaf.val, true
+ }
+ return nil, false
+}
+
+// Root returns the current root of the radix tree within this
+// transaction. The root is not safe across insert and delete operations,
+// but can be used to read the current state during a transaction.
+func (t *Txn) Root() *Node {
+ return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Txn) Get(k []byte) (interface{}, bool) {
+ return t.root.Get(k)
+}
+
+// Commit is used to finalize the transaction and return a new tree
+func (t *Txn) Commit() *Tree {
+ t.modified = nil
+ return &Tree{t.root, t.size}
+}
+
+// Insert is used to add or update a given key. The return provides
+// the new tree, previous value and a bool indicating if any was set.
+func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
+ txn := t.Txn()
+ old, ok := txn.Insert(k, v)
+ return txn.Commit(), old, ok
+}
+
+// Delete is used to delete a given key. Returns the new tree,
+// old value if any, and a bool indicating if the key was set.
+func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
+ txn := t.Txn()
+ old, ok := txn.Delete(k)
+ return txn.Commit(), old, ok
+}
+
+// Root returns the root node of the tree which can be used for richer
+// query operations.
+func (t *Tree) Root() *Node {
+ return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(k []byte) (interface{}, bool) {
+ return t.root.Get(k)
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 []byte) int {
+ max := len(k1)
+ if l := len(k2); l < max {
+ max = l
+ }
+ var i int
+ for i = 0; i < max; i++ {
+ if k1[i] != k2[i] {
+ break
+ }
+ }
+ return i
+}
+
+// concat two byte slices, returning a third new copy
+func concat(a, b []byte) []byte {
+ c := make([]byte, len(a)+len(b))
+ copy(c, a)
+ copy(c[len(a):], b)
+ return c
+}
diff --git a/vendor/src/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/src/github.com/hashicorp/go-immutable-radix/iter.go
new file mode 100644
index 0000000000..75cbaa110f
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-immutable-radix/iter.go
@@ -0,0 +1,81 @@
+package iradix
+
+import "bytes"
+
+// Iterator is used to iterate over a set of nodes
+// in pre-order
+type Iterator struct {
+ node *Node
+ stack []edges
+}
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (i *Iterator) SeekPrefix(prefix []byte) {
+ // Wipe the stack
+ i.stack = nil
+ n := i.node
+ search := prefix
+ for {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ i.node = n
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ i.node = nil
+ return
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+
+ } else if bytes.HasPrefix(n.prefix, search) {
+ i.node = n
+ return
+ } else {
+ i.node = nil
+ return
+ }
+ }
+}
+
+// Next returns the next node in order
+func (i *Iterator) Next() ([]byte, interface{}, bool) {
+ // Initialize our stack if needed
+ if i.stack == nil && i.node != nil {
+ i.stack = []edges{
+ edges{
+ edge{node: i.node},
+ },
+ }
+ }
+
+ for len(i.stack) > 0 {
+ // Inspect the last element of the stack
+ n := len(i.stack)
+ last := i.stack[n-1]
+ elem := last[0].node
+
+ // Update the stack
+ if len(last) > 1 {
+ i.stack[n-1] = last[1:]
+ } else {
+ i.stack = i.stack[:n-1]
+ }
+
+ // Push the edges onto the frontier
+ if len(elem.edges) > 0 {
+ i.stack = append(i.stack, elem.edges)
+ }
+
+ // Return the leaf values if any
+ if elem.leaf != nil {
+ return elem.leaf.key, elem.leaf.val, true
+ }
+ }
+ return nil, nil, false
+}
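+
+// A minimal iteration sketch, assuming t is a *Tree: seek to a prefix, then
+// drain the iterator in pre-order.
+//
+//	it := t.Root().Iterator()
+//	it.SeekPrefix([]byte("foo"))
+//	for k, v, ok := it.Next(); ok; k, v, ok = it.Next() {
+//		// k and v belong to the next leaf under the prefix
+//	}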
diff --git a/vendor/src/github.com/hashicorp/go-immutable-radix/node.go b/vendor/src/github.com/hashicorp/go-immutable-radix/node.go
new file mode 100644
index 0000000000..fea6f63436
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-immutable-radix/node.go
@@ -0,0 +1,289 @@
+package iradix
+
+import (
+ "bytes"
+ "sort"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning true if iteration should
+// be terminated.
+type WalkFn func(k []byte, v interface{}) bool
+
+// leafNode is used to represent a value
+type leafNode struct {
+ key []byte
+ val interface{}
+}
+
+// edge is used to represent an edge node
+type edge struct {
+ label byte
+ node *Node
+}
+
+// Node is an immutable node in the radix tree
+type Node struct {
+ // leaf is used to store possible leaf
+ leaf *leafNode
+
+ // prefix is the common prefix we ignore
+ prefix []byte
+
+ // Edges should be stored in-order for iteration.
+ // We avoid a fully materialized 256-entry edge table to save memory,
+ // since in most cases nodes are sparse
+ edges edges
+}
+
+func (n *Node) isLeaf() bool {
+ return n.leaf != nil
+}
+
+func (n *Node) addEdge(e edge) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= e.label
+ })
+ n.edges = append(n.edges, e)
+ if idx != num {
+ copy(n.edges[idx+1:], n.edges[idx:num])
+ n.edges[idx] = e
+ }
+}
+
+func (n *Node) replaceEdge(e edge) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= e.label
+ })
+ if idx < num && n.edges[idx].label == e.label {
+ n.edges[idx].node = e.node
+ return
+ }
+ panic("replacing missing edge")
+}
+
+func (n *Node) getEdge(label byte) (int, *Node) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= label
+ })
+ if idx < num && n.edges[idx].label == label {
+ return idx, n.edges[idx].node
+ }
+ return -1, nil
+}
+
+func (n *Node) delEdge(label byte) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= label
+ })
+ if idx < num && n.edges[idx].label == label {
+ copy(n.edges[idx:], n.edges[idx+1:])
+ n.edges[len(n.edges)-1] = edge{}
+ n.edges = n.edges[:len(n.edges)-1]
+ }
+}
+
+func (n *Node) mergeChild() {
+ e := n.edges[0]
+ child := e.node
+ n.prefix = concat(n.prefix, child.prefix)
+ if child.leaf != nil {
+ n.leaf = new(leafNode)
+ *n.leaf = *child.leaf
+ } else {
+ n.leaf = nil
+ }
+ if len(child.edges) != 0 {
+ n.edges = make([]edge, len(child.edges))
+ copy(n.edges, child.edges)
+ } else {
+ n.edges = nil
+ }
+}
+
+func (n *Node) Get(k []byte) (interface{}, bool) {
+ search := k
+ for {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ if n.isLeaf() {
+ return n.leaf.val, true
+ }
+ break
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+ return nil, false
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+ var last *leafNode
+ search := k
+ for {
+ // Look for a leaf node
+ if n.isLeaf() {
+ last = n.leaf
+ }
+
+ // Check for key exhaustion
+ if len(search) == 0 {
+ break
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+ if last != nil {
+ return last.key, last.val, true
+ }
+ return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+ for {
+ if n.isLeaf() {
+ return n.leaf.key, n.leaf.val, true
+ }
+ if len(n.edges) > 0 {
+ n = n.edges[0].node
+ } else {
+ break
+ }
+ }
+ return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+ for {
+ if num := len(n.edges); num > 0 {
+ n = n.edges[num-1].node
+ continue
+ }
+ if n.isLeaf() {
+ return n.leaf.key, n.leaf.val, true
+ } else {
+ break
+ }
+ }
+ return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+ return &Iterator{node: n}
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+ recursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+ search := prefix
+ for {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ recursiveWalk(n, fn)
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+
+ } else if bytes.HasPrefix(n.prefix, search) {
+ // Child may be under our search prefix
+ recursiveWalk(n, fn)
+ return
+ } else {
+ break
+ }
+ }
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+ search := path
+ for {
+ // Visit the leaf values if any
+ if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+ return
+ }
+
+ // Check for key exhaustion
+ if len(search) == 0 {
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ return
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *Node, fn WalkFn) bool {
+ // Visit the leaf values if any
+ if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+ return true
+ }
+
+ // Recurse on the children
+ for _, e := range n.edges {
+ if recursiveWalk(e.node, fn) {
+ return true
+ }
+ }
+ return false
+}
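+
+// A walking sketch, assuming t is a *Tree: WalkPrefix visits every entry
+// stored under a prefix, while WalkPath visits the entries on the path from
+// the root down to a key. Returning true from the callback stops the walk.
+//
+//	t.Root().WalkPrefix([]byte("foo"), func(k []byte, v interface{}) bool {
+//		fmt.Printf("%s = %v\n", k, v)
+//		return false // keep walking
+//	})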
diff --git a/vendor/src/github.com/hashicorp/go-memdb/.gitignore b/vendor/src/github.com/hashicorp/go-memdb/.gitignore
new file mode 100644
index 0000000000..daf913b1b3
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-memdb/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/src/github.com/hashicorp/go-memdb/LICENSE b/vendor/src/github.com/hashicorp/go-memdb/LICENSE
new file mode 100644
index 0000000000..e87a115e46
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-memdb/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses
+
+      If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/src/github.com/hashicorp/go-memdb/README.md b/vendor/src/github.com/hashicorp/go-memdb/README.md
new file mode 100644
index 0000000000..203a0af14b
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-memdb/README.md
@@ -0,0 +1,93 @@
+# go-memdb
+
+Provides the `memdb` package that implements a simple in-memory database
+built on immutable radix trees. The database provides Atomicity, Consistency
+and Isolation from ACID. Because it is in-memory, it does not provide durability.
+The database is instantiated with a schema that specifies the tables and indices
+that exist and allows transactions to be executed.
+
+The database provides the following:
+
+* Multi-Version Concurrency Control (MVCC) - By leveraging immutable radix trees,
+  the database supports any number of concurrent readers without locking, while
+  still allowing a writer to make progress.
+
+* Transaction Support - The database allows for rich transactions, in which multiple
+ objects are inserted, updated or deleted. The transactions can span multiple tables,
+ and are applied atomically. The database provides atomicity and isolation in ACID
+ terminology, such that until commit the updates are not visible.
+
+* Rich Indexing - Tables can support any number of indexes, which can be simple like
+  a single field index, or more advanced compound field indexes. Certain types like
+  UUID can be efficiently compressed from strings into byte indexes for reduced
+  storage requirements.
+
+For the underlying immutable radix trees, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-memdb).
+
+Example
+=======
+
+Below is a simple example of usage:
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/hashicorp/go-memdb"
+)
+
+// Person is a sample struct to store and index
+type Person struct {
+    Email string
+    Name  string
+    Age   int
+}
+
+func main() {
+    // Create the DB schema
+    schema := &memdb.DBSchema{
+        Tables: map[string]*memdb.TableSchema{
+            "person": &memdb.TableSchema{
+                Name: "person",
+                Indexes: map[string]*memdb.IndexSchema{
+                    "id": &memdb.IndexSchema{
+                        Name:    "id",
+                        Unique:  true,
+                        Indexer: &memdb.StringFieldIndex{Field: "Email"},
+                    },
+                },
+            },
+        },
+    }
+
+    // Create a new database
+    db, err := memdb.NewMemDB(schema)
+    if err != nil {
+        panic(err)
+    }
+
+    // Create a write transaction
+    txn := db.Txn(true)
+
+    // Insert a new person
+    p := &Person{"joe@aol.com", "Joe", 30}
+    if err := txn.Insert("person", p); err != nil {
+        panic(err)
+    }
+
+    // Commit the transaction
+    txn.Commit()
+
+    // Create a read-only transaction
+    txn = db.Txn(false)
+    defer txn.Abort()
+
+    // Look up by email
+    raw, err := txn.First("person", "id", "joe@aol.com")
+    if err != nil {
+        panic(err)
+    }
+
+    // Say hi!
+    fmt.Printf("Hello %s!\n", raw.(*Person).Name)
+}
+```
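+
+Prefix scans are also supported when the index's `Indexer` implements
+`PrefixIndexer` (as `StringFieldIndex` does); appending `_prefix` to the
+index name selects a prefix scan. A sketch, continuing inside `main` above:
+
+```go
+// Iterate over everyone whose email starts with "jo". The "_prefix"
+// suffix routes the lookup through PrefixFromArgs on the indexer.
+it, err := txn.Get("person", "id_prefix", "jo")
+if err != nil {
+    panic(err)
+}
+for obj := it.Next(); obj != nil; obj = it.Next() {
+    fmt.Printf("Matched %s\n", obj.(*Person).Name)
+}
+```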
+
diff --git a/vendor/src/github.com/hashicorp/go-memdb/index.go b/vendor/src/github.com/hashicorp/go-memdb/index.go
new file mode 100644
index 0000000000..7237f33e27
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-memdb/index.go
@@ -0,0 +1,330 @@
+package memdb
+
+import (
+ "encoding/hex"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// Indexer is an interface used for defining indexes
+type Indexer interface {
+ // FromObject is used to extract an index value from an
+ // object or to indicate that the index value is missing.
+ FromObject(raw interface{}) (bool, []byte, error)
+
+	// FromArgs is used to build an exact index lookup
+	// based on arguments
+	FromArgs(args ...interface{}) ([]byte, error)
+}
+
+// PrefixIndexer can optionally be implemented for any
+// indexes that support prefix based iteration. This may
+// not apply to all indexes.
+type PrefixIndexer interface {
+ // PrefixFromArgs returns a prefix that should be used
+ // for scanning based on the arguments
+ PrefixFromArgs(args ...interface{}) ([]byte, error)
+}
+
+// StringFieldIndex is used to extract a field from an object
+// using reflection and builds an index on that field.
+type StringFieldIndex struct {
+ Field string
+ Lowercase bool
+}
+
+func (s *StringFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(s.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj)
+ }
+
+ val := fv.String()
+ if val == "" {
+ return false, nil, nil
+ }
+
+ if s.Lowercase {
+ val = strings.ToLower(val)
+ }
+
+ // Add the null character as a terminator
+ val += "\x00"
+ return true, []byte(val), nil
+}
+
+func (s *StringFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ arg, ok := args[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+ }
+ if s.Lowercase {
+ arg = strings.ToLower(arg)
+ }
+ // Add the null character as a terminator
+ arg += "\x00"
+ return []byte(arg), nil
+}
+
+func (s *StringFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ val, err := s.FromArgs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Strip the null terminator, the rest is a prefix
+ n := len(val)
+ if n > 0 {
+ return val[:n-1], nil
+ }
+ return val, nil
+}
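+
+// For example (a sketch of the terminator scheme above): FromArgs("foo")
+// yields "foo\x00", so exact lookups never match longer strings, while
+// PrefixFromArgs("foo") yields "foo", so iteration matches "foo",
+// "foobar", and so on.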
+
+// UUIDFieldIndex is used to extract a field from an object
+// using reflection and builds an index on that field by treating
+// it as a UUID. This is an optimization to using a StringFieldIndex
+// as the UUID can be more compactly represented in byte form.
+type UUIDFieldIndex struct {
+ Field string
+}
+
+func (u *UUIDFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(u.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj)
+ }
+
+ val := fv.String()
+ if val == "" {
+ return false, nil, nil
+ }
+
+ buf, err := u.parseString(val, true)
+ return true, buf, err
+}
+
+func (u *UUIDFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ switch arg := args[0].(type) {
+ case string:
+ return u.parseString(arg, true)
+ case []byte:
+ if len(arg) != 16 {
+			return nil, fmt.Errorf("byte slice must be 16 bytes")
+ }
+ return arg, nil
+ default:
+ return nil,
+ fmt.Errorf("argument must be a string or byte slice: %#v", args[0])
+ }
+}
+
+func (u *UUIDFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ switch arg := args[0].(type) {
+ case string:
+ return u.parseString(arg, false)
+ case []byte:
+ return arg, nil
+ default:
+ return nil,
+ fmt.Errorf("argument must be a string or byte slice: %#v", args[0])
+ }
+}
+
+// parseString parses a UUID from the string. If enforceLength is false, it will
+// parse a partial UUID. An error is returned if the input, stripped of hyphens,
+// is not of even length.
+func (u *UUIDFieldIndex) parseString(s string, enforceLength bool) ([]byte, error) {
+ // Verify the length
+ l := len(s)
+ if enforceLength && l != 36 {
+ return nil, fmt.Errorf("UUID must be 36 characters")
+ } else if l > 36 {
+		return nil, fmt.Errorf("Invalid UUID length. UUIDs have 36 characters; got %d", l)
+ }
+
+ hyphens := strings.Count(s, "-")
+ if hyphens > 4 {
+ return nil, fmt.Errorf(`UUID should have maximum of 4 "-"; got %d`, hyphens)
+ }
+
+ // The sanitized length is the length of the original string without the "-".
+ sanitized := strings.Replace(s, "-", "", -1)
+ sanitizedLength := len(sanitized)
+ if sanitizedLength%2 != 0 {
+ return nil, fmt.Errorf("Input (without hyphens) must be even length")
+ }
+
+ dec, err := hex.DecodeString(sanitized)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid UUID: %v", err)
+ }
+
+ return dec, nil
+}
+
+// FieldSetIndex is used to extract a field from an object using reflection and
+// builds an index on whether the field is set by comparing it against its
+// type's nil value.
+type FieldSetIndex struct {
+ Field string
+}
+
+func (f *FieldSetIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(f.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", f.Field, obj)
+ }
+
+ if fv.Interface() == reflect.Zero(fv.Type()).Interface() {
+ return true, []byte{0}, nil
+ }
+
+ return true, []byte{1}, nil
+}
+
+func (f *FieldSetIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromBoolArgs(args)
+}
+
+// ConditionalIndex builds an index based on a condition specified by a passed
+// user function. This function may examine the passed object and return a
+// boolean to encapsulate an arbitrarily complex conditional.
+type ConditionalIndex struct {
+ Conditional ConditionalIndexFunc
+}
+
+// ConditionalIndexFunc is the required function interface for a
+// ConditionalIndex.
+type ConditionalIndexFunc func(obj interface{}) (bool, error)
+
+func (c *ConditionalIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ // Call the user's function
+ res, err := c.Conditional(obj)
+ if err != nil {
+ return false, nil, fmt.Errorf("ConditionalIndexFunc(%#v) failed: %v", obj, err)
+ }
+
+ if res {
+ return true, []byte{1}, nil
+ }
+
+ return true, []byte{0}, nil
+}
+
+func (c *ConditionalIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromBoolArgs(args)
+}
+
+// fromBoolArgs is a helper that expects only a single boolean argument and
+// returns a single length byte array containing either a one or zero depending
+// on whether the passed input is true or false respectively.
+func fromBoolArgs(args []interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+
+ if val, ok := args[0].(bool); !ok {
+ return nil, fmt.Errorf("argument must be a boolean type: %#v", args[0])
+ } else if val {
+ return []byte{1}, nil
+ }
+
+ return []byte{0}, nil
+}
+
+// CompoundIndex is used to build an index using multiple sub-indexes
+// Prefix based iteration is supported as long as the appropriate prefix
+// of indexers support it. All sub-indexers are only assumed to expect
+// a single argument.
+type CompoundIndex struct {
+ Indexes []Indexer
+
+ // AllowMissing results in an index based on only the indexers
+ // that return data. If true, you may end up with 2/3 columns
+ // indexed which might be useful for an index scan. Otherwise,
+ // the CompoundIndex requires all indexers to be satisfied.
+ AllowMissing bool
+}
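+
+// For example (a sketch): a compound, unique index over Name then Email
+// could be declared in a schema as:
+//
+//	"name_email": &memdb.IndexSchema{
+//		Name:   "name_email",
+//		Unique: true,
+//		Indexer: &memdb.CompoundIndex{
+//			Indexes: []memdb.Indexer{
+//				&memdb.StringFieldIndex{Field: "Name"},
+//				&memdb.StringFieldIndex{Field: "Email"},
+//			},
+//		},
+//	}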
+
+func (c *CompoundIndex) FromObject(raw interface{}) (bool, []byte, error) {
+ var out []byte
+ for i, idx := range c.Indexes {
+ ok, val, err := idx.FromObject(raw)
+ if err != nil {
+ return false, nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ if !ok {
+ if c.AllowMissing {
+ break
+ } else {
+ return false, nil, nil
+ }
+ }
+ out = append(out, val...)
+ }
+ return true, out, nil
+}
+
+func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != len(c.Indexes) {
+		return nil, fmt.Errorf("number of arguments does not match number of index fields")
+ }
+ var out []byte
+ for i, arg := range args {
+ val, err := c.Indexes[i].FromArgs(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ }
+ return out, nil
+}
+
+func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) > len(c.Indexes) {
+ return nil, fmt.Errorf("more arguments than index fields")
+ }
+ var out []byte
+ for i, arg := range args {
+ if i+1 < len(args) {
+ val, err := c.Indexes[i].FromArgs(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ } else {
+ prefixIndexer, ok := c.Indexes[i].(PrefixIndexer)
+ if !ok {
+ return nil, fmt.Errorf("sub-index %d does not support prefix scanning", i)
+ }
+ val, err := prefixIndexer.PrefixFromArgs(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ }
+ }
+ return out, nil
+}
diff --git a/vendor/src/github.com/hashicorp/go-memdb/memdb.go b/vendor/src/github.com/hashicorp/go-memdb/memdb.go
new file mode 100644
index 0000000000..1d708517db
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-memdb/memdb.go
@@ -0,0 +1,89 @@
+package memdb
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/hashicorp/go-immutable-radix"
+)
+
+// MemDB is an in-memory database. It provides a table abstraction,
+// which is used to store objects (rows) with multiple indexes based
+// on values. The database makes use of immutable radix trees to provide
+// transactions and MVCC.
+type MemDB struct {
+ schema *DBSchema
+ root unsafe.Pointer // *iradix.Tree underneath
+
+	// There can only be a single writer at once
+ writer sync.Mutex
+}
+
+// NewMemDB creates a new MemDB with the given schema
+func NewMemDB(schema *DBSchema) (*MemDB, error) {
+ // Validate the schema
+ if err := schema.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Create the MemDB
+ db := &MemDB{
+ schema: schema,
+ root: unsafe.Pointer(iradix.New()),
+ }
+ if err := db.initialize(); err != nil {
+ return nil, err
+ }
+ return db, nil
+}
+
+// getRoot is used to do an atomic load of the root pointer
+func (db *MemDB) getRoot() *iradix.Tree {
+ root := (*iradix.Tree)(atomic.LoadPointer(&db.root))
+ return root
+}
+
+// Txn is used to start a new transaction, in either read or write mode.
+// There can only be a single concurrent writer, but any number of readers.
+func (db *MemDB) Txn(write bool) *Txn {
+ if write {
+ db.writer.Lock()
+ }
+ txn := &Txn{
+ db: db,
+ write: write,
+ rootTxn: db.getRoot().Txn(),
+ }
+ return txn
+}
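+
+// For example (a sketch): a reader opened before a commit keeps seeing
+// the old root, since Commit only swaps the root pointer:
+//
+//	r := db.Txn(false) // read-only, takes no lock
+//	w := db.Txn(true)  // takes the single writer lock
+//	w.Insert("person", p)
+//	w.Commit() // publishes a new root; r still reads the old tree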
+
+// Snapshot is used to capture a point-in-time snapshot
+// of the database that will not be affected by any write
+// operations to the existing DB.
+func (db *MemDB) Snapshot() *MemDB {
+ clone := &MemDB{
+ schema: db.schema,
+ root: unsafe.Pointer(db.getRoot()),
+ }
+ return clone
+}
+
+// initialize is used to setup the DB for use after creation
+func (db *MemDB) initialize() error {
+ root := db.getRoot()
+ for tName, tableSchema := range db.schema.Tables {
+		for iName := range tableSchema.Indexes {
+ index := iradix.New()
+ path := indexPath(tName, iName)
+ root, _, _ = root.Insert(path, index)
+ }
+ }
+ db.root = unsafe.Pointer(root)
+ return nil
+}
+
+// indexPath returns the path from the root to the given table index
+func indexPath(table, index string) []byte {
+ return []byte(table + "." + index)
+}
diff --git a/vendor/src/github.com/hashicorp/go-memdb/schema.go b/vendor/src/github.com/hashicorp/go-memdb/schema.go
new file mode 100644
index 0000000000..2b8ffb4760
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-memdb/schema.go
@@ -0,0 +1,76 @@
+package memdb
+
+import "fmt"
+
+// DBSchema contains the full database schema used for MemDB
+type DBSchema struct {
+ Tables map[string]*TableSchema
+}
+
+// Validate is used to validate the database schema
+func (s *DBSchema) Validate() error {
+ if s == nil {
+ return fmt.Errorf("missing schema")
+ }
+ if len(s.Tables) == 0 {
+ return fmt.Errorf("no tables defined")
+ }
+ for name, table := range s.Tables {
+ if name != table.Name {
+ return fmt.Errorf("table name mis-match for '%s'", name)
+ }
+ if err := table.Validate(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TableSchema contains the schema for a single table
+type TableSchema struct {
+ Name string
+ Indexes map[string]*IndexSchema
+}
+
+// Validate is used to validate the table schema
+func (s *TableSchema) Validate() error {
+ if s.Name == "" {
+ return fmt.Errorf("missing table name")
+ }
+ if len(s.Indexes) == 0 {
+ return fmt.Errorf("missing table schemas for '%s'", s.Name)
+ }
+ if _, ok := s.Indexes["id"]; !ok {
+ return fmt.Errorf("must have id index")
+ }
+ if !s.Indexes["id"].Unique {
+ return fmt.Errorf("id index must be unique")
+ }
+ for name, index := range s.Indexes {
+ if name != index.Name {
+ return fmt.Errorf("index name mis-match for '%s'", name)
+ }
+ if err := index.Validate(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// IndexSchema contains the schema for an index
+type IndexSchema struct {
+ Name string
+ AllowMissing bool
+ Unique bool
+ Indexer Indexer
+}
+
+func (s *IndexSchema) Validate() error {
+ if s.Name == "" {
+ return fmt.Errorf("missing index name")
+ }
+ if s.Indexer == nil {
+ return fmt.Errorf("missing index function for '%s'", s.Name)
+ }
+ return nil
+}
diff --git a/vendor/src/github.com/hashicorp/go-memdb/txn.go b/vendor/src/github.com/hashicorp/go-memdb/txn.go
new file mode 100644
index 0000000000..6228677dac
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/go-memdb/txn.go
@@ -0,0 +1,475 @@
+package memdb
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/hashicorp/go-immutable-radix"
+)
+
+const (
+ id = "id"
+)
+
+// tableIndex is a tuple of (Table, Index) used for lookups
+type tableIndex struct {
+ Table string
+ Index string
+}
+
+// Txn is a transaction against a MemDB.
+// This can be a read or write transaction.
+type Txn struct {
+ db *MemDB
+ write bool
+ rootTxn *iradix.Txn
+ after []func()
+
+ modified map[tableIndex]*iradix.Txn
+}
+
+// readableIndex returns a transaction usable for reading the given
+// index in a table. If a write transaction is in progress, we may need
+// to use an existing modified txn.
+func (txn *Txn) readableIndex(table, index string) *iradix.Txn {
+ // Look for existing transaction
+ if txn.write && txn.modified != nil {
+ key := tableIndex{table, index}
+ exist, ok := txn.modified[key]
+ if ok {
+ return exist
+ }
+ }
+
+ // Create a read transaction
+ path := indexPath(table, index)
+ raw, _ := txn.rootTxn.Get(path)
+ indexTxn := raw.(*iradix.Tree).Txn()
+ return indexTxn
+}
+
+// writableIndex returns a transaction usable for modifying the
+// given index in a table.
+func (txn *Txn) writableIndex(table, index string) *iradix.Txn {
+ if txn.modified == nil {
+ txn.modified = make(map[tableIndex]*iradix.Txn)
+ }
+
+ // Look for existing transaction
+ key := tableIndex{table, index}
+ exist, ok := txn.modified[key]
+ if ok {
+ return exist
+ }
+
+ // Start a new transaction
+ path := indexPath(table, index)
+ raw, _ := txn.rootTxn.Get(path)
+ indexTxn := raw.(*iradix.Tree).Txn()
+
+ // Keep this open for the duration of the txn
+ txn.modified[key] = indexTxn
+ return indexTxn
+}
+
+// Abort is used to cancel this transaction.
+// This is a noop for read transactions.
+func (txn *Txn) Abort() {
+ // Noop for a read transaction
+ if !txn.write {
+ return
+ }
+
+ // Check if already aborted or committed
+ if txn.rootTxn == nil {
+ return
+ }
+
+ // Clear the txn
+ txn.rootTxn = nil
+ txn.modified = nil
+
+ // Release the writer lock since this is invalid
+ txn.db.writer.Unlock()
+}
+
+// Commit is used to finalize this transaction.
+// This is a noop for read transactions.
+func (txn *Txn) Commit() {
+ // Noop for a read transaction
+ if !txn.write {
+ return
+ }
+
+ // Check if already aborted or committed
+ if txn.rootTxn == nil {
+ return
+ }
+
+ // Commit each sub-transaction scoped to (table, index)
+ for key, subTxn := range txn.modified {
+ path := indexPath(key.Table, key.Index)
+ final := subTxn.Commit()
+ txn.rootTxn.Insert(path, final)
+ }
+
+ // Update the root of the DB
+ newRoot := txn.rootTxn.Commit()
+ atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot))
+
+ // Clear the txn
+ txn.rootTxn = nil
+ txn.modified = nil
+
+ // Release the writer lock since this is invalid
+ txn.db.writer.Unlock()
+
+ // Run the deferred functions, if any
+ for i := len(txn.after); i > 0; i-- {
+ fn := txn.after[i-1]
+ fn()
+ }
+}
+
+// Insert is used to add or update an object into the given table
+func (txn *Txn) Insert(table string, obj interface{}) error {
+ if !txn.write {
+ return fmt.Errorf("cannot insert in read-only transaction")
+ }
+
+ // Get the table schema
+ tableSchema, ok := txn.db.schema.Tables[table]
+ if !ok {
+ return fmt.Errorf("invalid table '%s'", table)
+ }
+
+ // Get the primary ID of the object
+ idSchema := tableSchema.Indexes[id]
+ ok, idVal, err := idSchema.Indexer.FromObject(obj)
+ if err != nil {
+ return fmt.Errorf("failed to build primary index: %v", err)
+ }
+ if !ok {
+ return fmt.Errorf("object missing primary index")
+ }
+
+ // Lookup the object by ID first, to see if this is an update
+ idTxn := txn.writableIndex(table, id)
+ existing, update := idTxn.Get(idVal)
+
+ // On an update, there is an existing object with the given
+ // primary ID. We do the update by deleting the current object
+ // and inserting the new object.
+ for name, indexSchema := range tableSchema.Indexes {
+ indexTxn := txn.writableIndex(table, name)
+
+ // Determine the new index value
+ ok, val, err := indexSchema.Indexer.FromObject(obj)
+ if err != nil {
+ return fmt.Errorf("failed to build index '%s': %v", name, err)
+ }
+
+ // Handle non-unique index by computing a unique index.
+ // This is done by appending the primary key which must
+ // be unique anyways.
+ if ok && !indexSchema.Unique {
+ val = append(val, idVal...)
+ }
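+
+		// For example (a sketch): a non-unique "name" entry for
+		// {Name: "Joe", Email: "joe@aol.com"} under a StringFieldIndex
+		// becomes "Joe\x00joe@aol.com\x00", unique per object while
+		// still sharing the "Joe\x00" prefix for scans.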
+
+ // Handle the update by deleting from the index first
+ if update {
+ okExist, valExist, err := indexSchema.Indexer.FromObject(existing)
+ if err != nil {
+ return fmt.Errorf("failed to build index '%s': %v", name, err)
+ }
+ if okExist {
+ // Handle non-unique index by computing a unique index.
+ // This is done by appending the primary key which must
+ // be unique anyways.
+ if !indexSchema.Unique {
+ valExist = append(valExist, idVal...)
+ }
+
+ // If we are writing to the same index with the same value,
+ // we can avoid the delete as the insert will overwrite the
+ // value anyways.
+ if !bytes.Equal(valExist, val) {
+ indexTxn.Delete(valExist)
+ }
+ }
+ }
+
+		// If there is no index value, this is either an error or an
+		// expected case in which we can skip the update
+ if !ok {
+ if indexSchema.AllowMissing {
+ continue
+ } else {
+ return fmt.Errorf("missing value for index '%s'", name)
+ }
+ }
+
+ // Update the value of the index
+ indexTxn.Insert(val, obj)
+ }
+ return nil
+}
+
+// Delete is used to delete a single object from the given table
+// This object must already exist in the table
+func (txn *Txn) Delete(table string, obj interface{}) error {
+ if !txn.write {
+ return fmt.Errorf("cannot delete in read-only transaction")
+ }
+
+ // Get the table schema
+ tableSchema, ok := txn.db.schema.Tables[table]
+ if !ok {
+ return fmt.Errorf("invalid table '%s'", table)
+ }
+
+ // Get the primary ID of the object
+ idSchema := tableSchema.Indexes[id]
+ ok, idVal, err := idSchema.Indexer.FromObject(obj)
+ if err != nil {
+ return fmt.Errorf("failed to build primary index: %v", err)
+ }
+ if !ok {
+ return fmt.Errorf("object missing primary index")
+ }
+
+	// Lookup the object by ID first, check if we should continue
+ idTxn := txn.writableIndex(table, id)
+ existing, ok := idTxn.Get(idVal)
+ if !ok {
+ return fmt.Errorf("not found")
+ }
+
+ // Remove the object from all the indexes
+ for name, indexSchema := range tableSchema.Indexes {
+ indexTxn := txn.writableIndex(table, name)
+
+ // Handle the update by deleting from the index first
+ ok, val, err := indexSchema.Indexer.FromObject(existing)
+ if err != nil {
+ return fmt.Errorf("failed to build index '%s': %v", name, err)
+ }
+ if ok {
+ // Handle non-unique index by computing a unique index.
+ // This is done by appending the primary key which must
+ // be unique anyways.
+ if !indexSchema.Unique {
+ val = append(val, idVal...)
+ }
+ indexTxn.Delete(val)
+ }
+ }
+ return nil
+}
+
+// DeleteAll is used to delete all the objects in a given table
+// matching the constraints on the index
+func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error) {
+ if !txn.write {
+ return 0, fmt.Errorf("cannot delete in read-only transaction")
+ }
+
+ // Get all the objects
+ iter, err := txn.Get(table, index, args...)
+ if err != nil {
+ return 0, err
+ }
+
+ // Put them into a slice so there are no safety concerns while actually
+ // performing the deletes
+ var objs []interface{}
+ for {
+ obj := iter.Next()
+ if obj == nil {
+ break
+ }
+
+ objs = append(objs, obj)
+ }
+
+ // Do the deletes
+ num := 0
+ for _, obj := range objs {
+ if err := txn.Delete(table, obj); err != nil {
+ return num, err
+ }
+ num++
+ }
+ return num, nil
+}
+
+// First is used to return the first matching object for
+// the given constraints on the index
+func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) {
+ // Get the index value
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the index itself
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+
+ // Do an exact lookup
+ if indexSchema.Unique && val != nil && indexSchema.Name == index {
+ obj, ok := indexTxn.Get(val)
+ if !ok {
+ return nil, nil
+ }
+ return obj, nil
+ }
+
+ // Handle non-unique index by using an iterator and getting the first value
+ iter := indexTxn.Root().Iterator()
+ iter.SeekPrefix(val)
+ _, value, _ := iter.Next()
+ return value, nil
+}
+
+// LongestPrefix is used to fetch the longest prefix match for the given
+// constraints on the index. Note that this will not work with the memdb
+// StringFieldIndex because it adds null terminators which prevent the
+// algorithm from correctly finding a match (it will get to right before the
+// null and fail to find a leaf node). This should only be used where the prefix
+// given is capable of matching indexed entries directly, which typically only
+// applies to a custom indexer. See the unit test for an example.
+func (txn *Txn) LongestPrefix(table, index string, args ...interface{}) (interface{}, error) {
+ // Enforce that this only works on prefix indexes.
+ if !strings.HasSuffix(index, "_prefix") {
+ return nil, fmt.Errorf("must use '%s_prefix' on index", index)
+ }
+
+ // Get the index value.
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // This algorithm only makes sense against a unique index, otherwise the
+ // index keys will have the IDs appended to them.
+ if !indexSchema.Unique {
+ return nil, fmt.Errorf("index '%s' is not unique", index)
+ }
+
+ // Find the longest prefix match with the given index.
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+ if _, value, ok := indexTxn.Root().LongestPrefix(val); ok {
+ return value, nil
+ }
+ return nil, nil
+}
+
+// getIndexValue is used to get the IndexSchema and the value
+// used to scan the index given the parameters. This handles prefix based
+// scans when the index has the "_prefix" suffix. The index must support
+// prefix iteration.
+func (txn *Txn) getIndexValue(table, index string, args ...interface{}) (*IndexSchema, []byte, error) {
+ // Get the table schema
+ tableSchema, ok := txn.db.schema.Tables[table]
+ if !ok {
+ return nil, nil, fmt.Errorf("invalid table '%s'", table)
+ }
+
+ // Check for a prefix scan
+ prefixScan := false
+ if strings.HasSuffix(index, "_prefix") {
+ index = strings.TrimSuffix(index, "_prefix")
+ prefixScan = true
+ }
+
+ // Get the index schema
+ indexSchema, ok := tableSchema.Indexes[index]
+ if !ok {
+ return nil, nil, fmt.Errorf("invalid index '%s'", index)
+ }
+
+ // Hot-path for when there are no arguments
+ if len(args) == 0 {
+ return indexSchema, nil, nil
+ }
+
+ // Special case the prefix scanning
+ if prefixScan {
+ prefixIndexer, ok := indexSchema.Indexer.(PrefixIndexer)
+ if !ok {
+ return indexSchema, nil,
+ fmt.Errorf("index '%s' does not support prefix scanning", index)
+ }
+
+ val, err := prefixIndexer.PrefixFromArgs(args...)
+ if err != nil {
+ return indexSchema, nil, fmt.Errorf("index error: %v", err)
+ }
+ return indexSchema, val, err
+ }
+
+ // Get the exact match index
+ val, err := indexSchema.Indexer.FromArgs(args...)
+ if err != nil {
+ return indexSchema, nil, fmt.Errorf("index error: %v", err)
+ }
+ return indexSchema, val, err
+}
+
+// ResultIterator is used to iterate over a list of results
+// from a Get query on a table.
+type ResultIterator interface {
+ Next() interface{}
+}
+
+// Get is used to construct a ResultIterator over all the
+// rows that match the given constraints of an index.
+func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, error) {
+ // Get the index value to scan
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the index itself
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+ indexRoot := indexTxn.Root()
+
+	// Get an iterator over the index
+ indexIter := indexRoot.Iterator()
+
+ // Seek the iterator to the appropriate sub-set
+ indexIter.SeekPrefix(val)
+
+ // Create an iterator
+ iter := &radixIterator{
+ iter: indexIter,
+ }
+ return iter, nil
+}
+
+// Defer is used to push a new arbitrary function onto a stack which
+// gets called when a transaction is committed and finished. Deferred
+// functions are called in LIFO order, and only invoked at the end of
+// write transactions.
+func (txn *Txn) Defer(fn func()) {
+ txn.after = append(txn.after, fn)
+}
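+
+// For example (a sketch): deferred functions run only after Commit,
+// in LIFO order:
+//
+//	txn := db.Txn(true)
+//	txn.Defer(func() { fmt.Println("runs second") })
+//	txn.Defer(func() { fmt.Println("runs first") })
+//	txn.Commit() // prints "runs first", then "runs second"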
+
+// radixIterator is used to wrap an underlying iradix iterator.
+// This is much more efficient than a sliceIterator as we are not
+// materializing the entire view.
+type radixIterator struct {
+ iter *iradix.Iterator
+}
+
+func (r *radixIterator) Next() interface{} {
+ _, value, ok := r.iter.Next()
+ if !ok {
+ return nil
+ }
+ return value
+}
diff --git a/vendor/src/github.com/hashicorp/golang-lru/LICENSE b/vendor/src/github.com/hashicorp/golang-lru/LICENSE
new file mode 100644
index 0000000000..be2cc4dfb6
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/golang-lru/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses
+
+      If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/src/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/src/github.com/hashicorp/golang-lru/simplelru/lru.go
new file mode 100644
index 0000000000..68d097a1c0
--- /dev/null
+++ b/vendor/src/github.com/hashicorp/golang-lru/simplelru/lru.go
@@ -0,0 +1,160 @@
+package simplelru
+
+import (
+ "container/list"
+ "errors"
+)
+
+// EvictCallback is used to get a callback when a cache entry is evicted
+type EvictCallback func(key interface{}, value interface{})
+
+// LRU implements a non-thread safe fixed size LRU cache
+type LRU struct {
+ size int
+ evictList *list.List
+ items map[interface{}]*list.Element
+ onEvict EvictCallback
+}
+
+// entry is used to hold a value in the evictList
+type entry struct {
+ key interface{}
+ value interface{}
+}
+
+// NewLRU constructs an LRU of the given size
+func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
+ if size <= 0 {
+ return nil, errors.New("Must provide a positive size")
+ }
+ c := &LRU{
+ size: size,
+ evictList: list.New(),
+ items: make(map[interface{}]*list.Element),
+ onEvict: onEvict,
+ }
+ return c, nil
+}
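+
+// For example (a sketch): a 128-entry cache that logs evictions:
+//
+//	c, err := NewLRU(128, func(k, v interface{}) {
+//		fmt.Printf("evicted %v\n", k)
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//	c.Add("a", 1)
+//	if v, ok := c.Get("a"); ok {
+//		fmt.Println(v) // 1
+//	}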
+
+// Purge is used to completely clear the cache
+func (c *LRU) Purge() {
+ for k, v := range c.items {
+ if c.onEvict != nil {
+ c.onEvict(k, v.Value.(*entry).value)
+ }
+ delete(c.items, k)
+ }
+ c.evictList.Init()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *LRU) Add(key, value interface{}) bool {
+ // Check for existing item
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ ent.Value.(*entry).value = value
+ return false
+ }
+
+ // Add new item
+ ent := &entry{key, value}
+	elem := c.evictList.PushFront(ent)
+	c.items[key] = elem
+
+ evict := c.evictList.Len() > c.size
+ // Verify size not exceeded
+ if evict {
+ c.removeOldest()
+ }
+ return evict
+}
+
+// Get looks up a key's value from the cache.
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ return ent.Value.(*entry).value, true
+ }
+ return
+}
+
+// Contains checks if a key is in the cache, without updating the
+// recent-ness or deleting it for being stale.
+func (c *LRU) Contains(key interface{}) (ok bool) {
+ _, ok = c.items[key]
+ return ok
+}
+
+// Peek returns the key's value (or nil if not found) without updating
+// the "recently used"-ness of the key.
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
+ if ent, ok := c.items[key]; ok {
+ return ent.Value.(*entry).value, true
+ }
+ return nil, ok
+}
+
+// Remove removes the provided key from the cache, returning if the
+// key was contained.
+func (c *LRU) Remove(key interface{}) bool {
+ if ent, ok := c.items[key]; ok {
+ c.removeElement(ent)
+ return true
+ }
+ return false
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// GetOldest returns the oldest entry
+func (c *LRU) GetOldest() (interface{}, interface{}, bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *LRU) Keys() []interface{} {
+ keys := make([]interface{}, len(c.items))
+ i := 0
+ for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
+ keys[i] = ent.Value.(*entry).key
+ i++
+ }
+ return keys
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU) Len() int {
+ return c.evictList.Len()
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU) removeOldest() {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ }
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU) removeElement(e *list.Element) {
+ c.evictList.Remove(e)
+ kv := e.Value.(*entry)
+ delete(c.items, kv.key)
+ if c.onEvict != nil {
+ c.onEvict(kv.key, kv.value)
+ }
+}
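As a quick illustration of the API above, a minimal usage sketch (the keys and values are made up; error handling is abbreviated):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// The callback fires with each displaced key/value pair.
	onEvict := func(key interface{}, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	cache, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // capacity is 2, so the oldest entry "a" is evicted

	if v, ok := cache.Get("b"); ok { // Get also marks "b" most recently used
		fmt.Println("b =", v)
	}
	fmt.Println("contains a:", cache.Contains("a")) // false
	fmt.Println("keys, oldest to newest:", cache.Keys())
}
```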
diff --git a/vendor/src/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/src/github.com/matttproud/golang_protobuf_extensions/LICENSE
new file mode 100644
index 0000000000..13f15dfce0
--- /dev/null
+++ b/vendor/src/github.com/matttproud/golang_protobuf_extensions/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2013 Matt T. Proud
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 0000000000..66d9b5458f
--- /dev/null
+++ b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as a 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error. This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom. As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred). The function never
+// reads more bytes from the stream than required. The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so. In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+ // Per AbstractParser#parsePartialDelimitedFrom with
+ // CodedInputStream#readRawVarint32.
+ headerBuf := make([]byte, binary.MaxVarintLen32)
+ var bytesRead, varIntBytes int
+ var messageLength uint64
+ for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+ if bytesRead >= len(headerBuf) {
+ return bytesRead, errInvalidVarint
+ }
+ // We have to read byte by byte here to avoid reading more bytes
+ // than required. Each read byte is appended to what we have
+ // read before.
+ newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+ if newBytesRead == 0 {
+ if err != nil {
+ return bytesRead, err
+ }
+ // A Reader should not return (0, nil), but if it does,
+ // it should be treated as a no-op (according to the
+ // Reader contract). So let's go on...
+ continue
+ }
+ bytesRead += newBytesRead
+ // Now present everything read so far to the varint decoder and
+ // see if a varint can be decoded already.
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+ }
+
+ messageBuf := make([]byte, messageLength)
+ newBytesRead, err := io.ReadFull(r, messageBuf)
+ bytesRead += newBytesRead
+ if err != nil {
+ return bytesRead, err
+ }
+
+ return bytesRead, proto.Unmarshal(messageBuf, m)
+}
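To make the framing concrete, here is a small sketch of the record layout ReadDelimited consumes: a base-128 varint length prefix followed by exactly that many message bytes. The payload bytes below are arbitrary stand-ins for a marshaled message:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Stand-in for an already-marshaled protobuf message.
	payload := []byte{0x0a, 0x01, 0x41}

	// Prefix it with its length as a uvarint, as ReadDelimited expects.
	header := make([]byte, binary.MaxVarintLen32)
	n := binary.PutUvarint(header, uint64(len(payload)))
	frame := append(header[:n], payload...)

	fmt.Printf("frame: % x\n", frame) // 03 0a 01 41
}
```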
diff --git a/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 0000000000..c318385cbe
--- /dev/null
+++ b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 0000000000..4b76ea9a1d
--- /dev/null
+++ b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain
+// encoded messages of the same type together in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+ buffer, err := proto.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ buf := make([]byte, binary.MaxVarintLen32)
+ encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
+
+ sync, err := w.Write(buf[:encodedLength])
+ if err != nil {
+ return sync, err
+ }
+
+ n, err = w.Write(buffer)
+ return n + sync, err
+}
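Putting the two halves together, a round-trip sketch; `pb.Record` is a hypothetical generated message type standing in for any proto.Message:

```go
package main

import (
	"bytes"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"

	pb "example.com/records" // hypothetical package with a generated Record message
)

func main() {
	var buf bytes.Buffer

	// Write two delimited records back to back into one stream.
	for _, name := range []string{"first", "second"} {
		if _, err := pbutil.WriteDelimited(&buf, &pb.Record{Name: proto.String(name)}); err != nil {
			log.Fatal(err)
		}
	}

	// Read them back in order; each call consumes exactly one record.
	for i := 0; i < 2; i++ {
		rec := &pb.Record{}
		if _, err := pbutil.ReadDelimited(&buf, rec); err != nil {
			log.Fatal(err)
		}
		log.Println(rec.GetName())
	}
}
```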
diff --git a/vendor/src/github.com/mreiferson/go-httpclient/.gitignore b/vendor/src/github.com/mreiferson/go-httpclient/.gitignore
new file mode 100644
index 0000000000..dbec55fb6f
--- /dev/null
+++ b/vendor/src/github.com/mreiferson/go-httpclient/.gitignore
@@ -0,0 +1 @@
+*.sw[op]
diff --git a/vendor/src/github.com/mreiferson/go-httpclient/.travis.yml b/vendor/src/github.com/mreiferson/go-httpclient/.travis.yml
new file mode 100644
index 0000000000..ba1b6b7f9f
--- /dev/null
+++ b/vendor/src/github.com/mreiferson/go-httpclient/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+go:
+ - 1.1
+install:
+ - go get github.com/bmizerany/assert
+script:
+ - pushd $TRAVIS_BUILD_DIR
+ - go test
+ - popd
+notifications:
+ email: false
diff --git a/vendor/src/github.com/mreiferson/go-httpclient/LICENSE b/vendor/src/github.com/mreiferson/go-httpclient/LICENSE
new file mode 100644
index 0000000000..89de354795
--- /dev/null
+++ b/vendor/src/github.com/mreiferson/go-httpclient/LICENSE
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/src/github.com/mreiferson/go-httpclient/README.md b/vendor/src/github.com/mreiferson/go-httpclient/README.md
new file mode 100644
index 0000000000..6d0dbff945
--- /dev/null
+++ b/vendor/src/github.com/mreiferson/go-httpclient/README.md
@@ -0,0 +1,41 @@
+## go-httpclient
+
+**Requires Go 1.1+.** As of `v0.4.0` the API has been completely re-written for Go 1.1 (for a Go
+1.0.x-compatible release see [1adef50](https://github.com/mreiferson/go-httpclient/tree/1adef50)).
+
+[![Build
+Status](https://secure.travis-ci.org/mreiferson/go-httpclient.png?branch=master)](http://travis-ci.org/mreiferson/go-httpclient)
+
+Provides an HTTP Transport that implements the `RoundTripper` interface and
+can be used as a drop-in replacement for the standard library's, providing:
+
+ * connection timeouts
+ * request timeouts
+
+This is a thin wrapper around `http.Transport` that sets dial timeouts and uses
+Go's internal timer scheduler to call the Go 1.1+ `CancelRequest()` API.
+
+### Example
+
+```go
+transport := &httpclient.Transport{
+ ConnectTimeout: 1*time.Second,
+ RequestTimeout: 10*time.Second,
+ ResponseHeaderTimeout: 5*time.Second,
+}
+defer transport.Close()
+
+client := &http.Client{Transport: transport}
+req, _ := http.NewRequest("GET", "http://127.0.0.1/test", nil)
+resp, err := client.Do(req)
+if err != nil {
+ return err
+}
+defer resp.Body.Close()
+```
+
+*Note:* you will want to re-use a single client object rather than creating one for each request, otherwise you will end up [leaking connections](https://code.google.com/p/go/issues/detail?id=4049#c3).
+
+### Reference Docs
+
+For API docs see [godoc](http://godoc.org/github.com/mreiferson/go-httpclient).
diff --git a/vendor/src/github.com/mreiferson/go-httpclient/httpclient.go b/vendor/src/github.com/mreiferson/go-httpclient/httpclient.go
new file mode 100644
index 0000000000..89e018bffd
--- /dev/null
+++ b/vendor/src/github.com/mreiferson/go-httpclient/httpclient.go
@@ -0,0 +1,237 @@
+/*
+Provides an HTTP Transport that implements the `RoundTripper` interface and
+can be used as a drop-in replacement for the standard library's, providing:
+
+ * connection timeouts
+ * request timeouts
+
+This is a thin wrapper around `http.Transport` that sets dial timeouts and uses
+Go's internal timer scheduler to call the Go 1.1+ `CancelRequest()` API.
+*/
+package httpclient
+
+import (
+ "crypto/tls"
+ "errors"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+)
+
+// Version returns the current version of the package.
+func Version() string {
+ return "0.4.1"
+}
+
+// Transport implements the RoundTripper interface and can be used as a replacement
+// for Go's built-in http.Transport, implementing end-to-end request timeouts.
+//
+// transport := &httpclient.Transport{
+// ConnectTimeout: 1*time.Second,
+// ResponseHeaderTimeout: 5*time.Second,
+// RequestTimeout: 10*time.Second,
+// }
+// defer transport.Close()
+//
+// client := &http.Client{Transport: transport}
+// req, _ := http.NewRequest("GET", "http://127.0.0.1/test", nil)
+// resp, err := client.Do(req)
+// if err != nil {
+// return err
+// }
+// defer resp.Body.Close()
+//
+type Transport struct {
+ // Proxy specifies a function to return a proxy for a given
+ // *http.Request. If the function returns a non-nil error, the
+ // request is aborted with the provided error.
+ // If Proxy is nil or returns a nil *url.URL, no proxy is used.
+ Proxy func(*http.Request) (*url.URL, error)
+
+ // Dial specifies the dial function for creating TCP
+ // connections. This will override the Transport's ConnectTimeout and
+ // ReadWriteTimeout settings.
+ // If Dial is nil, a dialer is generated on demand matching the Transport's
+ // options.
+ Dial func(network, addr string) (net.Conn, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client. If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // DisableKeepAlives, if true, prevents re-use of TCP connections
+ // between different HTTP requests.
+ DisableKeepAlives bool
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // MaxIdleConnsPerHost, if non-zero, controls the maximum idle
+ // (keep-alive) connections to keep per-host. If zero,
+ // http.DefaultMaxIdleConnsPerHost is used.
+ MaxIdleConnsPerHost int
+
+ // ConnectTimeout, if non-zero, is the maximum amount of time a dial will wait for
+ // a connect to complete.
+ ConnectTimeout time.Duration
+
+ // ResponseHeaderTimeout, if non-zero, specifies the amount of
+ // time to wait for a server's response headers after fully
+ // writing the request (including its body, if any). This
+ // time does not include the time to read the response body.
+ ResponseHeaderTimeout time.Duration
+
+ // RequestTimeout, if non-zero, specifies the amount of time for the entire
+ // request to complete (including all of the above timeouts + entire response body).
+ // This should never be less than the sum total of the above two timeouts.
+ RequestTimeout time.Duration
+
+ // ReadWriteTimeout, if non-zero, will set a deadline for every Read and
+ // Write operation on the request connection.
+ ReadWriteTimeout time.Duration
+
+ // TCPWriteBufferSize, the size of the operating system's write
+ // buffer associated with the connection.
+ TCPWriteBufferSize int
+
+ // TCPReadBufferSize, the size of the operating system's read
+ // buffer associated with the connection.
+ TCPReadBufferSize int
+
+ starter sync.Once
+ transport *http.Transport
+}
+
+// Close cleans up the Transport, currently a no-op
+func (t *Transport) Close() error {
+ return nil
+}
+
+func (t *Transport) lazyStart() {
+ if t.Dial == nil {
+ t.Dial = func(netw, addr string) (net.Conn, error) {
+ c, err := net.DialTimeout(netw, addr, t.ConnectTimeout)
+ if err != nil {
+ return nil, err
+ }
+
+ if t.TCPReadBufferSize != 0 || t.TCPWriteBufferSize != 0 {
+ if tcpCon, ok := c.(*net.TCPConn); ok {
+ if t.TCPWriteBufferSize != 0 {
+ if err = tcpCon.SetWriteBuffer(t.TCPWriteBufferSize); err != nil {
+ return nil, err
+ }
+ }
+ if t.TCPReadBufferSize != 0 {
+ if err = tcpCon.SetReadBuffer(t.TCPReadBufferSize); err != nil {
+ return nil, err
+ }
+ }
+ } else {
+ err = errors.New("not a TCP connection")
+ return nil, err
+ }
+ }
+
+ if t.ReadWriteTimeout > 0 {
+ timeoutConn := &rwTimeoutConn{
+ TCPConn: c.(*net.TCPConn),
+ rwTimeout: t.ReadWriteTimeout,
+ }
+ return timeoutConn, nil
+ }
+ return c, nil
+ }
+ }
+
+ t.transport = &http.Transport{
+ Dial: t.Dial,
+ Proxy: t.Proxy,
+ TLSClientConfig: t.TLSClientConfig,
+ DisableKeepAlives: t.DisableKeepAlives,
+ DisableCompression: t.DisableCompression,
+ MaxIdleConnsPerHost: t.MaxIdleConnsPerHost,
+ ResponseHeaderTimeout: t.ResponseHeaderTimeout,
+ }
+}
+
+func (t *Transport) CancelRequest(req *http.Request) {
+ t.starter.Do(t.lazyStart)
+
+ t.transport.CancelRequest(req)
+}
+
+func (t *Transport) CloseIdleConnections() {
+ t.starter.Do(t.lazyStart)
+
+ t.transport.CloseIdleConnections()
+}
+
+func (t *Transport) RegisterProtocol(scheme string, rt http.RoundTripper) {
+ t.starter.Do(t.lazyStart)
+
+ t.transport.RegisterProtocol(scheme, rt)
+}
+
+func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+ t.starter.Do(t.lazyStart)
+
+ if t.RequestTimeout > 0 {
+ timer := time.AfterFunc(t.RequestTimeout, func() {
+ t.transport.CancelRequest(req)
+ })
+
+ resp, err = t.transport.RoundTrip(req)
+ if err != nil {
+ timer.Stop()
+ } else {
+ resp.Body = &bodyCloseInterceptor{ReadCloser: resp.Body, timer: timer}
+ }
+ } else {
+ resp, err = t.transport.RoundTrip(req)
+ }
+
+ return
+}
+
+type bodyCloseInterceptor struct {
+ io.ReadCloser
+ timer *time.Timer
+}
+
+func (bci *bodyCloseInterceptor) Close() error {
+ bci.timer.Stop()
+ return bci.ReadCloser.Close()
+}
+
+// rwTimeoutConn is a net.Conn that sets a deadline for every Read or Write operation.
+type rwTimeoutConn struct {
+ *net.TCPConn
+ rwTimeout time.Duration
+}
+
+func (c *rwTimeoutConn) Read(b []byte) (int, error) {
+ err := c.TCPConn.SetDeadline(time.Now().Add(c.rwTimeout))
+ if err != nil {
+ return 0, err
+ }
+ return c.TCPConn.Read(b)
+}
+
+func (c *rwTimeoutConn) Write(b []byte) (int, error) {
+ err := c.TCPConn.SetDeadline(time.Now().Add(c.rwTimeout))
+ if err != nil {
+ return 0, err
+ }
+ return c.TCPConn.Write(b)
+}
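Beyond the README example, the remaining knobs defined above can be exercised like so (a sketch; the URL, timeouts, and buffer sizes are arbitrary):

```go
package main

import (
	"io/ioutil"
	"log"
	"net/http"
	"time"

	"github.com/mreiferson/go-httpclient"
)

func main() {
	transport := &httpclient.Transport{
		ConnectTimeout:     1 * time.Second,
		RequestTimeout:     10 * time.Second,
		ReadWriteTimeout:   2 * time.Second, // deadline re-armed on every Read/Write
		TCPReadBufferSize:  64 * 1024,       // OS-level socket buffer sizes
		TCPWriteBufferSize: 64 * 1024,
	}
	defer transport.Close()

	client := &http.Client{Transport: transport}
	resp, err := client.Get("http://127.0.0.1/test")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close() // also stops the RequestTimeout timer via bodyCloseInterceptor

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes", len(body))
}
```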
diff --git a/vendor/src/github.com/pivotal-golang/clock/LICENSE b/vendor/src/github.com/pivotal-golang/clock/LICENSE
new file mode 100644
index 0000000000..e06d208186
--- /dev/null
+++ b/vendor/src/github.com/pivotal-golang/clock/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/src/github.com/pivotal-golang/clock/README.md b/vendor/src/github.com/pivotal-golang/clock/README.md
new file mode 100644
index 0000000000..9741b8b457
--- /dev/null
+++ b/vendor/src/github.com/pivotal-golang/clock/README.md
@@ -0,0 +1 @@
+Provides a `Clock` interface, useful for injecting time dependencies in tests.
diff --git a/vendor/src/github.com/pivotal-golang/clock/clock.go b/vendor/src/github.com/pivotal-golang/clock/clock.go
new file mode 100644
index 0000000000..9c7322cd50
--- /dev/null
+++ b/vendor/src/github.com/pivotal-golang/clock/clock.go
@@ -0,0 +1,42 @@
+package clock
+
+import "time"
+
+type Clock interface {
+ Now() time.Time
+ Sleep(d time.Duration)
+ Since(t time.Time) time.Duration
+
+ NewTimer(d time.Duration) Timer
+ NewTicker(d time.Duration) Ticker
+}
+
+type realClock struct{}
+
+func NewClock() Clock {
+ return &realClock{}
+}
+
+func (clock *realClock) Now() time.Time {
+ return time.Now()
+}
+
+func (clock *realClock) Since(t time.Time) time.Duration {
+ return time.Now().Sub(t)
+}
+
+func (clock *realClock) Sleep(d time.Duration) {
+ <-clock.NewTimer(d).C()
+}
+
+func (clock *realClock) NewTimer(d time.Duration) Timer {
+ return &realTimer{
+ t: time.NewTimer(d),
+ }
+}
+
+func (clock *realClock) NewTicker(d time.Duration) Ticker {
+ return &realTicker{
+ t: time.NewTicker(d),
+ }
+}
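The interface exists for dependency injection: production code constructs NewClock(), while tests can pass a fake implementation. A minimal sketch (the Throttle type is invented for illustration):

```go
package main

import (
	"fmt"
	"time"

	"github.com/pivotal-golang/clock"
)

// Throttle permits at most one event per interval. The clock is injected so
// tests can substitute a fake and advance time deterministically.
type Throttle struct {
	clock clock.Clock
	every time.Duration
	last  time.Time
}

func (t *Throttle) Allow() bool {
	if !t.last.IsZero() && t.clock.Since(t.last) < t.every {
		return false
	}
	t.last = t.clock.Now()
	return true
}

func main() {
	t := &Throttle{clock: clock.NewClock(), every: time.Second}
	fmt.Println(t.Allow()) // true
	fmt.Println(t.Allow()) // false: less than a second has elapsed
}
```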
diff --git a/vendor/src/github.com/pivotal-golang/clock/ticker.go b/vendor/src/github.com/pivotal-golang/clock/ticker.go
new file mode 100644
index 0000000000..f25129e1c8
--- /dev/null
+++ b/vendor/src/github.com/pivotal-golang/clock/ticker.go
@@ -0,0 +1,20 @@
+package clock
+
+import "time"
+
+type Ticker interface {
+ C() <-chan time.Time
+ Stop()
+}
+
+type realTicker struct {
+ t *time.Ticker
+}
+
+func (t *realTicker) C() <-chan time.Time {
+ return t.t.C
+}
+
+func (t *realTicker) Stop() {
+ t.t.Stop()
+}
diff --git a/vendor/src/github.com/pivotal-golang/clock/timer.go b/vendor/src/github.com/pivotal-golang/clock/timer.go
new file mode 100644
index 0000000000..cf8c221259
--- /dev/null
+++ b/vendor/src/github.com/pivotal-golang/clock/timer.go
@@ -0,0 +1,25 @@
+package clock
+
+import "time"
+
+type Timer interface {
+ C() <-chan time.Time
+ Reset(d time.Duration) bool
+ Stop() bool
+}
+
+type realTimer struct {
+ t *time.Timer
+}
+
+func (t *realTimer) C() <-chan time.Time {
+ return t.t.C
+}
+
+func (t *realTimer) Reset(d time.Duration) bool {
+ return t.t.Reset(d)
+}
+
+func (t *realTimer) Stop() bool {
+ return t.t.Stop()
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/LICENSE b/vendor/src/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/src/github.com/prometheus/client_golang/prometheus/.gitignore
new file mode 100644
index 0000000000..3460f0346d
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/.gitignore
@@ -0,0 +1 @@
+command-line-arguments.test
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/README.md b/vendor/src/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 0000000000..81032bed88
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1,53 @@
+# Overview
+This is the [Go](http://golang.org) client library for
+[Prometheus](http://www.prometheus.io) telemetric instrumentation. It
+enables authors to define process-space metrics for their servers and
+expose them through a web service interface for extraction,
+aggregation, and a whole slew of other post-processing techniques.
+
+# Installing
+ $ go get github.com/prometheus/client_golang/prometheus
+
+# Example
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ indexed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "my_company",
+ Subsystem: "indexer",
+ Name: "documents_indexed",
+ Help: "The number of documents indexed.",
+ })
+ size = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "my_company",
+ Subsystem: "storage",
+ Name: "documents_total_size_bytes",
+ Help: "The total size of all documents in the storage.",
+ })
+)
+
+func main() {
+ http.Handle("/metrics", prometheus.Handler())
+
+ indexed.Inc()
+ size.Set(5)
+
+ http.ListenAndServe(":8080", nil)
+}
+
+func init() {
+ prometheus.MustRegister(indexed)
+ prometheus.MustRegister(size)
+}
+```
+
+# Documentation
+
+[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang)
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 0000000000..c04688009f
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
+//
+// The stock metrics provided by this package (like Gauge, Counter, Summary) are
+// also Collectors (which only ever collect one metric, namely itself). An
+// implementer of Collector may, however, collect multiple metrics in a
+// coordinated fashion and/or create metrics on the fly. Examples for collectors
+// already implemented in this library are the metric vectors (i.e. collection
+// of multiple instances of the same Metric but with different label values)
+// like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation. (It is valid if one and the same Collector sends
+ // duplicate descriptors. Those duplicates are simply ignored. However,
+ // two different Collectors must not send duplicate descriptors.) This
+ // method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. If a Collector encounters an error while
+ // executing this method, it must send an invalid descriptor (created
+ // with NewInvalidDesc) to signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by Prometheus when collecting metrics. The
+ // implementation sends each collected metric via the provided channel
+ // and returns once the last metric has been sent. The descriptor of
+ // each sent metric is one of those returned by Describe. Returned
+ // metrics that share the same descriptor must differ in their variable
+ // label values. This method may be called concurrently and must
+ // therefore be implemented in a concurrency safe way. Blocking occurs
+ // at the expense of total performance of rendering all registered
+ // metrics. Ideally, Collector implementations support concurrent
+ // readers.
+ Collect(chan<- Metric)
+}
+
+// SelfCollector implements Collector for a single Metric so that the
+// Metric collects itself. Add it as an anonymous field to a struct that
+// implements Metric, and call Init with the Metric itself as an argument.
+type SelfCollector struct {
+ self Metric
+}
+
+// Init provides the SelfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *SelfCollector) Init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *SelfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *SelfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
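
As a quick illustration of the Collector contract above: a minimal custom Collector that creates its metric on the fly at scrape time. This is a sketch, not part of the vendored code; the type name, metric name, and callback are hypothetical.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueLengthCollector creates its metric on the fly at each scrape
// instead of storing state between collections.
type queueLengthCollector struct {
	desc    *prometheus.Desc
	current func() int // callback reporting the live queue length
}

// Describe sends the single descriptor this collector uses.
func (c *queueLengthCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect emits a throw-away metric built from the live value.
func (c *queueLengthCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(
		c.desc, prometheus.GaugeValue, float64(c.current()),
	)
}

func main() {
	queue := make(chan struct{}, 100)
	prometheus.MustRegister(&queueLengthCollector{
		desc: prometheus.NewDesc(
			"queue_length", "Current length of the work queue.", nil, nil,
		),
		current: func() int { return len(queue) },
	})
}
```
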
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/src/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 0000000000..a2952d1c88
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,175 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "hash/fnv"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Set is used to set the Counter to an arbitrary value. It is only used
+ // if you have to transfer a value from an external counter into this
+ // Prometheus metric. Do not use it for regular handling of a
+ // Prometheus counter (as it can be used to break the contract of
+ // monotonically increasing values).
+ Set(float64)
+ // Inc increments the counter by 1.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result.Init(result) // Init self-collection.
+ return result
+}
+
+type counter struct {
+ value
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ c.value.Add(v)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+ MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ result := &counter{value: value{
+ desc: desc,
+ valType: CounterValue,
+ labelPairs: makeLabelPairs(desc, lvs),
+ }}
+ result.Init(result) // Init self-collection.
+ return result
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+ return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+ return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
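
A hedged usage sketch for CounterVec; the metric name and label names here are illustrative, not taken from the patch.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var httpReqs = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total HTTP requests, partitioned by status code and method.",
	},
	[]string{"code", "method"},
)

func init() {
	prometheus.MustRegister(httpReqs)
}

func main() {
	// Shortcut form: panics if the label values are inconsistent.
	httpReqs.WithLabelValues("404", "GET").Inc()

	// Error-returning form for label values assembled at runtime.
	if c, err := httpReqs.GetMetricWithLabelValues("200", "POST"); err == nil {
		c.Add(42)
	}
}
```
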
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/src/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 0000000000..fcde784d64
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,201 @@
+package prometheus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+ labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+)
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each of constLabels and variableLabels, but they must differ
+// in the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // VariableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+ // err is an error that occurred during construction. It is reported
+ // at registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if help == "" {
+ d.err = errors.New("empty help string")
+ return d
+ }
+ if !metricNameRE.MatchString(fqName) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+ h := fnv.New64a()
+ var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
+ for _, val := range labelValues {
+ b.Reset()
+ b.WriteString(val)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ }
+ d.id = h.Sum64()
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ h.Reset()
+ b.Reset()
+ b.WriteString(help)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ for _, labelName := range labelNames {
+ b.Reset()
+ b.WriteString(labelName)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ }
+ d.dimHash = h.Sum64()
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(LabelPairSorter(d.constLabelPairs))
+ return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
+
+func checkLabelName(l string) bool {
+ return labelNameRE.MatchString(l) &&
+ !strings.HasPrefix(l, reservedLabelPrefix)
+}
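
To make the constLabels/variableLabels split concrete, a small sketch; the descriptor name and label names are assumptions for illustration.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One variable label ("queue") plus one constant label ("shard").
	desc := prometheus.NewDesc(
		"jobs_in_flight",                      // fqName
		"Number of jobs currently in flight.", // help is mandatory
		[]string{"queue"},                     // variable label names only
		prometheus.Labels{"shard": "a"},       // constant name/value pairs
	)
	fmt.Println(desc) // String() is handy when debugging descriptors

	// The variable label value is supplied per metric, not in the Desc.
	m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 7, "high")
	_ = m
}
```
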
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/src/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 0000000000..425fe8793c
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides embeddable metric primitives for servers and
+// standardized exposition of telemetry through a web services interface.
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// To expose metrics registered with the Prometheus registry, an HTTP server
+// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
+//
+// http.Handle("/metrics", prometheus.Handler())
+//
+// As a starting point a very basic usage example:
+//
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// })
+// )
+//
+// func init() {
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.Inc()
+//
+// http.Handle("/metrics", prometheus.Handler())
+// http.ListenAndServe(":8080", nil)
+// }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter.
+// It also exports some stats about the HTTP usage of the /metrics
+// endpoint. (See the Handler function for more detail.)
+//
+// Two more advanced metric types are the Summary and Histogram.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary, and
+// Histogram, a very important part of the Prometheus data model is the
+// partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec.
+//
+// Those are all the parts needed for basic usage. Detailed documentation and
+// examples are provided below.
+//
+// Everything else this package offers is essentially for "power users" only. A
+// few pointers to "power user features":
+//
+// All the various ...Opts structs have a ConstLabels field for labels that
+// never change their value (which is only useful under special circumstances,
+// see documentation of the Opts type).
+//
+// The Untyped metric behaves like a Gauge, but signals the Prometheus server
+// not to assume anything about its type.
+//
+// Functions to fine-tune how the metric registry works: EnableCollectChecks,
+// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
+//
+// For custom metric collection, there are two entry points: Custom Metric
+// implementations and custom Collector implementations. A Metric is the
+// fundamental unit in the Prometheus data model: a sample at a point in time
+// together with its meta-data (like its fully-qualified name and any number of
+// pairs of label name and label value) that knows how to marshal itself into a
+// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
+// gets registered with the Prometheus registry and manages the collection of
+// one or more Metrics. Many parts of this package are building blocks for
+// Metrics and Collectors. Desc is the metric descriptor, actually used by all
+// metrics under the hood, and by Collectors to describe the Metrics to be
+// collected, but only to be dealt with by users if they implement their own
+// Metrics or Collectors. To create a Desc, the BuildFQName function will come
+// in handy. Other useful components for Metric and Collector implementation
+// include: LabelPairSorter to sort the DTO version of label pairs,
+// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
+// collection time, MetricVec to bundle custom Metrics into a metric vector
+// Collector, SelfCollector to make a custom Metric collect itself.
+//
+// A good example for a custom Collector is the ExpVarCollector included in this
+// package, which exports variables exported via the "expvar" package as
+// Prometheus metrics.
+package prometheus
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/expvar.go b/vendor/src/github.com/prometheus/client_golang/prometheus/expvar.go
new file mode 100644
index 0000000000..0f7630d53f
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/expvar.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+// ExpvarCollector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the ExpvarCollector is inherently
+// slow. Thus, the ExpvarCollector is probably great for experiments and
+// prototying, but you should seriously consider a more direct implementation of
+// Prometheus metrics for monitoring production systems.
+//
+// Use NewExpvarCollector to create new instances.
+type ExpvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
+// to be registered with the Prometheus registry.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as a Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar value must be
+// a nested expvar map, i.e. one where the values of the topmost map are maps
+// again, and so on, until a depth is reached that corresponds to the number of
+// labels. The leaves of that structure must be numbers or bools as above to
+// serve as the sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
+ return &ExpvarCollector{
+ exports: exports,
+ }
+}
+
+// Describe implements Collector.
+func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *ExpvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
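
A sketch of proxying a single expvar value; the expvar key and metric name are invented for the example.

```go
package main

import (
	"expvar"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// An expvar value maintained elsewhere in the program.
	open := expvar.NewInt("open_connections")
	open.Set(42)

	// The map key must match the expvar key; the Desc supplies the
	// Prometheus name and help. The resulting metric is Untyped.
	ev := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"open_connections": prometheus.NewDesc(
			"open_connections",
			"Currently open connections, proxied from expvar.",
			nil, nil,
		),
	})
	prometheus.MustRegister(ev)
}
```
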
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/src/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 0000000000..ba8a402caf
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "hash/fnv"
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1.
+ Inc()
+ // Dec decrements the Gauge by 1.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be
+ // negative, resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+func NewGauge(opts GaugeOpts) Gauge {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, 0)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newValue(desc, GaugeValue, 0, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+ return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
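
GaugeFunc in use, as a sketch; the metric name is assumed.

```go
package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// The callback is invoked at collection time, so the reported value
	// is always current; it must be safe for concurrent calls.
	goroutines := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "goroutines_current",
		Help: "Current number of goroutines.",
	}, func() float64 {
		return float64(runtime.NumGoroutine())
	})
	prometheus.MustRegister(goroutines)
}
```
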
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 0000000000..8be2476951
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,263 @@
+package prometheus
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+type goCollector struct {
+ goroutines Gauge
+ gcDesc *Desc
+
+ // metrics to describe and collect
+ metrics memStatsMetrics
+}
+
+// NewGoCollector returns a collector which exports metrics about the current
+// Go process.
+func NewGoCollector() *goCollector {
+ return &goCollector{
+ goroutines: NewGauge(GaugeOpts{
+ Namespace: "go",
+ Name: "goroutines",
+ Help: "Number of goroutines that currently exist.",
+ }),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ metrics: memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated, even if freed.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained by system. Sum of all system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("lookups_total"),
+ "Total number of pointer lookups.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ "Total number of mallocs.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of frees.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes_total"),
+ "Total number of heap bytes released to OS.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of allocated objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes in use by the stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("last_gc_time_seconds"),
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
+ valType: GaugeValue,
+ },
+ },
+ }
+}
+
+func memstatNamespace(s string) string {
+ return fmt.Sprintf("go_memstats_%s", s)
+}
+
+// Describe sends all descriptions of the collector to the provided channel.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutines.Desc()
+ ch <- c.gcDesc
+
+ for _, i := range c.metrics {
+ ch <- i.desc
+ }
+}
+
+// Collect sends the current state of all metrics of the collector to the provided channel.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ c.goroutines.Set(float64(runtime.NumGoroutine()))
+ ch <- c.goroutines
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+
+ ms := &runtime.MemStats{}
+ runtime.ReadMemStats(ms)
+ for _, i := range c.metrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
+
+// memStatsMetrics provide description, value, and value type for memstat metrics.
+type memStatsMetrics []struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+}
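
Wiring the collector up is a one-liner; a minimal sketch:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Export the Go runtime metrics defined above on /metrics.
	prometheus.MustRegister(prometheus.NewGoCollector())
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```
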
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/src/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 0000000000..f98a41bc89
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,450 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "hash/fnv"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+var (
+ // DefBuckets are the default Histogram buckets. The default buckets are
+ // tailored to broadly measure the response time (in seconds) of a
+ // network service. Most likely, however, you will be required to define
+ // buckets customized to your use case.
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Histogram. Histograms with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // HistogramVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Histograms with the same fully-qualified
+ // name. In that case, those Histograms must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound; it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Finally we know the final length of h.upperBounds and can make counts.
+ h.counts = make([]uint64, len(h.upperBounds))
+
+ h.Init(h) // Init self-collection.
+ return h
+}
+
+type histogram struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+
+ SelfCollector
+ // Note that there is no mutex required.
+
+ desc *Desc
+
+ upperBounds []float64
+ counts []uint64
+
+ labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+ if i < len(h.counts) {
+ atomic.AddUint64(&h.counts[i], 1)
+ }
+ atomic.AddUint64(&h.count, 1)
+ for {
+ oldBits := atomic.LoadUint64(&h.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, len(h.upperBounds))
+
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
+ his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
+ var count uint64
+ for i, upperBound := range h.upperBounds {
+ count += atomic.LoadUint64(&h.counts[i])
+ buckets[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+ his.Bucket = buckets
+ out.Histogram = his
+ out.Label = h.labelPairs
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Histogram and not a
+// Metric so that no type conversion is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Histogram and not a Metric so that no
+// type conversion is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
+ return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Histogram {
+ return m.MetricVec.With(labels).(Histogram)
+}
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// and sent to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
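
A sketch tying the bucket helpers and Observe together; the metric name and bucket layout are assumptions.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var reqDur = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name: "request_duration_seconds",
	Help: "Request latency distribution.",
	// Eight buckets from 1ms up, each 4x the previous:
	// 0.001, 0.004, 0.016, ... seconds; +Inf is added implicitly.
	Buckets: prometheus.ExponentialBuckets(0.001, 4, 8),
})

func init() {
	prometheus.MustRegister(reqDur)
}

func main() {
	start := time.Now()
	// ... handle a request ...
	reqDur.Observe(time.Since(start).Seconds())
}
```
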
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/http.go b/vendor/src/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 0000000000..eabe602468
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,361 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+ Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+ return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+ return time.Now()
+})
+
+func nowSeries(t ...time.Time) nower {
+ return nowFunc(func() time.Time {
+ defer func() {
+ t = t[1:]
+ }()
+
+ return t[0]
+ })
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(
+ SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": handlerName},
+ },
+ handlerFunc,
+ )
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
+// flexibility (at the cost of a more complex call syntax). As
+// InstrumentHandler, this function registers four metric collectors, but it
+// uses the provided SummaryOpts to create them. However, the fields "Name" and
+// "Help" in the SummaryOpts are ignored. "Name" is replaced by
+// "requests_total", "request_duration_microseconds", "request_size_bytes", and
+// "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+// prometheus.InstrumentHandlerWithOpts(
+// prometheus.SummaryOpts{
+// Subsystem: "http",
+// ConstLabels: prometheus.Labels{"handler": handlerName},
+// },
+// handler,
+// )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
+// more flexibility (at the cost of a more complex call syntax). See
+// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+
+ regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
+ regReqDur := MustRegisterOrGet(reqDur).(Summary)
+ regReqSz := MustRegisterOrGet(reqSz).(Summary)
+ regResSz := MustRegisterOrGet(resSz).(Summary)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+
+ delegate := &responseWriterDelegator{ResponseWriter: w}
+ out := make(chan int)
+ urlLen := 0
+ if r.URL != nil {
+ urlLen = len(r.URL.String())
+ }
+ go computeApproximateRequestSize(r, out, urlLen)
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ var rw http.ResponseWriter
+ if cn && fl && hj && rf {
+ rw = &fancyResponseWriterDelegator{delegate}
+ } else {
+ rw = delegate
+ }
+ handlerFunc(rw, r)
+
+ elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+ method := sanitizeMethod(r.Method)
+ code := sanitizeCode(delegate.status)
+ regReqCnt.WithLabelValues(method, code).Inc()
+ regReqDur.Observe(elapsed)
+ regResSz.Observe(float64(delegate.written))
+ regReqSz.Observe(float64(<-out))
+ })
+}
+
+func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+ f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+ if !f.wroteHeader {
+ f.WriteHeader(http.StatusOK)
+ }
+ n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+ f.written += n
+ return n, err
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/src/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 0000000000..86fd81c108
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,166 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its metadata being exported to
+// Prometheus. Implementers of Metric in this package include Gauge, Counter,
+// Untyped, and Summary. Users can implement their own Metric types, but that
+// should be rarely needed. See the example for SelfCollector, which is also an
+// example for a user-implemented Metric.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Implementers of custom Metric types must observe concurrency safety
+ // as reads of this metric may occur at any time, and any blocking
+ // occurs at the expense of total performance of rendering all
+ // registered metrics. Ideally Metric implementations should support
+ // concurrent readers.
+ //
+ // The Prometheus client library attempts to minimize memory allocations
+ // and will provide a pre-existing reset dto.Metric pointer. Prometheus
+ // may recycle the dto.Metric proto message, so Metric implementations
+ // should just populate the provided dto.Metric and then should not keep
+ // any reference to it.
+ //
+ // While populating dto.Metric, labels must be sorted lexicographically.
+ // (Implementers may find LabelPairSorter useful for that.)
+ Write(*dto.Metric) error
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a metric
+ // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+ // serve only special purposes. One is for the special case where the
+ // value of a label does not change during the lifetime of a process,
+ // e.g. if the revision of the running binary is put into a
+ // label. Another, more advanced purpose is if more than one Collector
+ // needs to collect Metrics with the same fully-qualified name. In that
+ // case, those Metrics must differ in the values of their
+ // ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
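+
+// For example, assuming an illustrative namespace "api" and subsystem "http":
+//
+//	BuildFQName("api", "http", "requests_total") // "api_http_requests_total"
+//	BuildFQName("", "", "up")                    // "up"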
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+type hashSorter []uint64
+
+func (s hashSorter) Len() int {
+ return len(s)
+}
+
+func (s hashSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s hashSorter) Less(i, j int) bool {
+ return s[i] < s[j]
+}
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
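+
+// A sketch of the intended use inside a custom Collector's Collect method,
+// assuming a *Desc named desc and an error err from a failed collection:
+//
+//	ch <- prometheus.NewInvalidMetric(desc, err)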
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 0000000000..d8cf0eda34
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "github.com/prometheus/procfs"
+
+type processCollector struct {
+ pid int
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ cpuTotal Counter
+ openFDs, maxFDs Gauge
+ vsize, rss Gauge
+ startTime Gauge
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including cpu, memory and file descriptor usage as well as
+// the process start time for the given process id under the given namespace.
+func NewProcessCollector(pid int, namespace string) *processCollector {
+ return NewProcessCollectorPIDFn(
+ func() (int, error) { return pid, nil },
+ namespace,
+ )
+}
+
+// NewProcessCollectorPIDFn returns a collector which exports the current state
+// of process metrics including cpu, memory and file descriptor usage as well
+// as the process start time under the given namespace. The given pidFn is
+// called on each collect and is used to determine the process to export
+// metrics for.
+func NewProcessCollectorPIDFn(
+ pidFn func() (int, error),
+ namespace string,
+) *processCollector {
+ c := processCollector{
+ pidFn: pidFn,
+ collectFn: func(chan<- Metric) {},
+
+ cpuTotal: NewCounter(CounterOpts{
+ Namespace: namespace,
+ Name: "process_cpu_seconds_total",
+ Help: "Total user and system CPU time spent in seconds.",
+ }),
+ openFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_open_fds",
+ Help: "Number of open file descriptors.",
+ }),
+ maxFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_max_fds",
+ Help: "Maximum number of open file descriptors.",
+ }),
+ vsize: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_virtual_memory_bytes",
+ Help: "Virtual memory size in bytes.",
+ }),
+ rss: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_resident_memory_bytes",
+ Help: "Resident memory size in bytes.",
+ }),
+ startTime: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_start_time_seconds",
+ Help: "Start time of the process since unix epoch in seconds.",
+ }),
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if _, err := procfs.NewStat(); err == nil {
+ c.collectFn = c.processCollect
+ }
+
+ return &c
+}
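+
+// A usage sketch: the default registry already registers a process collector
+// for the current process (see newDefaultRegistry below), so an explicit call
+// is mainly useful with a custom namespace ("myapp" is an illustrative name):
+//
+//	prometheus.MustRegister(prometheus.NewProcessCollector(os.Getpid(), "myapp"))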
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal.Desc()
+ ch <- c.openFDs.Desc()
+ ch <- c.maxFDs.Desc()
+ ch <- c.vsize.Desc()
+ ch <- c.rss.Desc()
+ ch <- c.startTime.Desc()
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ return
+ }
+
+ if stat, err := p.NewStat(); err == nil {
+ c.cpuTotal.Set(stat.CPUTime())
+ ch <- c.cpuTotal
+ c.vsize.Set(float64(stat.VirtualMemory()))
+ ch <- c.vsize
+ c.rss.Set(float64(stat.ResidentMemory()))
+ ch <- c.rss
+
+ if startTime, err := stat.StartTime(); err == nil {
+ c.startTime.Set(startTime)
+ ch <- c.startTime
+ }
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ c.openFDs.Set(float64(fds))
+ ch <- c.openFDs
+ }
+
+ if limits, err := p.NewLimits(); err == nil {
+ c.maxFDs.Set(float64(limits.OpenFiles))
+ ch <- c.maxFDs
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/push.go b/vendor/src/github.com/prometheus/client_golang/prometheus/push.go
new file mode 100644
index 0000000000..1c33848a35
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/push.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+// Push triggers a metric collection by the default registry and pushes all
+// collected metrics to the Pushgateway specified by url. See the Pushgateway
+// documentation for detailed implications of the job and instance
+// parameters. instance can be left empty. You can use just host:port or ip:port
+// as url, in which case 'http://' is added automatically. You can also include
+// the scheme in the URL. However, do not include the '/metrics/jobs/...' part.
+//
+// Note that all previously pushed metrics with the same job and instance will
+// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
+// to push to the Pushgateway.)
+func Push(job, instance, url string) error {
+ return defRegistry.Push(job, instance, url, "PUT")
+}
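+
+// A usage sketch with an illustrative job name and Pushgateway address:
+//
+//	if err := prometheus.Push("db_backup", "", "pushgateway.example.org:9091"); err != nil {
+//		// handle the push error
+//	}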
+
+// PushAdd works like Push, but only previously pushed metrics with the same
+// name (and the same job and instance) will be replaced. (It uses HTTP method
+// 'POST' to push to the Pushgateway.)
+func PushAdd(job, instance, url string) error {
+ return defRegistry.Push(job, instance, url, "POST")
+}
+
+// PushCollectors works like Push, but it does not collect from the default
+// registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushCollectors(job, instance, url string, collectors ...Collector) error {
+ return pushCollectors(job, instance, url, "PUT", collectors...)
+}
+
+// PushAddCollectors works like PushAdd, but it does not collect from the
+// default registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
+ return pushCollectors(job, instance, url, "POST", collectors...)
+}
+
+func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
+ r := newRegistry()
+ for _, collector := range collectors {
+ if _, err := r.Register(collector); err != nil {
+ return err
+ }
+ }
+ return r.Push(job, instance, url, method)
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/src/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 0000000000..5970aaeeba
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,726 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+import (
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/expfmt"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ defRegistry = newDefaultRegistry()
+ errAlreadyReg = errors.New("duplicate metrics collector registration attempted")
+)
+
+// Constants relevant to the HTTP interface.
+const (
+ // APIVersion is the version of the format of the exported data. This
+ // will match this library's version, which subscribes to the Semantic
+ // Versioning scheme.
+ APIVersion = "0.0.4"
+
+ // DelimitedTelemetryContentType is the content type set on telemetry
+ // data responses in delimited protobuf format.
+ DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`
+ // TextTelemetryContentType is the content type set on telemetry data
+ // responses in text format.
+ TextTelemetryContentType = `text/plain; version=` + APIVersion
+ // ProtoTextTelemetryContentType is the content type set on telemetry
+ // data responses in protobuf text format. (Only used for debugging.)
+ ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`
+ // ProtoCompactTextTelemetryContentType is the content type set on
+ // telemetry data responses in protobuf compact text format. (Only used
+ // for debugging.)
+ ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`
+
+ // Constants for object pools.
+ numBufs = 4
+ numMetricFamilies = 1000
+ numMetrics = 10000
+
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+
+ acceptEncodingHeader = "Accept-Encoding"
+ acceptHeader = "Accept"
+)
+
+// Handler returns the HTTP handler for the global Prometheus registry. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name). Usually the handler is used to handle the "/metrics" endpoint.
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", defRegistry)
+}
+
+// UninstrumentedHandler works in the same way as Handler, but the returned HTTP
+// handler is not instrumented. This is useful if no instrumentation is desired
+// (for whatever reason) or if the instrumentation has to happen with a
+// different handler name (or with a different instrumentation approach
+// altogether). See the InstrumentHandler example.
+func UninstrumentedHandler() http.Handler {
+ return defRegistry
+}
+
+// Register registers a new Collector to be included in metrics collection. It
+// returns an error if the descriptors provided by the Collector are invalid or
+// if they - in combination with descriptors of already registered Collectors -
+// do not fulfill the consistency and uniqueness criteria described in the Desc
+// documentation.
+//
+// Do not register the same Collector multiple times concurrently. (Registering
+// the same Collector twice would result in an error anyway, but on top of that,
+// it is not safe to do so concurrently.)
+func Register(m Collector) error {
+ _, err := defRegistry.Register(m)
+ return err
+}
+
+// MustRegister works like Register but panics where Register would have
+// returned an error.
+func MustRegister(m Collector) {
+ err := Register(m)
+ if err != nil {
+ panic(err)
+ }
+}
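+
+// A typical registration sketch; the metric name and help text are
+// illustrative:
+//
+//	requests := prometheus.NewCounter(prometheus.CounterOpts{
+//		Name: "myapp_requests_total",
+//		Help: "Total number of requests handled.",
+//	})
+//	prometheus.MustRegister(requests)
+//	requests.Inc()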
+
+// RegisterOrGet works like Register but does not return an error if a Collector
+// is registered that equals a previously registered Collector. (Two Collectors
+// are considered equal if their Describe method yields the same set of
+// descriptors.) Instead, the previously registered Collector is returned (which
+// is helpful if the new and previously registered Collectors are equal but not
+// identical, i.e. not pointers to the same object).
+//
+// As for Register, it is still not safe to call RegisterOrGet with the same
+// Collector multiple times concurrently.
+func RegisterOrGet(m Collector) (Collector, error) {
+ return defRegistry.RegisterOrGet(m)
+}
+
+// MustRegisterOrGet works like RegisterOrGet but panics where RegisterOrGet
+// would have returned an error.
+func MustRegisterOrGet(m Collector) Collector {
+ existing, err := RegisterOrGet(m)
+ if err != nil {
+ panic(err)
+ }
+ return existing
+}
+
+// Unregister unregisters the Collector that equals the Collector passed in as
+// an argument. (Two Collectors are considered equal if their Describe method
+// yields the same set of descriptors.) The function returns whether a Collector
+// was unregistered.
+func Unregister(c Collector) bool {
+ return defRegistry.Unregister(c)
+}
+
+// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
+// are collected. The hook function must be set before metrics collection begins
+// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
+// MetricFamily protobufs returned by the hook function are merged with the
+// metrics collected in the usual way.
+//
+// This is a way to directly inject MetricFamily protobufs managed and owned by
+// the caller. The caller has full responsibility. As no registration of the
+// injected metrics has happened, there is no descriptor to check against, and
+// there are no registration-time checks. If collect-time checks are disabled
+// (see function EnableCollectChecks), no sanity checks are performed on the
+// returned protobufs at all. If collect-checks are enabled, type and uniqueness
+// checks are performed, but no further consistency checks (which would require
+// knowledge of a metric descriptor).
+//
+// Sorting concerns: The caller is responsible for sorting the label pairs in
+// each metric. However, the metrics themselves will be sorted by the registry,
+// as that is required anyway after merging with the metric families collected
+// conventionally.
+//
+// The function must be callable at any time and concurrently.
+func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
+ defRegistry.metricFamilyInjectionHook = hook
+}
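+
+// A minimal sketch, assuming the caller maintains a slice of MetricFamily
+// protobufs named externalFamilies that it owns and keeps consistent:
+//
+//	prometheus.SetMetricFamilyInjectionHook(func() []*dto.MetricFamily {
+//		return externalFamilies
+//	})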
+
+// PanicOnCollectError sets whether a panic is raised upon an error while
+// metrics are collected and served to the HTTP endpoint. By default, an
+// internal server error (status code 500) is served with an error message.
+func PanicOnCollectError(b bool) {
+ defRegistry.panicOnCollectError = b
+}
+
+// EnableCollectChecks enables (or disables) additional consistency checks
+// during metrics collection. These additional checks are not enabled by default
+// because they inflict a performance penalty and the errors they check for can
+// only happen if the used Metric and Collector types have internal programming
+// errors. It can be helpful to enable these checks while working with custom
+// Collectors or Metrics whose correctness is not well established yet.
+func EnableCollectChecks(b bool) {
+ defRegistry.collectChecksEnabled = b
+}
+
+// encoder is a function that writes a dto.MetricFamily to an io.Writer in a
+// certain encoding. It returns the number of bytes written and any error
+// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText
+// are encoders.
+type encoder func(io.Writer, *dto.MetricFamily) (int, error)
+
+type registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ bufPool chan *bytes.Buffer
+ metricFamilyPool chan *dto.MetricFamily
+ metricPool chan *dto.Metric
+ metricFamilyInjectionHook func() []*dto.MetricFamily
+
+ panicOnCollectError, collectChecksEnabled bool
+}
+
+func (r *registry) Register(c Collector) (Collector, error) {
+ descChan := make(chan *Desc, capDescChan)
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+
+ newDescIDs := map[uint64]struct{}{}
+ newDimHashesByName := map[string]uint64{}
+ var collectorID uint64 // Just a sum of all desc IDs.
+ var duplicateDescErr error
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+	// Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // Did anything happen at all?
+ if len(newDescIDs) == 0 {
+ return nil, errors.New("collector has no descriptors")
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ return existing, errAlreadyReg
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return nil, duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return c, nil
+}
+
+func (r *registry) RegisterOrGet(m Collector) (Collector, error) {
+ existing, err := r.Register(m)
+ if err != nil && err != errAlreadyReg {
+ return nil, err
+ }
+ return existing, nil
+}
+
+func (r *registry) Unregister(c Collector) bool {
+ descChan := make(chan *Desc, capDescChan)
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+
+ descIDs := map[uint64]struct{}{}
+ var collectorID uint64 // Just a sum of the desc IDs.
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+func (r *registry) Push(job, instance, pushURL, method string) error {
+ if !strings.Contains(pushURL, "://") {
+ pushURL = "http://" + pushURL
+ }
+ pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
+ if instance != "" {
+ pushURL += "/instances/" + url.QueryEscape(instance)
+ }
+ buf := r.getBuf()
+ defer r.giveBuf(buf)
+ if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil {
+ if r.panicOnCollectError {
+ panic(err)
+ }
+ return err
+ }
+ req, err := http.NewRequest(method, pushURL, buf)
+ if err != nil {
+ return err
+ }
+ req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 202 {
+ return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL)
+ }
+ return nil
+}
+
+func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ contentType := expfmt.Negotiate(req.Header)
+ buf := r.getBuf()
+ defer r.giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil {
+ if r.panicOnCollectError {
+ panic(err)
+ }
+ http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+}
+
+func (r *registry) writePB(encoder expfmt.Encoder) error {
+ var metricHashes map[uint64]struct{}
+ if r.collectChecksEnabled {
+ metricHashes = make(map[uint64]struct{})
+ }
+ metricChan := make(chan Metric, capMetricChan)
+ wg := sync.WaitGroup{}
+
+ r.mtx.RLock()
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+
+ // Scatter.
+ // (Collectors could be complex and slow, so we call them all at once.)
+ wg.Add(len(r.collectorsByID))
+ go func() {
+ wg.Wait()
+ close(metricChan)
+ }()
+ for _, collector := range r.collectorsByID {
+ go func(collector Collector) {
+ defer wg.Done()
+ collector.Collect(metricChan)
+ }(collector)
+ }
+ r.mtx.RUnlock()
+
+ // Drain metricChan in case of premature return.
+ defer func() {
+		for range metricChan {
+ }
+ }()
+
+ // Gather.
+ for metric := range metricChan {
+		// This could be done concurrently, too, but it would require locking
+ // of metricFamiliesByName (and of metricHashes if checks are
+ // enabled). Most likely not worth it.
+ desc := metric.Desc()
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if !ok {
+ metricFamily = r.getMetricFamily()
+ defer r.giveMetricFamily(metricFamily)
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ dtoMetric := r.getMetric()
+ defer r.giveMetric(dtoMetric)
+ if err := metric.Write(dtoMetric); err != nil {
+ // TODO: Consider different means of error reporting so
+ // that a single erroneous metric could be skipped
+ // instead of blowing up the whole collection.
+ return fmt.Errorf("error collecting metric %v: %s", desc, err)
+ }
+ switch {
+ case metricFamily.Type != nil:
+ // Type already set. We are good.
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ return fmt.Errorf("empty metric collected: %s", dtoMetric)
+ }
+ if r.collectChecksEnabled {
+ if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil {
+ return err
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ }
+
+ if r.metricFamilyInjectionHook != nil {
+ for _, mf := range r.metricFamilyInjectionHook() {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if !exists {
+ metricFamiliesByName[mf.GetName()] = mf
+ if r.collectChecksEnabled {
+ for _, m := range mf.Metric {
+ if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil {
+ return err
+ }
+ }
+ }
+ continue
+ }
+ for _, m := range mf.Metric {
+ if r.collectChecksEnabled {
+ if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil {
+ return err
+ }
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+
+ // Now that MetricFamilies are all set, sort their Metrics
+ // lexicographically by their label values.
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+
+ // Write out MetricFamilies sorted by their name.
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name := range metricFamiliesByName {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+
+ for _, name := range names {
+ if err := encoder.Encode(metricFamiliesByName[name]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error {
+
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := fnv.New64a()
+ var buf bytes.Buffer
+ buf.WriteString(metricFamily.GetName())
+ buf.WriteByte(separatorByte)
+ h.Write(buf.Bytes())
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check. Label pairs must be sorted by contract. But the point of this
+ // method is to check for contract violations. So we better do the sort
+ // now.
+ sort.Sort(LabelPairSorter(dtoMetric.Label))
+ for _, lp := range dtoMetric.Label {
+ buf.Reset()
+ buf.WriteString(lp.GetValue())
+ buf.WriteByte(separatorByte)
+ h.Write(buf.Bytes())
+ }
+ metricHash := h.Sum64()
+ if _, exists := metricHashes[metricHash]; exists {
+ return fmt.Errorf(
+ "collected metric %s %s was collected before with the same name and label values",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ metricHashes[metricHash] = struct{}{}
+
+ if desc == nil {
+ return nil // Nothing left to check if we have no desc.
+ }
+
+ // Desc consistency with metric family.
+ if metricFamily.GetName() != desc.fqName {
+ return fmt.Errorf(
+ "collected metric %s %s has name %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName,
+ )
+ }
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+ lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(LabelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+
+ r.mtx.RLock() // Remaining checks need the read lock.
+ defer r.mtx.RUnlock()
+
+ // Is the desc registered?
+ if _, exist := r.descIDs[desc.id]; !exist {
+ return fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+
+ return nil
+}
+
+func (r *registry) getBuf() *bytes.Buffer {
+ select {
+ case buf := <-r.bufPool:
+ return buf
+ default:
+ return &bytes.Buffer{}
+ }
+}
+
+func (r *registry) giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ select {
+ case r.bufPool <- buf:
+ default:
+ }
+}
+
+func (r *registry) getMetricFamily() *dto.MetricFamily {
+ select {
+ case mf := <-r.metricFamilyPool:
+ return mf
+ default:
+ return &dto.MetricFamily{}
+ }
+}
+
+func (r *registry) giveMetricFamily(mf *dto.MetricFamily) {
+ mf.Reset()
+ select {
+ case r.metricFamilyPool <- mf:
+ default:
+ }
+}
+
+func (r *registry) getMetric() *dto.Metric {
+ select {
+ case m := <-r.metricPool:
+ return m
+ default:
+ return &dto.Metric{}
+ }
+}
+
+func (r *registry) giveMetric(m *dto.Metric) {
+ m.Reset()
+ select {
+ case r.metricPool <- m:
+ default:
+ }
+}
+
+func newRegistry() *registry {
+ return &registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ bufPool: make(chan *bytes.Buffer, numBufs),
+ metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies),
+ metricPool: make(chan *dto.Metric, numMetrics),
+ }
+}
+
+func newDefaultRegistry() *registry {
+ r := newRegistry()
+ r.Register(NewProcessCollector(os.Getpid(), ""))
+ r.Register(NewGoCollector())
+ return r
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
+
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+		// inconsistent. However, we have to deal with it, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+ return true
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/src/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 0000000000..fe81e004f6
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,540 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "hash/fnv"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary.
+ Observe(float64)
+}
+
+var (
+ // DefObjectives are the default Summary quantile values.
+ DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+ errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+ )
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Summary. Summaries with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // SummaryVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Summaries with the same fully-qualified
+ // name. In that case, those Summaries must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported
+ // for q will be the φ-quantile value for some φ between q-e and q+e.
+ // The default value is DefObjectives.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+	// "github.com/beorn7/perks/quantile").
+ BufCap uint32
+}
+
+// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
+// method of perks/quantile is actually not working as advertised - and it might
+// be unfixable, as the underlying algorithm is apparently not capable of
+// merging summaries in the first place. To avoid using Merge, we are currently
+// adding observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
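+
+// A usage sketch with an illustrative metric name and observed value:
+//
+//	latency := prometheus.NewSummary(prometheus.SummaryOpts{
+//		Name: "request_latency_seconds",
+//		Help: "Latency of handled requests.",
+//	})
+//	prometheus.MustRegister(latency)
+//	latency.Observe(0.42)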
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Objectives) == 0 {
+ opts.Objectives = DefObjectives
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: makeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.Init(s) // Init self-collection.
+ return s
+}
+
+type summary struct {
+ SelfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets new expiration set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+ MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ },
+ },
+ }
+}
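+
+// A usage sketch, partitioning the illustrative latency summary from the
+// NewSummary example by HTTP method:
+//
+//	latencies := prometheus.NewSummaryVec(
+//		prometheus.SummaryOpts{
+//			Name: "request_latency_seconds",
+//			Help: "Latency of handled requests.",
+//		},
+//		[]string{"method"},
+//	)
+//	latencies.WithLabelValues("get").Observe(0.42)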
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Summary and not a
+// Metric so that no type conversion is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Summary and not a Metric so that no
+// type conversion is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
+ return m.MetricVec.WithLabelValues(lvs...).(Summary)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *SummaryVec) With(labels Labels) Summary {
+ return m.MetricVec.With(labels).(Summary)
+}
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
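A minimal sketch of how the summary API above composes, assuming the vendored import paths; the metric names, label values, and the 0.5/0.99 quantile map (taken from the NewConstSummary doc comment) are illustrative, not Docker code:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
        dto "github.com/prometheus/client_model/go"
    )

    func main() {
        // Partitioned summaries: one child Summary per (code, method) pair.
        latencies := prometheus.NewSummaryVec(
            prometheus.SummaryOpts{
                Name: "http_request_duration_seconds", // illustrative
                Help: "HTTP request latencies, partitioned by status code and method.",
            },
            []string{"code", "method"},
        )
        latencies.WithLabelValues("404", "GET").Observe(0.042)

        // Fixed-value summary: the kind of throw-away metric a custom
        // Collector would emit from its Collect method.
        desc := prometheus.NewDesc("batch_duration_seconds", "Duration of the last batch run.", nil, nil)
        s := prometheus.MustNewConstSummary(desc, 3, 1.2, map[float64]float64{0.5: 0.23, 0.99: 0.56})

        // Inspect what would be exported, via the generated dto getters.
        var m dto.Metric
        if err := s.Write(&m); err == nil {
            fmt.Println(m.GetSummary().GetSampleCount(), m.GetSummary().GetSampleSum())
        }
    }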
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/src/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 0000000000..c65ab1c531
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "hash/fnv"
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that
+// no type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
+type Untyped interface {
+ Metric
+ Collector
+
+ // Set sets the Untyped metric to an arbitrary value.
+ Set(float64)
+ // Inc increments the Untyped metric by 1.
+ Inc()
+ // Dec decrements the Untyped metric by 1.
+ Dec()
+ // Add adds the given value to the Untyped metric. (The value can be
+ // negative, resulting in a decrease.)
+ Add(float64)
+ // Sub subtracts the given value from the Untyped metric. (The value can
+ // be negative, resulting in an increase.)
+ Sub(float64)
+}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, 0)
+}
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
+type UntypedVec struct {
+ MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &UntypedVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newValue(desc, UntypedValue, 0, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+ return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+ return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
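The callback-driven variant can be exercised as follows; a minimal sketch in which the queueDepth variable and the metric name are assumptions for illustration. As the NewUntypedFunc comment notes, the function runs inside Write at collect time and therefore must be concurrency-safe, hence the atomic load:

    package main

    import (
        "fmt"
        "sync/atomic"

        "github.com/prometheus/client_golang/prometheus"
        dto "github.com/prometheus/client_model/go"
    )

    var queueDepth int64 // hypothetical value updated elsewhere

    func main() {
        atomic.StoreInt64(&queueDepth, 7)

        depth := prometheus.NewUntypedFunc(
            prometheus.UntypedOpts{
                Name: "work_queue_depth", // illustrative
                Help: "Current depth of the work queue.",
            },
            // Called at collect time; must be safe for concurrent use.
            func() float64 { return float64(atomic.LoadInt64(&queueDepth)) },
        )

        var m dto.Metric
        if err := depth.Write(&m); err == nil {
            fmt.Println(m.GetUntyped().GetValue()) // 7
        }
    }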
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/value.go b/vendor/src/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 0000000000..b54ac11e88
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,234 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+ // valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ SelfCollector
+
+ desc *Desc
+ valType ValueType
+ labelPairs []*dto.LabelPair
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+ if len(labelValues) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &value{
+ desc: desc,
+ valType: valueType,
+ valBits: math.Float64bits(val),
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ result.Init(result)
+ return result
+}
+
+func (v *value) Desc() *Desc {
+ return v.desc
+}
+
+func (v *value) Set(val float64) {
+ atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) Inc() {
+ v.Add(1)
+}
+
+func (v *value) Dec() {
+ v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
+ for {
+ oldBits := atomic.LoadUint64(&v.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (v *value) Sub(val float64) {
+ v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+ return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved at collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ SelfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.Init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, n := range desc.variableLabels {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ for _, lp := range desc.constLabelPairs {
+ labelPairs = append(labelPairs, lp)
+ }
+ sort.Sort(LabelPairSorter(labelPairs))
+ return labelPairs
+}
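NewConstMetric and MustNewConstMetric are aimed at custom Collectors, as the doc comment says. A sketch of that pattern, assuming a hypothetical readOpCounts data source and illustrative metric names:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    // statsCollector emits throw-away const metrics on every scrape.
    type statsCollector struct {
        opsDesc *prometheus.Desc
    }

    // Describe sends the single Desc this collector can emit.
    func (c *statsCollector) Describe(ch chan<- *prometheus.Desc) {
        ch <- c.opsDesc
    }

    // Collect builds fresh const metrics on every scrape.
    func (c *statsCollector) Collect(ch chan<- prometheus.Metric) {
        for op, n := range readOpCounts() {
            ch <- prometheus.MustNewConstMetric(c.opsDesc, prometheus.CounterValue, n, op)
        }
    }

    func readOpCounts() map[string]float64 { // hypothetical data source
        return map[string]float64{"read": 42, "write": 7}
    }

    func main() {
        c := &statsCollector{
            opsDesc: prometheus.NewDesc(
                "ops_completed_total", "Operations completed, by op.",
                []string{"op"}, nil,
            ),
        }
        ch := make(chan prometheus.Metric, 2)
        c.Collect(ch)
        close(ch)
        for m := range ch {
            fmt.Println(m.Desc())
        }
    }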
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/src/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 0000000000..a1f3bdf37d
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,247 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "fmt"
+ "hash"
+ "sync"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that
+// differ in their label values. MetricVec is usually not used directly but as a
+// building block for implementations of vectors of a given metric
+// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
+// provided in this package.
+type MetricVec struct {
+ mtx sync.RWMutex // Protects not only children, but also hash and buf.
+ children map[uint64]Metric
+ desc *Desc
+
+ // hash is our own hash instance to avoid repeated allocations.
+ hash hash.Hash64
+ // buf is used to copy string contents into it for hashing,
+ // again to avoid allocations.
+ buf bytes.Buffer
+
+ newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It always sends exactly one Desc to the
+// provided channel.
+func (m *MetricVec) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metric := range m.children {
+ ch <- metric
+ }
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created.
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it at its start value (e.g. a Summary or
+// Histogram without any observations). See also the SummaryVec example.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+ return m.getOrCreateMetric(h, lvs...), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+ lvs := make([]string, len(labels))
+ for i, label := range m.desc.variableLabels {
+ lvs[i] = labels[label]
+ }
+ return m.getOrCreateMetric(h, lvs...), nil
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
+// occurs. The method allows neat syntax like:
+// httpReqs.WithLabelValues("404", "POST").Inc()
+func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
+ metric, err := m.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// With works as GetMetricWith, but panics if an error occurs. The method allows
+// neat syntax like:
+// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
+func (m *MetricVec) With(labels Labels) Metric {
+ metric, err := m.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual Metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+ if _, has := m.children[h]; !has {
+ return false
+ }
+ delete(m.children, h)
+ return true
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+ if _, has := m.children[h]; !has {
+ return false
+ }
+ delete(m.children, h)
+ return true
+}
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.children {
+ delete(m.children, h)
+ }
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+ if len(vals) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ m.hash.Reset()
+ for _, val := range vals {
+ m.buf.Reset()
+ m.buf.WriteString(val)
+ m.hash.Write(m.buf.Bytes())
+ }
+ return m.hash.Sum64(), nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+ if len(labels) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ m.hash.Reset()
+ for _, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ m.buf.Reset()
+ m.buf.WriteString(val)
+ m.hash.Write(m.buf.Bytes())
+ }
+ return m.hash.Sum64(), nil
+}
+
+func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
+ metric, ok := m.children[hash]
+ if !ok {
+ // Copy labelValues here rather than eagerly; otherwise the copy would be
+ // allocated even when the metric already exists and this path is not taken.
+ copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
+ metric = m.newMetric(copiedLabelValues...)
+ m.children[hash] = metric
+ }
+ return metric
+}
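The access patterns MetricVec offers differ mainly in failure mode. A sketch using the UntypedVec defined earlier in this patch (metric and label names illustrative): GetMetricWithLabelValues reports cardinality problems as errors, WithLabelValues trades that for a panic and neater call sites, and DeleteLabelValues drops a child so it is no longer exported:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        v := prometheus.NewUntypedVec(
            prometheus.UntypedOpts{
                Name: "sessions_active", // illustrative
                Help: "Active sessions, partitioned by region.",
            },
            []string{"region"},
        )

        // Error-returning access: wrong cardinality yields an error instead
        // of a panic, which matters when label values come from user input.
        if m, err := v.GetMetricWithLabelValues("eu-west"); err == nil {
            m.Set(3)
        }

        // Shortcut access: panics on inconsistent labels, but reads neatly.
        v.WithLabelValues("us-east").Inc()

        // Deleting a child stops it from being exported; a later access with
        // the same label values starts from a fresh metric.
        fmt.Println(v.DeleteLabelValues("eu-west")) // true
        fmt.Println(v.DeleteLabelValues("nowhere")) // false
    }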
diff --git a/vendor/src/github.com/prometheus/client_model/LICENSE b/vendor/src/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/src/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 0000000000..b065f8683f
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,364 @@
+// Code generated by protoc-gen-go.
+// source: metrics.proto
+// DO NOT EDIT!
+
+/*
+Package io_prometheus_client is a generated protocol buffer package.
+
+It is generated from these files:
+ metrics.proto
+
+It has these top-level messages:
+ LabelPair
+ Gauge
+ Counter
+ Quantile
+ Summary
+ Untyped
+ Histogram
+ Bucket
+ Metric
+ MetricFamily
+*/
+package io_prometheus_client
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
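These generated types are what every Metric's Write method fills in. A small sketch of building one by hand and reading it back through the nil-safe getters defined above; the label and value are illustrative:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        dto "github.com/prometheus/client_model/go"
    )

    func main() {
        // Build a Metric the way the client library's Write methods do.
        m := &dto.Metric{
            Label: []*dto.LabelPair{
                {Name: proto.String("method"), Value: proto.String("GET")},
            },
            Gauge: &dto.Gauge{Value: proto.Float64(0.5)},
        }
        for _, lp := range m.GetLabel() {
            fmt.Printf("%s=%s ", lp.GetName(), lp.GetValue())
        }
        fmt.Println(m.GetGauge().GetValue())
        // Getters are nil-safe: Counter is unset, so this prints the zero
        // value instead of panicking.
        fmt.Println(m.GetCounter().GetValue())
    }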
diff --git a/vendor/src/github.com/prometheus/client_model/ruby/LICENSE b/vendor/src/github.com/prometheus/client_model/ruby/LICENSE
new file mode 100644
index 0000000000..11069edd79
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_model/ruby/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/src/github.com/prometheus/common/LICENSE b/vendor/src/github.com/prometheus/common/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/prometheus/common/expfmt/decode.go b/vendor/src/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 0000000000..b72c9bedef
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,411 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
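+// DecodeOptions contains options used by the Decoder and in sample extraction.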
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const (
+ textType = "text/plain"
+ jsonType = "application/json"
+ )
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+
+ case jsonType:
+ var prometheusAPIVersion string
+
+ if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
+ prometheusAPIVersion = params["version"]
+ } else {
+ prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
+ }
+
+ switch prometheusAPIVersion {
+ case "0.0.2", "":
+ return fmtJSON2
+ default:
+ return FmtUnknown
+ }
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ case fmtJSON2:
+ return newJSON2Decoder(r)
+ }
+ return &textDecoder{r: r}
+}
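+
+// A minimal usage sketch (assuming r is an io.Reader carrying text-format
+// metrics; error handling abbreviated):
+//
+//	dec := NewDecoder(r, FmtText)
+//	for {
+//		var mf dto.MetricFamily
+//		if err := dec.Decode(&mf); err != nil {
+//			break // io.EOF signals the end of the input
+//		}
+//		// ... use mf ...
+//	}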
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ return err
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
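+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.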
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
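+// Decode decodes the next metric family from the wrapped Decoder and extracts
+// its samples into s, applying the configured DecodeOptions.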
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ if err := sd.Dec.Decode(&sd.f); err != nil {
+ return err
+ }
+ *s = extractSamples(&sd.f, sd.Opts)
+ return nil
+}
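+
+// A minimal usage sketch (assuming dec is a Decoder; model.Now() stamps
+// samples that carry no explicit timestamp):
+//
+//	sd := &SampleDecoder{Dec: dec, Opts: &DecodeOptions{Timestamp: model.Now()}}
+//	var v model.Vector
+//	if err := sd.Decode(&v); err != nil {
+//		return err
+//	}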
+
+// ExtractSamples builds a slice of samples from the provided metric families.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
+ var all model.Vector
+ for _, f := range fams {
+ all = append(all, extractSamples(f, o)...)
+ }
+ return all
+}
+
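+// extractSamples dispatches on the metric family type to the matching
+// extractor below; it panics on an unknown type.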
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f)
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f)
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f)
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f)
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f)
+ }
+ panic("expfmt.extractSamples: unknown metric family type")
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
diff --git a/vendor/src/github.com/prometheus/common/expfmt/encode.go b/vendor/src/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 0000000000..392ca90ee2
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "bitbucket.org/ww/goautoneg"
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
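+// encoder is a function adapter that implements the Encoder interface.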
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder based on content type negotiation.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
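+
+// A minimal usage sketch for an HTTP handler (w is an http.ResponseWriter,
+// req the incoming request, families a []*dto.MetricFamily; names are
+// illustrative):
+//
+//	format := Negotiate(req.Header)
+//	w.Header().Set(hdrContentType, string(format))
+//	enc := NewEncoder(w, format)
+//	for _, mf := range families {
+//		if err := enc.Encode(mf); err != nil {
+//			break
+//		}
+//	}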
diff --git a/vendor/src/github.com/prometheus/common/expfmt/expfmt.go b/vendor/src/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 0000000000..366fbde98a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
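+// Format specifies the HTTP content type of the different wire protocols.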
+type Format string
+
+const (
+ TextVersion = "0.0.4"
+
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = `<unknown>`
+ FmtText Format = `text/plain; version=` + TextVersion
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+
+ // fmtJSON2 is hidden as it is deprecated.
+ fmtJSON2 Format = `application/json; version=0.0.2`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz.go b/vendor/src/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 0000000000..14f9201469
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/client_golang/text
+// go-fuzz -bin text-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/src/github.com/prometheus/common/expfmt/json_decode.go b/vendor/src/github.com/prometheus/common/expfmt/json_decode.go
new file mode 100644
index 0000000000..67e3a0d4d6
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/json_decode.go
@@ -0,0 +1,162 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/common/model"
+)
+
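+// json2Decoder decodes the deprecated JSON exchange format, version 0.0.2.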
+type json2Decoder struct {
+ dec *json.Decoder
+ fams []*dto.MetricFamily
+}
+
+func newJSON2Decoder(r io.Reader) Decoder {
+ return &json2Decoder{
+ dec: json.NewDecoder(r),
+ }
+}
+
+type histogram002 struct {
+ Labels model.LabelSet `json:"labels"`
+ Values map[string]float64 `json:"value"`
+}
+
+type counter002 struct {
+ Labels model.LabelSet `json:"labels"`
+ Value float64 `json:"value"`
+}
+
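+// protoLabelSet merges ext into base, drops the reserved metric name label,
+// and returns the remaining labels as a sorted slice of proto label pairs.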
+func protoLabelSet(base, ext model.LabelSet) []*dto.LabelPair {
+ labels := base.Clone().Merge(ext)
+ delete(labels, model.MetricNameLabel)
+
+ names := make([]string, 0, len(labels))
+ for ln := range labels {
+ names = append(names, string(ln))
+ }
+ sort.Strings(names)
+
+ pairs := make([]*dto.LabelPair, 0, len(labels))
+
+ for _, ln := range names {
+ lv := labels[model.LabelName(ln)]
+
+ pairs = append(pairs, &dto.LabelPair{
+ Name: proto.String(ln),
+ Value: proto.String(string(lv)),
+ })
+ }
+
+ return pairs
+}
+
+func (d *json2Decoder) more() error {
+ var entities []struct {
+ BaseLabels model.LabelSet `json:"baseLabels"`
+ Docstring string `json:"docstring"`
+ Metric struct {
+ Type string `json:"type"`
+ Values json.RawMessage `json:"value"`
+ } `json:"metric"`
+ }
+
+ if err := d.dec.Decode(&entities); err != nil {
+ return err
+ }
+ for _, e := range entities {
+ f := &dto.MetricFamily{
+ Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
+ Help: proto.String(e.Docstring),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{},
+ }
+
+ d.fams = append(d.fams, f)
+
+ switch e.Metric.Type {
+ case "counter", "gauge":
+ var values []counter002
+
+ if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
+ return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
+ }
+
+ for _, ctr := range values {
+ f.Metric = append(f.Metric, &dto.Metric{
+ Label: protoLabelSet(e.BaseLabels, ctr.Labels),
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(ctr.Value),
+ },
+ })
+ }
+
+ case "histogram":
+ var values []histogram002
+
+ if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
+ return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
+ }
+
+ for _, hist := range values {
+				quants := make([]string, 0, len(hist.Values))
+ for q := range hist.Values {
+ quants = append(quants, q)
+ }
+
+ sort.Strings(quants)
+
+ for _, q := range quants {
+ value := hist.Values[q]
+					// The correct label is "quantile", but it remains "percentile"
+					// so as not to break old expressions.
+ hist.Labels["percentile"] = model.LabelValue(q)
+
+ f.Metric = append(f.Metric, &dto.Metric{
+ Label: protoLabelSet(e.BaseLabels, hist.Labels),
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(value),
+ },
+ })
+ }
+ }
+
+ default:
+ return fmt.Errorf("unknown metric type %q", e.Metric.Type)
+ }
+ }
+ return nil
+}
+
+// Decode implements the Decoder interface.
+func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
+ if len(d.fams) == 0 {
+ if err := d.more(); err != nil {
+ return err
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
diff --git a/vendor/src/github.com/prometheus/common/expfmt/text_create.go b/vendor/src/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 0000000000..0bb9c14cc2
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,305 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. This function does not perform checks on the
+// content of the metric and label names, i.e. invalid metric or label names
+// will result in invalid text format output.
+// This function fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
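+
+// A minimal usage sketch (w is an io.Writer, mf a populated
+// *dto.MetricFamily; names are illustrative):
+//
+//	if _, err := MetricFamilyToText(w, mf); err != nil {
+//		return err
+//	}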
+
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ result := bytes.NewBuffer(make([]byte, 0, len(v)))
+ for _, c := range v {
+ switch {
+ case c == '\\':
+ result.WriteString(`\\`)
+ case includeDoubleQuote && c == '"':
+ result.WriteString(`\"`)
+ case c == '\n':
+ result.WriteString(`\n`)
+ default:
+ result.WriteRune(c)
+ }
+ }
+ return result.String()
+	return result.String()
+}
diff --git a/vendor/src/github.com/prometheus/common/expfmt/text_parse.go b/vendor/src/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 0000000000..84433bc4f6
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,746 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+ // These tell us if the currently processed line ends on '_count' or
+	// '_sum' respectively and belongs to a summary/histogram, representing the
+	// sample count and sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. Similar is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ return p.metricFamiliesByName, p.err
+}
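+
+// A minimal usage sketch (in is an io.Reader carrying text-format metrics;
+// names are illustrative):
+//
+//	var parser TextParser
+//	families, err := parser.TextToMetricFamilies(in)
+//	if err != nil {
+//		return err
+//	}
+//	for name, mf := range families {
+//		fmt.Println(name, mf.GetType())
+//	}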
+
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ // Do not append the newly created currentMetric to
+ // currentMF.Metric right now. First wait if this is a summary,
+ // and the metric exists already, which we can only know after
+ // having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'HELP'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+	// Check whether this is the _sum or _count of a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
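
A minimal sketch of driving the parser above through its exported entry point, TextParser.TextToMetricFamilies. It assumes the canonical import path github.com/prometheus/common/expfmt (rather than the vendor/src tree used in this diff); the metric names and values are invented:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	in := `# HELP http_requests_total Total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{method="get"} 42 1395066363000
http_requests_total{method="post"} 7 1395066363000
`
	var parser expfmt.TextParser
	// TextToMetricFamilies runs the state functions until EOF and returns
	// one dto.MetricFamily per metric name.
	families, err := parser.TextToMetricFamilies(strings.NewReader(in))
	if err != nil {
		fmt.Println("parse error:", err) // a ParseError carrying the line number
		return
	}
	for name, mf := range families {
		fmt.Printf("%s: type=%s, %d series\n", name, mf.GetType(), len(mf.GetMetric()))
	}
}
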
diff --git a/vendor/src/github.com/prometheus/common/model/alert.go b/vendor/src/github.com/prometheus/common/model/alert.go
new file mode 100644
index 0000000000..b027e9f3db
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,109 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+ // Label value pairs for purpose of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(time.Now())
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+	if !as[i].StartsAt.Equal(as[j].StartsAt) {
+		return as[i].StartsAt.Before(as[j].StartsAt)
+	}
+	if !as[i].EndsAt.Equal(as[j].EndsAt) {
+		return as[i].EndsAt.Before(as[j].EndsAt)
+	}
+	return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns StatusFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
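
A short sketch of the Alert API above, assuming the canonical import path github.com/prometheus/common/model; the labels and times are invented:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels: model.LabelSet{
			model.AlertNameLabel: "HighErrorRate",
			"severity":           "page",
		},
		StartsAt: time.Now().Add(-10 * time.Minute),
		EndsAt:   time.Now().Add(-time.Minute),
	}
	// EndsAt lies in the past, so the alert counts as resolved.
	fmt.Println(a.Name(), a.Status()) // HighErrorRate resolved

	as := model.Alerts{a}
	fmt.Println(as.HasFiring(), as.Status()) // false resolved
}
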
diff --git a/vendor/src/github.com/prometheus/common/model/fingerprinting.go b/vendor/src/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 0000000000..fc4de4106e
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
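
A sketch of the fingerprint helpers above; the hex string and set contents are arbitrary:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fp, err := model.ParseFingerprint("00ff00ff00ff00ff")
	if err != nil {
		panic(err)
	}
	fmt.Println(fp) // 00ff00ff00ff00ff, zero-padded again by String

	a := model.FingerprintSet{1: {}, 2: {}, 3: {}}
	b := model.FingerprintSet{2: {}, 3: {}, 4: {}}
	// Intersection iterates over the smaller set and probes the larger one.
	fmt.Println(a.Intersection(b).Equal(model.FingerprintSet{2: {}, 3: {}})) // true
}
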
diff --git a/vendor/src/github.com/prometheus/common/model/labels.go b/vendor/src/github.com/prometheus/common/model/labels.go
new file mode 100644
index 0000000000..6459c8f791
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,187 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+	return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
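
The sortable slice types above plug straight into package sort; a small sketch with invented data:

package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/common/model"
)

func main() {
	names := model.LabelNames{"instance", "job", "__name__"}
	sort.Sort(names)
	fmt.Println(names) // __name__, instance, job

	pairs := model.LabelPairs{
		{Name: "job", Value: "api"},
		{Name: "instance", Value: "a:9090"},
	}
	// LabelPairs orders by name first, then by value.
	sort.Sort(pairs)
	fmt.Println(pairs[0].Name, pairs[0].Value) // instance a:9090
}
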
diff --git a/vendor/src/github.com/prometheus/common/model/labelset.go b/vendor/src/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 0000000000..142b9d1e2d
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,153 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not. All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: if the label does not exist at all in ls, then ls is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !LabelNameRE.MatchString(string(ln)) {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
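
A sketch of the LabelSet operations above, with invented label data:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "api", "instance": "a:9090"}
	extra := model.LabelSet{"instance": "b:9090", "env": "prod"}

	// Merge is non-destructive; on conflicting names the argument wins.
	merged := base.Merge(extra)
	fmt.Println(merged) // {env="prod", instance="b:9090", job="api"}

	fmt.Println(base.Before(merged)) // true: base has fewer labels
	fmt.Println(merged.Fingerprint())
}
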
diff --git a/vendor/src/github.com/prometheus/common/model/metric.go b/vendor/src/github.com/prometheus/common/model/metric.go
new file mode 100644
index 0000000000..25fc3c9425
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,81 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+var separator = []byte{0}
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := Metric{}
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
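
A sketch of Metric, which behaves like a LabelSet whose __name__ label gets special rendering:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"method":              "get",
	}
	// String renders the usual name{labels} notation with sorted labels.
	fmt.Println(m) // http_requests_total{method="get"}
	fmt.Println(m.Equal(m.Clone())) // true
	fmt.Println(m.Fingerprint())
}
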
diff --git a/vendor/src/github.com/prometheus/common/model/model.go b/vendor/src/github.com/prometheus/common/model/model.go
new file mode 100644
index 0000000000..88f013a47a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/src/github.com/prometheus/common/model/signature.go b/vendor/src/github.com/prometheus/common/model/signature.go
new file mode 100644
index 0000000000..28f370065a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,190 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "bytes"
+ "hash"
+ "hash/fnv"
+ "sort"
+ "sync"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = fnv.New64a().Sum64()
+
+ hashAndBufPool sync.Pool
+)
+
+type hashAndBuf struct {
+ h hash.Hash64
+ b bytes.Buffer
+}
+
+func getHashAndBuf() *hashAndBuf {
+ hb := hashAndBufPool.Get()
+ if hb == nil {
+ return &hashAndBuf{h: fnv.New64a()}
+ }
+ return hb.(*hashAndBuf)
+}
+
+func putHashAndBuf(hb *hashAndBuf) {
+ hb.h.Reset()
+ hb.b.Reset()
+ hashAndBufPool.Put(hb)
+}
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, labelName := range labelNames {
+ hb.b.WriteString(labelName)
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(labels[labelName])
+ hb.b.WriteByte(SeparatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return hb.h.Sum64()
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, labelName := range labelNames {
+ hb.b.WriteString(string(labelName))
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(string(ls[labelName]))
+ hb.b.WriteByte(SeparatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return Fingerprint(hb.h.Sum64())
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses
+// a faster, less allocation-heavy hash function, which is more susceptible to
+// hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for labelName, labelValue := range ls {
+ hb.b.WriteString(string(labelName))
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(string(labelValue))
+ hb.h.Write(hb.b.Bytes())
+ result ^= hb.h.Sum64()
+ hb.h.Reset()
+ hb.b.Reset()
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(m) == 0 || len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, label := range labels {
+ hb.b.WriteString(string(label))
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(string(m[label]))
+ hb.b.WriteByte(SeparatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return hb.h.Sum64()
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, labelName := range labelNames {
+ hb.b.WriteString(string(labelName))
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(string(m[labelName]))
+ hb.b.WriteByte(SeparatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return hb.h.Sum64()
+}
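
Since all three exported signature helpers hash name/value pairs the same way, they agree whenever the effective label set is the same. A sketch with invented labels; all three lines print the same value:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.LabelsToSignature(map[string]string{"job": "api", "instance": "a:9090"}))

	m := model.Metric{"job": "api", "instance": "a:9090", "method": "get"}
	// Both calls hash exactly the labels job and instance.
	fmt.Println(model.SignatureForLabels(m, "job", "instance"))
	fmt.Println(model.SignatureWithoutLabels(m, map[model.LabelName]struct{}{"method": {}}))
}
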
diff --git a/vendor/src/github.com/prometheus/common/model/silence.go b/vendor/src/github.com/prometheus/common/model/silence.go
new file mode 100644
index 0000000000..b4b96eae9d
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher describes a match against the value of a given label.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus eco-system.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
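
A sketch of decoding a Silence from JSON, which exercises the validation in Matcher.UnmarshalJSON; the payload is invented:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	raw := `{
		"matchers": [{"name": "alertname", "value": "High.*", "isRegex": true}],
		"startsAt": "2016-06-14T00:00:00Z",
		"endsAt": "2016-06-14T01:00:00Z",
		"createdBy": "ops",
		"comment": "planned maintenance"
	}`
	var s model.Silence
	if err := json.Unmarshal([]byte(raw), &s); err != nil {
		// Empty matcher names and invalid regexes are rejected here.
		fmt.Println("invalid silence:", err)
		return
	}
	fmt.Println(s.Matchers[0].Name, s.Matchers[0].IsRegex) // alertname true
}
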
diff --git a/vendor/src/github.com/prometheus/common/model/time.go b/vendor/src/github.com/prometheus/common/model/time.go
new file mode 100644
index 0000000000..ebc8bf6cc8
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/time.go
@@ -0,0 +1,230 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+	// MinimumTick is the minimum supported time resolution. It has to be
+	// at most time.Second (and divide it evenly) for the code below to work.
+ minimumTick = time.Millisecond
+ // second is the Time duration equivalent to one second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *t = Time(v + va)
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// ParseDuration parses a string into a Duration, assuming that a day always
+// has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ durSeconds, _ := strconv.Atoi(matches[1])
+ dur := time.Duration(durSeconds) * time.Second
+ unit := matches[2]
+ switch unit {
+ case "d":
+ dur *= 60 * 60 * 24
+ case "h":
+ dur *= 60 * 60
+ case "m":
+ dur *= 60
+ case "s":
+ dur *= 1
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
+
+var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")
+
+func (d Duration) String() string {
+ seconds := int64(time.Duration(d) / time.Second)
+ factors := map[string]int64{
+ "d": 60 * 60 * 24,
+ "h": 60 * 60,
+ "m": 60,
+ "s": 1,
+ }
+ unit := "s"
+ switch int64(0) {
+ case seconds % factors["d"]:
+ unit = "d"
+ case seconds % factors["h"]:
+ unit = "h"
+ case seconds % factors["m"]:
+ unit = "m"
+ }
+ return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
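
A sketch of the Time and Duration helpers above; the timestamp and duration strings are invented:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	t := model.TimeFromUnix(1465887355)
	// Time has millisecond resolution; arithmetic goes through time.Duration.
	fmt.Println(t, t.Add(90*time.Second).Sub(t)) // 1465887355 1m30s

	d, err := model.ParseDuration("90m")
	if err != nil {
		panic(err)
	}
	// Duration.String picks the largest unit that divides the value evenly.
	fmt.Println(time.Duration(d), d) // 1h30m0s 90m
}
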
diff --git a/vendor/src/github.com/prometheus/common/model/value.go b/vendor/src/github.com/prometheus/common/model/value.go
new file mode 100644
index 0000000000..10ffb0bd61
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/value.go
@@ -0,0 +1,395 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+func (v SampleValue) Equal(o SampleValue) bool {
+ return v == o
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+ if s.Value != o.Value {
+ return false
+ }
+
+ return true
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "<ValNone>":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return "<ValNone>"
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is basically only an alias for Samples, but the
+// contract is that in a Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
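
Finally, a sketch of the JSON wire format defined above: sample values travel as [timestamp, "value"] pairs. The metric names and numbers are invented:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	s := &model.Sample{
		Metric:    model.Metric{model.MetricNameLabel: "up", "job": "api"},
		Value:     1,
		Timestamp: model.TimeFromUnix(1465887355),
	}
	b, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"metric":{"__name__":"up","job":"api"},"value":[1465887355,"1"]}

	var sc model.Scalar
	if err := json.Unmarshal([]byte(`[1465887355.001, "0.5"]`), &sc); err != nil {
		panic(err)
	}
	fmt.Println(sc) // scalar: 0.5 @[1465887355.001]
}
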
diff --git a/vendor/src/github.com/prometheus/procfs/.travis.yml b/vendor/src/github.com/prometheus/procfs/.travis.yml
new file mode 100644
index 0000000000..25e169dd01
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/.travis.yml
@@ -0,0 +1,7 @@
+sudo: false
+language: go
+go:
+ - 1.3
+ - 1.4
+ - 1.5
+ - tip
diff --git a/vendor/src/github.com/prometheus/procfs/AUTHORS.md b/vendor/src/github.com/prometheus/procfs/AUTHORS.md
new file mode 100644
index 0000000000..f1c27ccb01
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/AUTHORS.md
@@ -0,0 +1,20 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Tobias Schmidt <ts@soundcloud.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Armen Baghumian <abaghumian@noggin.com.au>
+* Bjoern Rabenstein <beorn@soundcloud.com>
+* David Cournapeau <cournape@gmail.com>
+* Ji-Hoon, Seol <jihoon.seol@gmail.com>
+* Jonas Große Sundrup <cherti@letopolis.de>
+* Julius Volz <julius@soundcloud.com>
+* Matthias Rampke <mr@soundcloud.com>
+* Nicky Gerritsen <nicky@streamone.nl>
+* Rémi Audebert <contact@halfr.net>
+* Tobias Schmidt <tobidt@gmail.com>
diff --git a/vendor/src/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/src/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 0000000000..5705f0fbea
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/src/github.com/prometheus/procfs/LICENSE b/vendor/src/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/prometheus/procfs/Makefile b/vendor/src/github.com/prometheus/procfs/Makefile
new file mode 100644
index 0000000000..e8acbbc5ec
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/Makefile
@@ -0,0 +1,6 @@
+ci:
+ go fmt
+ go vet
+ go test -v ./...
+ go get github.com/golang/lint/golint
+ golint *.go
diff --git a/vendor/src/github.com/prometheus/procfs/NOTICE b/vendor/src/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 0000000000..53c5e9aa11
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/src/github.com/prometheus/procfs/README.md b/vendor/src/github.com/prometheus/procfs/README.md
new file mode 100644
index 0000000000..6e7ee6b8b7
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/README.md
@@ -0,0 +1,10 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
+[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
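+
+A minimal usage sketch (illustrative only; see `doc.go` for a complete example):
+
+```go
+fs, err := procfs.NewFS(procfs.DefaultMountPoint)
+if err != nil {
+    log.Fatal(err)
+}
+_ = fs // fs can now be used to read files below /proc
+```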
diff --git a/vendor/src/github.com/prometheus/procfs/doc.go b/vendor/src/github.com/prometheus/procfs/doc.go
new file mode 100644
index 0000000000..e2acd6d40a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/vendor/src/github.com/prometheus/procfs/fs.go b/vendor/src/github.com/prometheus/procfs/fs.go
new file mode 100644
index 0000000000..6a8d97b11e
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,40 @@
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
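+
+// Illustrative sketch: construct an FS for the default mount point and bail
+// out gracefully if /proc is unavailable.
+//
+//    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
+//    if err != nil {
+//        log.Printf("proc not available: %s", err)
+//        return
+//    }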
+
+func (fs FS) stat(p string) (os.FileInfo, error) {
+ return os.Stat(path.Join(string(fs), p))
+}
+
+func (fs FS) open(p string) (*os.File, error) {
+ return os.Open(path.Join(string(fs), p))
+}
+
+func (fs FS) readlink(p string) (string, error) {
+ return os.Readlink(path.Join(string(fs), p))
+}
diff --git a/vendor/src/github.com/prometheus/procfs/ipvs.go b/vendor/src/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 0000000000..26da5000e3
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,223 @@
+package procfs
+
+import (
+ "bufio"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "strconv"
+ "strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+ // Total count of connections.
+ Connections uint64
+ // Total incoming packets processed.
+ IncomingPackets uint64
+ // Total outgoing packets processed.
+ OutgoingPackets uint64
+ // Total incoming traffic.
+ IncomingBytes uint64
+ // Total outgoing traffic.
+ OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+ // The local (virtual) IP address.
+ LocalAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
+ // The transport protocol (TCP, UDP).
+ Proto string
+ // The remote (real) IP address.
+ RemoteAddress net.IP
+ // The remote (real) port.
+ RemotePort uint16
+ // The current number of active connections for this virtual/real address pair.
+ ActiveConn uint64
+ // The current number of inactive connections for this virtual/real address pair.
+ InactConn uint64
+ // The current weight of this virtual/real address pair.
+ Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return fs.NewIPVSStats()
+}
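+
+// Illustrative sketch of reading the totals:
+//
+//    stats, err := procfs.NewIPVSStats()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Printf("connections: %d\n", stats.Connections)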
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) NewIPVSStats() (IPVSStats, error) {
+ file, err := fs.open("net/ip_vs_stats")
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ defer file.Close()
+
+ return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+ var (
+ statContent []byte
+ statLines []string
+ statFields []string
+ stats IPVSStats
+ )
+
+ statContent, err := ioutil.ReadAll(file)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ statLines = strings.SplitN(string(statContent), "\n", 4)
+ if len(statLines) != 4 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+ }
+
+ statFields = strings.Fields(statLines[2])
+ if len(statFields) != 5 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+ }
+
+ stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return stats, nil
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
+func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return []IPVSBackendStatus{}, err
+ }
+
+ return fs.NewIPVSBackendStatus()
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ file, err := fs.open("net/ip_vs")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+ var (
+ status []IPVSBackendStatus
+ scanner = bufio.NewScanner(file)
+ proto string
+ localAddress net.IP
+ localPort uint16
+ err error
+ )
+
+ for scanner.Scan() {
+ fields := strings.Fields(string(scanner.Text()))
+ if len(fields) == 0 {
+ continue
+ }
+ switch {
+ case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
+ continue
+ case fields[0] == "TCP" || fields[0] == "UDP":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localAddress, localPort, err = parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ case fields[0] == "->":
+ if len(fields) < 6 {
+ continue
+ }
+ remoteAddress, remotePort, err := parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ weight, err := strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ status = append(status, IPVSBackendStatus{
+ LocalAddress: localAddress,
+ LocalPort: localPort,
+ RemoteAddress: remoteAddress,
+ RemotePort: remotePort,
+ Proto: proto,
+ Weight: weight,
+ ActiveConn: activeConn,
+ InactConn: inactConn,
+ })
+ }
+ }
+ return status, nil
+}
+
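+// parseIPPort parses a hex-encoded "IP:Port" pair as found in /proc/net/ip_vs:
+// the IP is 8 hex digits (IPv4) or 32 hex digits (IPv6), and the port is also
+// hexadecimal.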
+func parseIPPort(s string) (net.IP, uint16, error) {
+ tmp := strings.SplitN(s, ":", 2)
+
+ if len(tmp) != 2 {
+ return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
+ }
+
+ if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
+ return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
+ }
+
+ ip, err := hex.DecodeString(tmp[0])
+ if err != nil {
+ return nil, 0, err
+ }
+
+ port, err := strconv.ParseUint(tmp[1], 16, 16)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return ip, uint16(port), nil
+}
diff --git a/vendor/src/github.com/prometheus/procfs/mdstat.go b/vendor/src/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 0000000000..09ed6b5ebc
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,158 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+ buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+ // Name of the device.
+ Name string
+ // activity-state of the device.
+ ActivityState string
+ // Number of active disks.
+ DisksActive int64
+ // Total number of disks the device consists of.
+ DisksTotal int64
+ // Number of blocks the device holds.
+ BlocksTotal int64
+ // Number of blocks on the device that are in sync.
+ BlocksSynced int64
+}
+
+// ParseMDStat parses the mdstat file and returns a slice of MDStat entries with the relevant info.
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
+ mdStatusFilePath := path.Join(string(fs), "mdstat")
+ content, err := ioutil.ReadFile(mdStatusFilePath)
+ if err != nil {
+ return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ mdStatusFile := string(content)
+
+ lines := strings.Split(mdStatusFile, "\n")
+ var currentMD string
+
+ // Each md has at least a device line, a status line, and one empty line
+ // afterwards, so there will be roughly len(lines)/3 devices; use that
+ // estimate for preallocation.
+ estimateMDs := len(lines) / 3
+ mdStates := make([]MDStat, 0, estimateMDs)
+
+ for i, l := range lines {
+ if l == "" {
+ // Skip entirely empty lines.
+ continue
+ }
+
+ if l[0] == ' ' {
+ // Those lines are not the beginning of a md-section.
+ continue
+ }
+
+ if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+ // We aren't interested in lines with general info.
+ continue
+ }
+
+ mainLine := strings.Split(l, " ")
+ if len(mainLine) < 3 {
+ return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+ }
+ currentMD = mainLine[0] // name of md-device
+ activityState := mainLine[2] // activity status of said md-device
+
+ if len(lines) <= i+3 {
+ return mdStates, fmt.Errorf("error parsing %s: entry for %s has fewer lines than expected", mdStatusFilePath, currentMD)
+ }
+
+ active, total, size, err := evalStatusline(lines[i+1]) // parse statusline, always present
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ //
+ // Now get the number of synced blocks.
+ //
+
+ // Get the line number of the syncing-line.
+ var j int
+ if strings.Contains(lines[i+2], "bitmap") { // then skip the bitmap line
+ j = i + 3
+ } else {
+ j = i + 2
+ }
+
+ // If device is syncing at the moment, get the number of currently synced bytes,
+ // otherwise that number equals the size of the device.
+ syncedBlocks := size
+ if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
+ syncedBlocks, err = evalBuildline(lines[j])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+ }
+
+ mdStates = append(mdStates, MDStat{currentMD, activityState, active, total, size, syncedBlocks})
+
+ }
+
+ return mdStates, nil
+}
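+
+// Illustrative sketch, assuming fs was obtained via NewFS(DefaultMountPoint):
+//
+//    mds, err := fs.ParseMDStat()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    for _, md := range mds {
+//        fmt.Printf("%s: %d/%d disks active\n", md.Name, md.DisksActive, md.DisksTotal)
+//    }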
+
+func evalStatusline(statusline string) (active, total, size int64, err error) {
+ matches := statuslineRE.FindStringSubmatch(statusline)
+
+ // +1 to make it more obvious that the whole string containing the info is also returned as matches[0].
+ if len(matches) != 3+1 {
+ return 0, 0, 0, fmt.Errorf("unexpected number matches found in statusline: %s", statusline)
+ }
+
+ size, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
+ }
+
+ total, err = strconv.ParseInt(matches[2], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
+ }
+
+ active, err = strconv.ParseInt(matches[3], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
+ }
+
+ return active, total, size, nil
+}
+
+// evalBuildline gets the size that has already been synced out of the sync line.
+func evalBuildline(buildline string) (int64, error) {
+ matches := buildlineRE.FindStringSubmatch(buildline)
+
+ // +1 to make it more obvious that the whole string containing the info is also returned as matches[0].
+ if len(matches) < 1+1 {
+ return 0, fmt.Errorf("too few matches found in buildline: %s", buildline)
+ }
+
+ if len(matches) > 1+1 {
+ return 0, fmt.Errorf("too many matches found in buildline: %s", buildline)
+ }
+
+ syncedSize, err := strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
+ }
+
+ return syncedSize, nil
+}
diff --git a/vendor/src/github.com/prometheus/procfs/proc.go b/vendor/src/github.com/prometheus/procfs/proc.go
new file mode 100644
index 0000000000..efc8502789
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,202 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllProcs()
+}
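+
+// Illustrative sketch:
+//
+//    procs, err := procfs.AllProcs()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Printf("%d processes found\n", len(procs))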
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+ p, err := fs.readlink("self")
+ if err != nil {
+ return Proc{}, err
+ }
+ pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ if _, err := fs.stat(strconv.Itoa(pid)); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := fs.open("")
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := p.open("cmdline")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) < 1 {
+ return []string{}, nil
+ }
+
+ return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+ exe, err := p.readlink("exe")
+
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return exe, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ targets := make([]string, len(names))
+
+ for i, name := range names {
+ target, err := p.readlink("fd/" + name)
+ if err == nil {
+ targets[i] = target
+ }
+ }
+
+ return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := p.open("fd")
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) open(pa string) (*os.File, error) {
+ return p.fs.open(path.Join(strconv.Itoa(p.PID), pa))
+}
+
+func (p Proc) readlink(pa string) (string, error) {
+ return p.fs.readlink(path.Join(strconv.Itoa(p.PID), pa))
+}
diff --git a/vendor/src/github.com/prometheus/procfs/proc_io.go b/vendor/src/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 0000000000..7c6dc86970
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,54 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+ // Chars read.
+ RChar uint64
+ // Chars written.
+ WChar uint64
+ // Read syscalls.
+ SyscR uint64
+ // Write syscalls.
+ SyscW uint64
+ // Bytes read.
+ ReadBytes uint64
+ // Bytes written.
+ WriteBytes uint64
+ // Bytes written, but taking into account truncation. See
+ // Documentation/filesystems/proc.txt in the kernel sources for
+ // detailed explanation.
+ CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) NewIO() (ProcIO, error) {
+ pio := ProcIO{}
+
+ f, err := p.open("io")
+ if err != nil {
+ return pio, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return pio, err
+ }
+
+ ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+ "read_bytes: %d\nwrite_bytes: %d\n" +
+ "cancelled_write_bytes: %d\n"
+
+ _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+ &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+ if err != nil {
+ return pio, err
+ }
+
+ return pio, nil
+}
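+
+// Illustrative sketch:
+//
+//    p, err := procfs.Self()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    io, err := p.NewIO()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Printf("read: %dB, written: %dB\n", io.ReadBytes, io.WriteBytes)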
diff --git a/vendor/src/github.com/prometheus/procfs/proc_limits.go b/vendor/src/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 0000000000..9f080b9f62
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,111 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits.
+type ProcLimits struct {
+ CPUTime int
+ FileSize int
+ DataSize int
+ StackSize int
+ CoreFileSize int
+ ResidentSet int
+ Processes int
+ OpenFiles int
+ LockedMemory int
+ AddressSpace int
+ FileLocks int
+ PendingSignals int
+ MsqqueueSize int
+ NicePriority int
+ RealtimePriority int
+ RealtimeTimeout int
+}
+
+const (
+ limitsFields = 3
+ limitsUnlimited = "unlimited"
+)
+
+var (
+ limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+ f, err := p.open("limits")
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf(
+ "couldn't parse %s line %s", f.Name(), s.Text())
+ }
+
+ switch fields[0] {
+ case "Max cpu time":
+ l.CPUTime, err = parseInt(fields[1])
+ case "Max file size":
+ l.FileSize, err = parseInt(fields[1])
+ case "Max data size":
+ l.DataSize, err = parseInt(fields[1])
+ case "Max stack size":
+ l.StackSize, err = parseInt(fields[1])
+ case "Max core file size":
+ l.CoreFileSize, err = parseInt(fields[1])
+ case "Max resident set":
+ l.ResidentSet, err = parseInt(fields[1])
+ case "Max processes":
+ l.Processes, err = parseInt(fields[1])
+ case "Max open files":
+ l.OpenFiles, err = parseInt(fields[1])
+ case "Max locked memory":
+ l.LockedMemory, err = parseInt(fields[1])
+ case "Max address space":
+ l.AddressSpace, err = parseInt(fields[1])
+ case "Max file locks":
+ l.FileLocks, err = parseInt(fields[1])
+ case "Max pending signals":
+ l.PendingSignals, err = parseInt(fields[1])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseInt(fields[1])
+ case "Max nice priority":
+ l.NicePriority, err = parseInt(fields[1])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseInt(fields[1])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseInt(fields[1])
+ }
+
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
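+
+// Illustrative sketch, assuming p is a Proc obtained via procfs.Self();
+// a value of -1 means "unlimited":
+//
+//    l, err := p.NewLimits()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Printf("max open files: %d\n", l.OpenFiles)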
+
+func parseInt(s string) (int, error) {
+ if s == limitsUnlimited {
+ return -1, nil
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ }
+ return int(i), nil
+}
diff --git a/vendor/src/github.com/prometheus/procfs/proc_stat.go b/vendor/src/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 0000000000..30a403b6c7
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,175 @@
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
+// required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic.
+// After much research it was determined that USER_HZ is actually hardcoded to
+// 100 on all Go-supported platforms as of the time of this writing. This is
+// why we decided to hardcode it here as well. It is not impossible that there
+// could be systems with exceptions, but they should be very exotic edge cases,
+// and in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime uint
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+ // The time the process started after system boot, expressed in clock ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize int
+ // Resident set size in pages.
+ RSS int
+
+ fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+ f, err := p.open("stat")
+ if err != nil {
+ return ProcStat{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignore int
+
+ s = ProcStat{PID: p.PID, fs: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf(
+ "unexpected format, couldn't extract comm: %s",
+ data,
+ )
+ }
+
+ s.Comm = string(data[l+1 : r])
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignore,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process start time in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ stat, err := s.fs.NewStat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / userHZ
+}
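+
+// Illustrative sketch, assuming stat was obtained via p.NewStat():
+//
+//    fmt.Printf("cpu: %.2fs, rss: %dB\n", stat.CPUTime(), stat.ResidentMemory())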
diff --git a/vendor/src/github.com/prometheus/procfs/stat.go b/vendor/src/github.com/prometheus/procfs/stat.go
new file mode 100644
index 0000000000..26fefb0fa0
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime int64
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+
+ return fs.NewStat()
+}
+
+// NewStat returns kernel/system statistics read from the proc filesystem.
+func (fs FS) NewStat() (Stat, error) {
+ f, err := fs.open("stat")
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := s.Text()
+ if !strings.HasPrefix(line, "btime") {
+ continue
+ }
+ fields := strings.Fields(line)
+ if len(fields) != 2 {
+ return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
+ }
+ i, err := strconv.ParseInt(fields[1], 10, 32)
+ if err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ }
+ return Stat{BootTime: i}, nil
+ }
+ if err := s.Err(); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ }
+
+ return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+}
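+
+// Illustrative sketch:
+//
+//    s, err := procfs.NewStat()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Printf("booted at unix time %d\n", s.BootTime)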
diff --git a/vendor/src/github.com/vishvananda/netlink/Makefile b/vendor/src/github.com/vishvananda/netlink/Makefile
index 1b977de4de..8dc5a92e98 100644
--- a/vendor/src/github.com/vishvananda/netlink/Makefile
+++ b/vendor/src/github.com/vishvananda/netlink/Makefile
@@ -11,14 +11,14 @@ goroot = $(addprefix ../../../,$(1))
unroot = $(subst ../../../,,$(1))
fmt = $(addprefix fmt-,$(1))
-all: fmt test
+all: test
$(call goroot,$(DEPS)):
go get $(call unroot,$@)
.PHONY: $(call testdirs,$(DIRS))
$(call testdirs,$(DIRS)):
- sudo -E go test -v github.com/vishvananda/netlink/$@
+ sudo -E go test -test.parallel 4 -timeout 60s -v github.com/vishvananda/netlink/$@
$(call fmt,$(call testdirs,$(DIRS))):
! gofmt -l $(subst fmt-,,$@)/*.go | grep ''
diff --git a/vendor/src/github.com/vishvananda/netlink/addr_linux.go b/vendor/src/github.com/vishvananda/netlink/addr_linux.go
index 9e4f62f1d5..b5eec65052 100644
--- a/vendor/src/github.com/vishvananda/netlink/addr_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/addr_linux.go
@@ -2,6 +2,7 @@ package netlink
import (
"fmt"
+ "log"
"net"
"strings"
"syscall"
@@ -15,24 +16,35 @@ const IFA_FLAGS = 0x8
// AddrAdd will add an IP address to a link device.
// Equivalent to: `ip addr add $addr dev $link`
func AddrAdd(link Link, addr *Addr) error {
+ return pkgHandle.AddrAdd(link, addr)
+}
- req := nl.NewNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
- return addrHandle(link, addr, req)
+// AddrAdd will add an IP address to a link device.
+// Equivalent to: `ip addr add $addr dev $link`
+func (h *Handle) AddrAdd(link Link, addr *Addr) error {
+ req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ return h.addrHandle(link, addr, req)
}
// AddrDel will delete an IP address from a link device.
// Equivalent to: `ip addr del $addr dev $link`
func AddrDel(link Link, addr *Addr) error {
- req := nl.NewNetlinkRequest(syscall.RTM_DELADDR, syscall.NLM_F_ACK)
- return addrHandle(link, addr, req)
+ return pkgHandle.AddrDel(link, addr)
+}
+
+// AddrDel will delete an IP address from a link device.
+// Equivalent to: `ip addr del $addr dev $link`
+func (h *Handle) AddrDel(link Link, addr *Addr) error {
+ req := h.newNetlinkRequest(syscall.RTM_DELADDR, syscall.NLM_F_ACK)
+ return h.addrHandle(link, addr, req)
}
-func addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error {
+func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error {
base := link.Attrs()
if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) {
return fmt.Errorf("label must begin with interface name")
}
- ensureIndex(base)
+ h.ensureIndex(base)
family := nl.GetIPFamily(addr.IP)
@@ -57,10 +69,14 @@ func addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error {
req.AddData(addressData)
if addr.Flags != 0 {
- b := make([]byte, 4)
- native.PutUint32(b, uint32(addr.Flags))
- flagsData := nl.NewRtAttr(IFA_FLAGS, b)
- req.AddData(flagsData)
+ if addr.Flags <= 0xff {
+ msg.IfAddrmsg.Flags = uint8(addr.Flags)
+ } else {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(addr.Flags))
+ flagsData := nl.NewRtAttr(IFA_FLAGS, b)
+ req.AddData(flagsData)
+ }
}
if addr.Label != "" {
@@ -76,7 +92,14 @@ func addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error {
// Equivalent to: `ip addr show`.
// The list can be filtered by link and ip family.
func AddrList(link Link, family int) ([]Addr, error) {
- req := nl.NewNetlinkRequest(syscall.RTM_GETADDR, syscall.NLM_F_DUMP)
+ return pkgHandle.AddrList(link, family)
+}
+
+// AddrList gets a list of IP addresses in the system.
+// Equivalent to: `ip addr show`.
+// The list can be filtered by link and ip family.
+func (h *Handle) AddrList(link Link, family int) ([]Addr, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETADDR, syscall.NLM_F_DUMP)
msg := nl.NewIfInfomsg(family)
req.AddData(msg)
@@ -85,62 +108,125 @@ func AddrList(link Link, family int) ([]Addr, error) {
return nil, err
}
- index := 0
+ indexFilter := 0
if link != nil {
base := link.Attrs()
- ensureIndex(base)
- index = base.Index
+ h.ensureIndex(base)
+ indexFilter = base.Index
}
var res []Addr
for _, m := range msgs {
- msg := nl.DeserializeIfAddrmsg(m)
+ addr, msgFamily, ifindex, err := parseAddr(m)
+ if err != nil {
+ return res, err
+ }
- if link != nil && msg.Index != uint32(index) {
+ if link != nil && ifindex != indexFilter {
// Ignore messages from other interfaces
continue
}
- if family != FAMILY_ALL && msg.Family != uint8(family) {
+ if family != FAMILY_ALL && msgFamily != family {
continue
}
- attrs, err := nl.ParseRouteAttr(m[msg.Len():])
- if err != nil {
- return nil, err
- }
+ res = append(res, addr)
+ }
- var local, dst *net.IPNet
- var addr Addr
- for _, attr := range attrs {
- switch attr.Attr.Type {
- case syscall.IFA_ADDRESS:
- dst = &net.IPNet{
- IP: attr.Value,
- Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
- }
- case syscall.IFA_LOCAL:
- local = &net.IPNet{
- IP: attr.Value,
- Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
- }
- case syscall.IFA_LABEL:
- addr.Label = string(attr.Value[:len(attr.Value)-1])
- case IFA_FLAGS:
- addr.Flags = int(native.Uint32(attr.Value[0:4]))
+ return res, nil
+}
+
+func parseAddr(m []byte) (addr Addr, family, index int, err error) {
+ msg := nl.DeserializeIfAddrmsg(m)
+
+ family = -1
+ index = -1
+
+ attrs, err1 := nl.ParseRouteAttr(m[msg.Len():])
+ if err1 != nil {
+ err = err1
+ return
+ }
+
+ family = int(msg.Family)
+ index = int(msg.Index)
+
+ var local, dst *net.IPNet
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.IFA_ADDRESS:
+ dst = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
}
+ case syscall.IFA_LOCAL:
+ local = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
+ }
+ case syscall.IFA_LABEL:
+ addr.Label = string(attr.Value[:len(attr.Value)-1])
+ case IFA_FLAGS:
+ addr.Flags = int(native.Uint32(attr.Value[0:4]))
}
+ }
- // IFA_LOCAL should be there but if not, fall back to IFA_ADDRESS
- if local != nil {
- addr.IPNet = local
- } else {
- addr.IPNet = dst
- }
- addr.Scope = int(msg.Scope)
+ // IFA_LOCAL should be there but if not, fall back to IFA_ADDRESS
+ if local != nil {
+ addr.IPNet = local
+ } else {
+ addr.IPNet = dst
+ }
+ addr.Scope = int(msg.Scope)
- res = append(res, addr)
+ return
+}
+
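+// AddrUpdate describes one address change, as delivered on the channel
+// passed to AddrSubscribe.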
+type AddrUpdate struct {
+ LinkAddress net.IPNet
+ LinkIndex int
+ NewAddr bool // true=added false=deleted
+}
+
+// AddrSubscribe takes a chan down which notifications will be sent
+// when addresses change. Close the 'done' chan to stop subscription.
+func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error {
+ s, err := nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR)
+ if err != nil {
+ return err
}
+ if done != nil {
+ go func() {
+ <-done
+ s.Close()
+ }()
+ }
+ go func() {
+ defer close(ch)
+ for {
+ msgs, err := s.Receive()
+ if err != nil {
+ log.Printf("netlink.AddrSubscribe: Receive() error: %v", err)
+ return
+ }
+ for _, m := range msgs {
+ msgType := m.Header.Type
+ if msgType != syscall.RTM_NEWADDR && msgType != syscall.RTM_DELADDR {
+ log.Printf("netlink.AddrSubscribe: bad message type: %d", msgType)
+ continue
+ }
- return res, nil
+ addr, _, ifindex, err := parseAddr(m.Data)
+ if err != nil {
+ log.Printf("netlink.AddrSubscribe: could not parse address: %v", err)
+ continue
+ }
+
+ ch <- AddrUpdate{LinkAddress: *addr.IPNet, LinkIndex: ifindex, NewAddr: msgType == syscall.RTM_NEWADDR}
+ }
+ }
+ }()
+
+ return nil
}
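+
+// Illustrative sketch of subscribing to address updates:
+//
+//    ch := make(chan netlink.AddrUpdate)
+//    done := make(chan struct{})
+//    if err := netlink.AddrSubscribe(ch, done); err != nil {
+//        log.Fatal(err)
+//    }
+//    for update := range ch {
+//        fmt.Printf("addr %s on link %d added=%v\n",
+//            update.LinkAddress.String(), update.LinkIndex, update.NewAddr)
+//    }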
diff --git a/vendor/src/github.com/vishvananda/netlink/bpf_linux.go b/vendor/src/github.com/vishvananda/netlink/bpf_linux.go
new file mode 100644
index 0000000000..acd9490131
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/bpf_linux.go
@@ -0,0 +1,60 @@
+package netlink
+
+/*
+#include <asm/types.h>
+#include <asm/unistd.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+
+static int load_simple_bpf(int prog_type) {
+#ifdef __NR_bpf
+ // { return 1; }
+ __u64 __attribute__((aligned(8))) insns[] = {
+ 0x00000001000000b7ull,
+ 0x0000000000000095ull,
+ };
+ __u8 __attribute__((aligned(8))) license[] = "ASL2";
+ // Copied from a header file since libc is notoriously slow to update.
+ // The call will succeed or fail and that will be our indication on
+ // whether or not it is supported.
+ struct {
+ __u32 prog_type;
+ __u32 insn_cnt;
+ __u64 insns;
+ __u64 license;
+ __u32 log_level;
+ __u32 log_size;
+ __u64 log_buf;
+ __u32 kern_version;
+ } __attribute__((aligned(8))) attr = {
+ .prog_type = prog_type,
+ .insn_cnt = 2,
+ .insns = (uintptr_t)&insns,
+ .license = (uintptr_t)&license,
+ };
+ return syscall(__NR_bpf, 5, &attr, sizeof(attr));
+#else
+ errno = EINVAL;
+ return -1;
+#endif
+}
+*/
+import "C"
+
+type BpfProgType C.int
+
+const (
+ BPF_PROG_TYPE_UNSPEC BpfProgType = iota
+ BPF_PROG_TYPE_SOCKET_FILTER
+ BPF_PROG_TYPE_KPROBE
+ BPF_PROG_TYPE_SCHED_CLS
+ BPF_PROG_TYPE_SCHED_ACT
+)
+
+// loadSimpleBpf loads a trivial bpf program for testing purposes
+func loadSimpleBpf(progType BpfProgType) (int, error) {
+ fd, err := C.load_simple_bpf(C.int(progType))
+ return int(fd), err
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/class.go b/vendor/src/github.com/vishvananda/netlink/class.go
index 264e3ad003..4577304100 100644
--- a/vendor/src/github.com/vishvananda/netlink/class.go
+++ b/vendor/src/github.com/vishvananda/netlink/class.go
@@ -9,7 +9,7 @@ type Class interface {
Type() string
}
-// Class represents a netlink class. A filter is associated with a link,
+// ClassAttrs represents a netlink class. A class is associated with a link,
// has a handle and a parent. The root class of a device should have a
// parent == HANDLE_ROOT.
type ClassAttrs struct {
@@ -20,7 +20,7 @@ type ClassAttrs struct {
}
func (q ClassAttrs) String() string {
- return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Leaf: %s}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Leaf)
+ return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Leaf: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Leaf)
}
type HtbClassAttrs struct {
@@ -38,7 +38,7 @@ func (q HtbClassAttrs) String() string {
return fmt.Sprintf("{Rate: %d, Ceil: %d, Buffer: %d, Cbuffer: %d}", q.Rate, q.Ceil, q.Buffer, q.Cbuffer)
}
-// Htb class
+// HtbClass represents an Htb class
type HtbClass struct {
ClassAttrs
Rate uint64
@@ -87,11 +87,11 @@ func (q HtbClass) String() string {
return fmt.Sprintf("{Rate: %d, Ceil: %d, Buffer: %d, Cbuffer: %d}", q.Rate, q.Ceil, q.Buffer, q.Cbuffer)
}
-func (class *HtbClass) Attrs() *ClassAttrs {
- return &class.ClassAttrs
+func (q *HtbClass) Attrs() *ClassAttrs {
+ return &q.ClassAttrs
}
-func (class *HtbClass) Type() string {
+func (q *HtbClass) Type() string {
return "htb"
}
diff --git a/vendor/src/github.com/vishvananda/netlink/class_linux.go b/vendor/src/github.com/vishvananda/netlink/class_linux.go
index 4a52d2b997..be62abd9cb 100644
--- a/vendor/src/github.com/vishvananda/netlink/class_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/class_linux.go
@@ -10,15 +10,27 @@ import (
// ClassDel will delete a class from the system.
// Equivalent to: `tc class del $class`
func ClassDel(class Class) error {
- return classModify(syscall.RTM_DELTCLASS, 0, class)
+ return pkgHandle.ClassDel(class)
+}
+
+// ClassDel will delete a class from the system.
+// Equivalent to: `tc class del $class`
+func (h *Handle) ClassDel(class Class) error {
+ return h.classModify(syscall.RTM_DELTCLASS, 0, class)
}
// ClassChange will change a class in place
// Equivalent to: `tc class change $class`
// The parent and handle MUST NOT be changed.
-
func ClassChange(class Class) error {
- return classModify(syscall.RTM_NEWTCLASS, 0, class)
+ return pkgHandle.ClassChange(class)
+}
+
+// ClassChange will change a class in place
+// Equivalent to: `tc class change $class`
+// The parent and handle MUST NOT be changed.
+func (h *Handle) ClassChange(class Class) error {
+ return h.classModify(syscall.RTM_NEWTCLASS, 0, class)
}
// ClassReplace will replace a class in the system.
@@ -27,21 +39,36 @@ func ClassChange(class Class) error {
// If a class already exists with this parent/handle pair, the class is changed.
// If a class does not already exist with this parent/handle, a new class is created.
func ClassReplace(class Class) error {
- return classModify(syscall.RTM_NEWTCLASS, syscall.NLM_F_CREATE, class)
+ return pkgHandle.ClassReplace(class)
+}
+
+// ClassReplace will replace a class in the system.
+// Equivalent to: `tc class replace $class`
+// The handle MAY be changed.
+// If a class already exists with this parent/handle pair, the class is changed.
+// If a class does not already exist with this parent/handle, a new class is created.
+func (h *Handle) ClassReplace(class Class) error {
+ return h.classModify(syscall.RTM_NEWTCLASS, syscall.NLM_F_CREATE, class)
}
// ClassAdd will add a class to the system.
// Equivalent to: `tc class add $class`
func ClassAdd(class Class) error {
- return classModify(
+ return pkgHandle.ClassAdd(class)
+}
+
+// ClassAdd will add a class to the system.
+// Equivalent to: `tc class add $class`
+func (h *Handle) ClassAdd(class Class) error {
+ return h.classModify(
syscall.RTM_NEWTCLASS,
syscall.NLM_F_CREATE|syscall.NLM_F_EXCL,
class,
)
}
-func classModify(cmd, flags int, class Class) error {
- req := nl.NewNetlinkRequest(cmd, flags|syscall.NLM_F_ACK)
+func (h *Handle) classModify(cmd, flags int, class Class) error {
+ req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK)
base := class.Attrs()
msg := &nl.TcMsg{
Family: nl.FAMILY_ALL,
@@ -73,20 +100,20 @@ func classPayload(req *nl.NetlinkRequest, class Class) error {
opt.Prio = htb.Prio
// TODO: Handle Debug properly. For now default to 0
/* Calculate {R,C}Tab and set Rate and Ceil */
- cell_log := -1
- ccell_log := -1
+ cellLog := -1
+ ccellLog := -1
linklayer := nl.LINKLAYER_ETHERNET
mtu := 1600
var rtab [256]uint32
var ctab [256]uint32
tcrate := nl.TcRateSpec{Rate: uint32(htb.Rate)}
- if CalcRtable(&tcrate, rtab, cell_log, uint32(mtu), linklayer) < 0 {
- return errors.New("HTB: failed to calculate rate table.")
+ if CalcRtable(&tcrate, rtab, cellLog, uint32(mtu), linklayer) < 0 {
+ return errors.New("HTB: failed to calculate rate table")
}
opt.Rate = tcrate
tcceil := nl.TcRateSpec{Rate: uint32(htb.Ceil)}
- if CalcRtable(&tcceil, ctab, ccell_log, uint32(mtu), linklayer) < 0 {
- return errors.New("HTB: failed to calculate ceil rate table.")
+ if CalcRtable(&tcceil, ctab, ccellLog, uint32(mtu), linklayer) < 0 {
+ return errors.New("HTB: failed to calculate ceil rate table")
}
opt.Ceil = tcceil
nl.NewRtAttrChild(options, nl.TCA_HTB_PARMS, opt.Serialize())
@@ -101,14 +128,21 @@ func classPayload(req *nl.NetlinkRequest, class Class) error {
// Equivalent to: `tc class show`.
// Generally returns nothing if link and parent are not specified.
func ClassList(link Link, parent uint32) ([]Class, error) {
- req := nl.NewNetlinkRequest(syscall.RTM_GETTCLASS, syscall.NLM_F_DUMP)
+ return pkgHandle.ClassList(link, parent)
+}
+
+// ClassList gets a list of classes in the system.
+// Equivalent to: `tc class show`.
+// Generally returns nothing if link and parent are not specified.
+func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETTCLASS, syscall.NLM_F_DUMP)
msg := &nl.TcMsg{
Family: nl.FAMILY_ALL,
Parent: parent,
}
if link != nil {
base := link.Attrs()
- ensureIndex(base)
+ h.ensureIndex(base)
msg.Ifindex = int32(base.Index)
}
req.AddData(msg)
diff --git a/vendor/src/github.com/vishvananda/netlink/filter.go b/vendor/src/github.com/vishvananda/netlink/filter.go
index 80ef34ded4..7e178ee00a 100644
--- a/vendor/src/github.com/vishvananda/netlink/filter.go
+++ b/vendor/src/github.com/vishvananda/netlink/filter.go
@@ -3,6 +3,7 @@ package netlink
import (
"errors"
"fmt"
+
"github.com/vishvananda/netlink/nl"
)
@@ -11,7 +12,7 @@ type Filter interface {
Type() string
}
-// Filter represents a netlink filter. A filter is associated with a link,
+// FilterAttrs represents a netlink filter. A filter is associated with a link,
// has a handle and a parent. The root filter of a device should have a
// parent == HANDLE_ROOT.
type FilterAttrs struct {
@@ -26,11 +27,170 @@ func (q FilterAttrs) String() string {
return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Priority: %d, Protocol: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Priority, q.Protocol)
}
+type TcAct int32
+
+const (
+ TC_ACT_UNSPEC TcAct = -1
+ TC_ACT_OK TcAct = 0
+ TC_ACT_RECLASSIFY TcAct = 1
+ TC_ACT_SHOT TcAct = 2
+ TC_ACT_PIPE TcAct = 3
+ TC_ACT_STOLEN TcAct = 4
+ TC_ACT_QUEUED TcAct = 5
+ TC_ACT_REPEAT TcAct = 6
+ TC_ACT_REDIRECT TcAct = 7
+ TC_ACT_JUMP TcAct = 0x10000000
+)
+
+func (a TcAct) String() string {
+ switch a {
+ case TC_ACT_UNSPEC:
+ return "unspec"
+ case TC_ACT_OK:
+ return "ok"
+ case TC_ACT_RECLASSIFY:
+ return "reclassify"
+ case TC_ACT_SHOT:
+ return "shot"
+ case TC_ACT_PIPE:
+ return "pipe"
+ case TC_ACT_STOLEN:
+ return "stolen"
+ case TC_ACT_QUEUED:
+ return "queued"
+ case TC_ACT_REPEAT:
+ return "repeat"
+ case TC_ACT_REDIRECT:
+ return "redirect"
+ case TC_ACT_JUMP:
+ return "jump"
+ }
+ return fmt.Sprintf("0x%x", a)
+}
+
+type TcPolAct int32
+
+const (
+ TC_POLICE_UNSPEC TcPolAct = TcPolAct(TC_ACT_UNSPEC)
+ TC_POLICE_OK TcPolAct = TcPolAct(TC_ACT_OK)
+ TC_POLICE_RECLASSIFY TcPolAct = TcPolAct(TC_ACT_RECLASSIFY)
+ TC_POLICE_SHOT TcPolAct = TcPolAct(TC_ACT_SHOT)
+ TC_POLICE_PIPE TcPolAct = TcPolAct(TC_ACT_PIPE)
+)
+
+func (a TcPolAct) String() string {
+ switch a {
+ case TC_POLICE_UNSPEC:
+ return "unspec"
+ case TC_POLICE_OK:
+ return "ok"
+ case TC_POLICE_RECLASSIFY:
+ return "reclassify"
+ case TC_POLICE_SHOT:
+ return "shot"
+ case TC_POLICE_PIPE:
+ return "pipe"
+ }
+ return fmt.Sprintf("0x%x", a)
+}
+
+type ActionAttrs struct {
+ Index int
+ Capab int
+ Action TcAct
+ Refcnt int
+ Bindcnt int
+}
+
+func (q ActionAttrs) String() string {
+ return fmt.Sprintf("{Index: %d, Capab: %x, Action: %s, Refcnt: %d, Bindcnt: %d}", q.Index, q.Capab, q.Action.String(), q.Refcnt, q.Bindcnt)
+}
+
+// Action represents an action in any supported filter.
+type Action interface {
+ Attrs() *ActionAttrs
+ Type() string
+}
+
+type GenericAction struct {
+ ActionAttrs
+}
+
+func (action *GenericAction) Type() string {
+ return "generic"
+}
+
+func (action *GenericAction) Attrs() *ActionAttrs {
+ return &action.ActionAttrs
+}
+
+type BpfAction struct {
+ ActionAttrs
+ Fd int
+ Name string
+}
+
+func (action *BpfAction) Type() string {
+ return "bpf"
+}
+
+func (action *BpfAction) Attrs() *ActionAttrs {
+ return &action.ActionAttrs
+}
+
+type MirredAct uint8
+
+func (a MirredAct) String() string {
+ switch a {
+ case TCA_EGRESS_REDIR:
+ return "egress redir"
+ case TCA_EGRESS_MIRROR:
+ return "egress mirror"
+ case TCA_INGRESS_REDIR:
+ return "ingress redir"
+ case TCA_INGRESS_MIRROR:
+ return "ingress mirror"
+ }
+ return "unknown"
+}
+
+const (
+ TCA_EGRESS_REDIR MirredAct = 1 /* packet redirect to EGRESS*/
+ TCA_EGRESS_MIRROR MirredAct = 2 /* mirror packet to EGRESS */
+ TCA_INGRESS_REDIR MirredAct = 3 /* packet redirect to INGRESS*/
+ TCA_INGRESS_MIRROR MirredAct = 4 /* mirror packet to INGRESS */
+)
+
+type MirredAction struct {
+ ActionAttrs
+ MirredAction MirredAct
+ Ifindex int
+}
+
+func (action *MirredAction) Type() string {
+ return "mirred"
+}
+
+func (action *MirredAction) Attrs() *ActionAttrs {
+ return &action.ActionAttrs
+}
+
+func NewMirredAction(redirIndex int) *MirredAction {
+ return &MirredAction{
+ ActionAttrs: ActionAttrs{
+ Action: TC_ACT_STOLEN,
+ },
+ MirredAction: TCA_EGRESS_REDIR,
+ Ifindex: redirIndex,
+ }
+}
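+
+// Illustrative sketch: build a mirred action that redirects matched packets
+// to the interface with index redirIndex (a caller-supplied value), e.g. for
+// use in a U32 filter's Actions list:
+//
+//    action := netlink.NewMirredAction(redirIndex)
+//    action.MirredAction = netlink.TCA_EGRESS_MIRROR // mirror instead of redirect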
+
// U32 filters on many packet related properties
type U32 struct {
FilterAttrs
- // Currently only supports redirecting to another interface
+ ClassId uint32
RedirIndex int
+ Actions []Action
}
func (filter *U32) Attrs() *FilterAttrs {
@@ -52,17 +212,18 @@ type FilterFwAttrs struct {
Rate uint32
AvRate uint32
PeakRate uint32
- Action int
+ Action TcPolAct
Overhead uint16
LinkLayer int
}
-// FwFilter filters on firewall marks
+// Fw filters on firewall marks
type Fw struct {
FilterAttrs
ClassId uint32
- Police nl.TcPolice
- InDev string
+ // TODO remove nl type from interface
+ Police nl.TcPolice
+ InDev string
// TODO Action
Mask uint32
AvRate uint32
@@ -73,8 +234,8 @@ type Fw struct {
func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) {
var rtab [256]uint32
var ptab [256]uint32
- rcell_log := -1
- pcell_log := -1
+ rcellLog := -1
+ pcellLog := -1
avrate := fattrs.AvRate / 8
police := nl.TcPolice{}
police.Rate.Rate = fattrs.Rate / 8
@@ -90,8 +251,8 @@ func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) {
if police.Rate.Rate != 0 {
police.Rate.Mpu = fattrs.Mpu
police.Rate.Overhead = fattrs.Overhead
- if CalcRtable(&police.Rate, rtab, rcell_log, fattrs.Mtu, linklayer) < 0 {
- return nil, errors.New("TBF: failed to calculate rate table.")
+ if CalcRtable(&police.Rate, rtab, rcellLog, fattrs.Mtu, linklayer) < 0 {
+ return nil, errors.New("TBF: failed to calculate rate table")
}
police.Burst = uint32(Xmittime(uint64(police.Rate.Rate), uint32(buffer)))
}
@@ -99,8 +260,8 @@ func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) {
if police.PeakRate.Rate != 0 {
police.PeakRate.Mpu = fattrs.Mpu
police.PeakRate.Overhead = fattrs.Overhead
- if CalcRtable(&police.PeakRate, ptab, pcell_log, fattrs.Mtu, linklayer) < 0 {
- return nil, errors.New("POLICE: failed to calculate peak rate table.")
+ if CalcRtable(&police.PeakRate, ptab, pcellLog, fattrs.Mtu, linklayer) < 0 {
+ return nil, errors.New("POLICE: failed to calculate peak rate table")
}
}
@@ -124,6 +285,22 @@ func (filter *Fw) Type() string {
return "fw"
}
+type BpfFilter struct {
+ FilterAttrs
+ ClassId uint32
+ Fd int
+ Name string
+ DirectAction bool
+}
+
+func (filter *BpfFilter) Type() string {
+ return "bpf"
+}
+
+func (filter *BpfFilter) Attrs() *FilterAttrs {
+ return &filter.FilterAttrs
+}
+
// GenericFilter filters represent types that are not currently understood
// by this netlink library.
type GenericFilter struct {
diff --git a/vendor/src/github.com/vishvananda/netlink/filter_linux.go b/vendor/src/github.com/vishvananda/netlink/filter_linux.go
index 1dc688b124..2a8cf89022 100644
--- a/vendor/src/github.com/vishvananda/netlink/filter_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/filter_linux.go
@@ -12,7 +12,13 @@ import (
// FilterDel will delete a filter from the system.
// Equivalent to: `tc filter del $filter`
func FilterDel(filter Filter) error {
- req := nl.NewNetlinkRequest(syscall.RTM_DELTFILTER, syscall.NLM_F_ACK)
+ return pkgHandle.FilterDel(filter)
+}
+
+// FilterDel will delete a filter from the system.
+// Equivalent to: `tc filter del $filter`
+func (h *Handle) FilterDel(filter Filter) error {
+ req := h.newNetlinkRequest(syscall.RTM_DELTFILTER, syscall.NLM_F_ACK)
base := filter.Attrs()
msg := &nl.TcMsg{
Family: nl.FAMILY_ALL,
@@ -30,8 +36,14 @@ func FilterDel(filter Filter) error {
// FilterAdd will add a filter to the system.
// Equivalent to: `tc filter add $filter`
func FilterAdd(filter Filter) error {
+ return pkgHandle.FilterAdd(filter)
+}
+
+// FilterAdd will add a filter to the system.
+// Equivalent to: `tc filter add $filter`
+func (h *Handle) FilterAdd(filter Filter) error {
native = nl.NativeEndian()
- req := nl.NewNetlinkRequest(syscall.RTM_NEWTFILTER, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ req := h.newNetlinkRequest(syscall.RTM_NEWTFILTER, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
base := filter.Attrs()
msg := &nl.TcMsg{
Family: nl.FAMILY_ALL,
@@ -52,17 +64,17 @@ func FilterAdd(filter Filter) error {
}
sel.Keys = append(sel.Keys, nl.TcU32Key{})
nl.NewRtAttrChild(options, nl.TCA_U32_SEL, sel.Serialize())
- actions := nl.NewRtAttrChild(options, nl.TCA_U32_ACT, nil)
- table := nl.NewRtAttrChild(actions, nl.TCA_ACT_TAB, nil)
- nl.NewRtAttrChild(table, nl.TCA_KIND, nl.ZeroTerminated("mirred"))
- // redirect to other interface
- mir := nl.TcMirred{
- Action: nl.TC_ACT_STOLEN,
- Eaction: nl.TCA_EGRESS_REDIR,
- Ifindex: uint32(u32.RedirIndex),
+ if u32.ClassId != 0 {
+ nl.NewRtAttrChild(options, nl.TCA_U32_CLASSID, nl.Uint32Attr(u32.ClassId))
+ }
+ actionsAttr := nl.NewRtAttrChild(options, nl.TCA_U32_ACT, nil)
+ // backwards compatibility
+ if u32.RedirIndex != 0 {
+ u32.Actions = append([]Action{NewMirredAction(u32.RedirIndex)}, u32.Actions...)
+ }
+ if err := encodeActions(actionsAttr, u32.Actions); err != nil {
+ return err
}
- aopts := nl.NewRtAttrChild(table, nl.TCA_OPTIONS, nil)
- nl.NewRtAttrChild(aopts, nl.TCA_MIRRED_PARMS, mir.Serialize())
} else if fw, ok := filter.(*Fw); ok {
if fw.Mask != 0 {
b := make([]byte, 4)
@@ -90,6 +102,21 @@ func FilterAdd(filter Filter) error {
native.PutUint32(b, fw.ClassId)
nl.NewRtAttrChild(options, nl.TCA_FW_CLASSID, b)
}
+ } else if bpf, ok := filter.(*BpfFilter); ok {
+ var bpfFlags uint32
+ if bpf.ClassId != 0 {
+ nl.NewRtAttrChild(options, nl.TCA_BPF_CLASSID, nl.Uint32Attr(bpf.ClassId))
+ }
+ if bpf.Fd >= 0 {
+ nl.NewRtAttrChild(options, nl.TCA_BPF_FD, nl.Uint32Attr((uint32(bpf.Fd))))
+ }
+ if bpf.Name != "" {
+ nl.NewRtAttrChild(options, nl.TCA_BPF_NAME, nl.ZeroTerminated(bpf.Name))
+ }
+ if bpf.DirectAction {
+ bpfFlags |= nl.TCA_BPF_FLAG_ACT_DIRECT
+ }
+ nl.NewRtAttrChild(options, nl.TCA_BPF_FLAGS, nl.Uint32Attr(bpfFlags))
}
req.AddData(options)
@@ -101,14 +128,21 @@ func FilterAdd(filter Filter) error {
// Equivalent to: `tc filter show`.
// Generally returns nothing if link and parent are not specified.
func FilterList(link Link, parent uint32) ([]Filter, error) {
- req := nl.NewNetlinkRequest(syscall.RTM_GETTFILTER, syscall.NLM_F_DUMP)
+ return pkgHandle.FilterList(link, parent)
+}
+
+// FilterList gets a list of filters in the system.
+// Equivalent to: `tc filter show`.
+// Generally returns nothing if link and parent are not specified.
+func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETTFILTER, syscall.NLM_F_DUMP)
msg := &nl.TcMsg{
Family: nl.FAMILY_ALL,
Parent: parent,
}
if link != nil {
base := link.Attrs()
- ensureIndex(base)
+ h.ensureIndex(base)
msg.Ifindex = int32(base.Index)
}
req.AddData(msg)
@@ -147,29 +181,34 @@ func FilterList(link Link, parent uint32) ([]Filter, error) {
filter = &U32{}
case "fw":
filter = &Fw{}
+ case "bpf":
+ filter = &BpfFilter{}
default:
filter = &GenericFilter{FilterType: filterType}
}
case nl.TCA_OPTIONS:
+ data, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, err
+ }
switch filterType {
case "u32":
- data, err := nl.ParseRouteAttr(attr.Value)
- if err != nil {
- return nil, err
- }
detailed, err = parseU32Data(filter, data)
if err != nil {
return nil, err
}
case "fw":
- data, err := nl.ParseRouteAttr(attr.Value)
+ detailed, err = parseFwData(filter, data)
if err != nil {
return nil, err
}
- detailed, err = parseFwData(filter, data)
+ case "bpf":
+ detailed, err = parseBpfData(filter, data)
if err != nil {
return nil, err
}
+ default:
+ detailed = true
}
}
}
@@ -183,6 +222,129 @@ func FilterList(link Link, parent uint32) ([]Filter, error) {
return res, nil
}
+func toTcGen(attrs *ActionAttrs, tcgen *nl.TcGen) {
+ tcgen.Index = uint32(attrs.Index)
+ tcgen.Capab = uint32(attrs.Capab)
+ tcgen.Action = int32(attrs.Action)
+ tcgen.Refcnt = int32(attrs.Refcnt)
+ tcgen.Bindcnt = int32(attrs.Bindcnt)
+}
+
+func toAttrs(tcgen *nl.TcGen, attrs *ActionAttrs) {
+ attrs.Index = int(tcgen.Index)
+ attrs.Capab = int(tcgen.Capab)
+ attrs.Action = TcAct(tcgen.Action)
+ attrs.Refcnt = int(tcgen.Refcnt)
+ attrs.Bindcnt = int(tcgen.Bindcnt)
+}
+
+func encodeActions(attr *nl.RtAttr, actions []Action) error {
+ tabIndex := int(nl.TCA_ACT_TAB)
+
+ for _, action := range actions {
+ switch action := action.(type) {
+ default:
+ return fmt.Errorf("unknown action type %s", action.Type())
+ case *MirredAction:
+ table := nl.NewRtAttrChild(attr, tabIndex, nil)
+ tabIndex++
+ nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("mirred"))
+ aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil)
+ mirred := nl.TcMirred{
+ Eaction: int32(action.MirredAction),
+ Ifindex: uint32(action.Ifindex),
+ }
+ toTcGen(action.Attrs(), &mirred.TcGen)
+ nl.NewRtAttrChild(aopts, nl.TCA_MIRRED_PARMS, mirred.Serialize())
+ case *BpfAction:
+ table := nl.NewRtAttrChild(attr, tabIndex, nil)
+ tabIndex++
+ nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("bpf"))
+ aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil)
+ gen := nl.TcGen{}
+ toTcGen(action.Attrs(), &gen)
+ nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_PARMS, gen.Serialize())
+ nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_FD, nl.Uint32Attr(uint32(action.Fd)))
+ nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_NAME, nl.ZeroTerminated(action.Name))
+ case *GenericAction:
+ table := nl.NewRtAttrChild(attr, tabIndex, nil)
+ tabIndex++
+ nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("gact"))
+ aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil)
+ gen := nl.TcGen{}
+ toTcGen(action.Attrs(), &gen)
+ nl.NewRtAttrChild(aopts, nl.TCA_GACT_PARMS, gen.Serialize())
+ }
+ }
+ return nil
+}
+
+func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
+ var actions []Action
+ for _, table := range tables {
+ var action Action
+ var actionType string
+ aattrs, err := nl.ParseRouteAttr(table.Value)
+ if err != nil {
+ return nil, err
+ }
+ nextattr:
+ for _, aattr := range aattrs {
+ switch aattr.Attr.Type {
+ case nl.TCA_KIND:
+ actionType = string(aattr.Value[:len(aattr.Value)-1])
+ // only parse action kinds we understand: mirred, bpf, gact
+ switch actionType {
+ case "mirred":
+ action = &MirredAction{}
+ case "bpf":
+ action = &BpfAction{}
+ case "gact":
+ action = &GenericAction{}
+ default:
+ break nextattr
+ }
+ case nl.TCA_OPTIONS:
+ adata, err := nl.ParseRouteAttr(aattr.Value)
+ if err != nil {
+ return nil, err
+ }
+ for _, adatum := range adata {
+ switch actionType {
+ case "mirred":
+ switch adatum.Attr.Type {
+ case nl.TCA_MIRRED_PARMS:
+ mirred := *nl.DeserializeTcMirred(adatum.Value)
+ toAttrs(&mirred.TcGen, action.Attrs())
+ action.(*MirredAction).ActionAttrs = ActionAttrs{}
+ action.(*MirredAction).Ifindex = int(mirred.Ifindex)
+ action.(*MirredAction).MirredAction = MirredAct(mirred.Eaction)
+ }
+ case "bpf":
+ switch adatum.Attr.Type {
+ case nl.TCA_ACT_BPF_PARMS:
+ gen := *nl.DeserializeTcGen(adatum.Value)
+ toAttrs(&gen, action.Attrs())
+ case nl.TCA_ACT_BPF_FD:
+ action.(*BpfAction).Fd = int(native.Uint32(adatum.Value[0:4]))
+ case nl.TCA_ACT_BPF_NAME:
+ action.(*BpfAction).Name = string(adatum.Value[:len(adatum.Value)-1])
+ }
+ case "gact":
+ switch adatum.Attr.Type {
+ case nl.TCA_GACT_PARMS:
+ gen := *nl.DeserializeTcGen(adatum.Value)
+ toAttrs(&gen, action.Attrs())
+ }
+ }
+ }
+ }
+ }
+ actions = append(actions, action)
+ }
+ return actions, nil
+}
+
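FilterList now surfaces these parsed actions on the returned U32 structs, so callers can type-switch on them the same way parseActions does internally. A hedged sketch (the device name and the ingress parent handle are assumptions):

package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0") // hypothetical device
	if err != nil {
		log.Fatal(err)
	}
	filters, err := netlink.FilterList(link, netlink.MakeHandle(0xffff, 0))
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range filters {
		u32, ok := f.(*netlink.U32)
		if !ok {
			continue
		}
		for _, a := range u32.Actions {
			switch a := a.(type) {
			case *netlink.MirredAction:
				fmt.Printf("mirred %s -> ifindex %d\n", a.MirredAction, a.Ifindex)
			case *netlink.BpfAction:
				fmt.Printf("bpf %q fd=%d\n", a.Name, a.Fd)
			}
		}
	}
}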
func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
native = nl.NativeEndian()
u32 := filter.(*U32)
@@ -197,34 +359,17 @@ func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error)
return detailed, nil
}
case nl.TCA_U32_ACT:
- table, err := nl.ParseRouteAttr(datum.Value)
+ tables, err := nl.ParseRouteAttr(datum.Value)
if err != nil {
return detailed, err
}
- if len(table) != 1 || table[0].Attr.Type != nl.TCA_ACT_TAB {
- return detailed, fmt.Errorf("Action table not formed properly")
+ u32.Actions, err = parseActions(tables)
+ if err != nil {
+ return detailed, err
}
- aattrs, err := nl.ParseRouteAttr(table[0].Value)
- for _, aattr := range aattrs {
- switch aattr.Attr.Type {
- case nl.TCA_KIND:
- actionType := string(aattr.Value[:len(aattr.Value)-1])
- // only parse if the action is mirred
- if actionType != "mirred" {
- return detailed, nil
- }
- case nl.TCA_OPTIONS:
- adata, err := nl.ParseRouteAttr(aattr.Value)
- if err != nil {
- return detailed, err
- }
- for _, adatum := range adata {
- switch adatum.Attr.Type {
- case nl.TCA_MIRRED_PARMS:
- mir := nl.DeserializeTcMirred(adatum.Value)
- u32.RedirIndex = int(mir.Ifindex)
- }
- }
+ for _, action := range u32.Actions {
+ if action, ok := action.(*MirredAction); ok {
+ u32.RedirIndex = int(action.Ifindex)
}
}
}
@@ -261,6 +406,28 @@ func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
return detailed, nil
}
+func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
+ native = nl.NativeEndian()
+ bpf := filter.(*BpfFilter)
+ detailed := true
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.TCA_BPF_FD:
+ bpf.Fd = int(native.Uint32(datum.Value[0:4]))
+ case nl.TCA_BPF_NAME:
+ bpf.Name = string(datum.Value[:len(datum.Value)-1])
+ case nl.TCA_BPF_CLASSID:
+ bpf.ClassId = native.Uint32(datum.Value[0:4])
+ case nl.TCA_BPF_FLAGS:
+ flags := native.Uint32(datum.Value[0:4])
+ if (flags & nl.TCA_BPF_FLAG_ACT_DIRECT) != 0 {
+ bpf.DirectAction = true
+ }
+ }
+ }
+ return detailed, nil
+}
+
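parseBpfData is the read-side counterpart of the TCA_BPF_* attributes encoded in FilterAdd above. A minimal write-side sketch, assuming a classifier program fd obtained elsewhere via BPF_PROG_LOAD (program loading is out of scope here) and FilterAttrs field names not shown in this hunk:

package main

import (
	"log"

	"github.com/vishvananda/netlink"
)

// attachBpf attaches an already-loaded classifier to a link's ingress hook.
func attachBpf(link netlink.Link, fd int) error {
	return netlink.FilterAdd(&netlink.BpfFilter{
		FilterAttrs: netlink.FilterAttrs{ // field names assumed
			LinkIndex: link.Attrs().Index,
			Parent:    netlink.HANDLE_MIN_INGRESS,
		},
		Fd:           fd,
		Name:         "my_prog", // hypothetical name shown by `tc filter show`
		DirectAction: true,      // sets TCA_BPF_FLAG_ACT_DIRECT
	})
}

func main() {
	link, err := netlink.LinkByName("eth0") // hypothetical device
	if err != nil {
		log.Fatal(err)
	}
	if err := attachBpf(link, 3 /* hypothetical prog fd */); err != nil {
		log.Fatal(err)
	}
}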
func AlignToAtm(size uint) uint {
var linksize, cells int
cells = int(size / nl.ATM_CELL_PAYLOAD)
@@ -283,27 +450,27 @@ func AdjustSize(sz uint, mpu uint, linklayer int) uint {
}
}
-func CalcRtable(rate *nl.TcRateSpec, rtab [256]uint32, cell_log int, mtu uint32, linklayer int) int {
+func CalcRtable(rate *nl.TcRateSpec, rtab [256]uint32, cellLog int, mtu uint32, linklayer int) int {
bps := rate.Rate
mpu := rate.Mpu
var sz uint
if mtu == 0 {
mtu = 2047
}
- if cell_log < 0 {
- cell_log = 0
- for (mtu >> uint(cell_log)) > 255 {
- cell_log++
+ if cellLog < 0 {
+ cellLog = 0
+ for (mtu >> uint(cellLog)) > 255 {
+ cellLog++
}
}
for i := 0; i < 256; i++ {
- sz = AdjustSize(uint((i+1)<<uint32(cell_log)), uint(mpu), linklayer)
+ sz = AdjustSize(uint((i+1)<<uint32(cellLog)), uint(mpu), linklayer)
rtab[i] = uint32(Xmittime(uint64(bps), uint32(sz)))
}
rate.CellAlign = -1
- rate.CellLog = uint8(cell_log)
+ rate.CellLog = uint8(cellLog)
rate.Linklayer = uint8(linklayer & nl.TC_LINKLAYER_MASK)
- return cell_log
+ return cellLog
}
func DeserializeRtab(b []byte) [256]uint32 {
diff --git a/vendor/src/github.com/vishvananda/netlink/handle.go b/vendor/src/github.com/vishvananda/netlink/handle.go
new file mode 100644
index 0000000000..a96d2397f4
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/handle.go
@@ -0,0 +1,86 @@
+package netlink
+
+import (
+ "sync/atomic"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+ "github.com/vishvananda/netns"
+)
+
+// Empty handle used by the netlink package methods
+var pkgHandle = &Handle{}
+
+// Handle is a handle for netlink requests on a specific
+// network namespace. All requests share the same netlink
+// socket, which is released when the handle is deleted.
+type Handle struct {
+ seq uint32
+ routeSocket *nl.NetlinkSocket
+ xfrmSocket *nl.NetlinkSocket
+ lookupByDump bool
+}
+
+// NewHandle returns a netlink handle on the current network namespace.
+func NewHandle() (*Handle, error) {
+ return newHandle(netns.None(), netns.None())
+}
+
+// NewHandleAt returns a netlink handle on the network namespace
+// specified by ns. If ns=netns.None(), the current network namespace
+// is assumed.
+func NewHandleAt(ns netns.NsHandle) (*Handle, error) {
+ return newHandle(ns, netns.None())
+}
+
+// NewHandleAtFrom works as NewHandleAt but allows the caller to specify
+// both the new and the origin netns handles.
+func NewHandleAtFrom(newNs, curNs netns.NsHandle) (*Handle, error) {
+ return newHandle(newNs, curNs)
+}
+
+func newHandle(newNs, curNs netns.NsHandle) (*Handle, error) {
+ var (
+ err error
+ rSocket *nl.NetlinkSocket
+ xSocket *nl.NetlinkSocket
+ )
+ rSocket, err = nl.GetNetlinkSocketAt(newNs, curNs, syscall.NETLINK_ROUTE)
+ if err != nil {
+ return nil, err
+ }
+ xSocket, err = nl.GetNetlinkSocketAt(newNs, curNs, syscall.NETLINK_XFRM)
+ if err != nil {
+ return nil, err
+ }
+ return &Handle{routeSocket: rSocket, xfrmSocket: xSocket}, nil
+}
+
+// Delete releases the resources allocated to this handle
+func (h *Handle) Delete() {
+ if h.routeSocket != nil {
+ h.routeSocket.Close()
+ }
+ if h.xfrmSocket != nil {
+ h.xfrmSocket.Close()
+ }
+ h.routeSocket, h.xfrmSocket = nil, nil
+}
+
+func (h *Handle) newNetlinkRequest(proto, flags int) *nl.NetlinkRequest {
+ // Fall back so the package-level API keeps using the nl package's nextSeqNr counter
+ if h.routeSocket == nil {
+ return nl.NewNetlinkRequest(proto, flags)
+ }
+ return &nl.NetlinkRequest{
+ NlMsghdr: syscall.NlMsghdr{
+ Len: uint32(syscall.SizeofNlMsghdr),
+ Type: uint16(proto),
+ Flags: syscall.NLM_F_REQUEST | uint16(flags),
+ Seq: atomic.AddUint32(&h.seq, 1),
+ },
+ RouteSocket: h.routeSocket,
+ XfmrSocket: h.xfrmSocket,
+ }
+}
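The Handle lifecycle in practice: construct it against a namespace, issue requests through its long-lived sockets, then Delete it. A sketch assuming netns.GetFromName and a namespace named "blue":

package main

import (
	"log"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netns"
)

func main() {
	ns, err := netns.GetFromName("blue") // hypothetical namespace
	if err != nil {
		log.Fatal(err)
	}
	defer ns.Close()

	// All requests issued through h share the sockets opened in "blue".
	h, err := netlink.NewHandleAt(ns)
	if err != nil {
		log.Fatal(err)
	}
	defer h.Delete() // releases the route and xfrm sockets

	links, err := h.LinkList()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d links in namespace blue", len(links))
}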
diff --git a/vendor/src/github.com/vishvananda/netlink/link.go b/vendor/src/github.com/vishvananda/netlink/link.go
index 2934c0fb2a..4efd32cbf2 100644
--- a/vendor/src/github.com/vishvananda/netlink/link.go
+++ b/vendor/src/github.com/vishvananda/netlink/link.go
@@ -31,6 +31,7 @@ type LinkAttrs struct {
MasterIndex int // must be the index of a bridge
Namespace interface{} // nil | NsPid | NsFd
Alias string
+ Statistics *LinkStatistics
}
// NewLinkAttrs returns LinkAttrs structure filled with default values
@@ -40,6 +41,35 @@ func NewLinkAttrs() LinkAttrs {
}
}
+/*
+Ref: struct rtnl_link_stats {...}
+*/
+type LinkStatistics struct {
+ RxPackets uint32
+ TxPackets uint32
+ RxBytes uint32
+ TxBytes uint32
+ RxErrors uint32
+ TxErrors uint32
+ RxDropped uint32
+ TxDropped uint32
+ Multicast uint32
+ Collisions uint32
+ RxLengthErrors uint32
+ RxOverErrors uint32
+ RxCrcErrors uint32
+ RxFrameErrors uint32
+ RxFifoErrors uint32
+ RxMissedErrors uint32
+ TxAbortedErrors uint32
+ TxCarrierErrors uint32
+ TxFifoErrors uint32
+ TxHeartbeatErrors uint32
+ TxWindowErrors uint32
+ RxCompressed uint32
+ TxCompressed uint32
+}
+
// Device links cannot be created via netlink. These links
// are links created by udev like 'lo' and 'eth0'
type Device struct {
@@ -425,7 +455,7 @@ const (
BOND_AD_SELECT_COUNT
)
-// BondAdInfo
+// BondAdInfo represents ad info for bond
type BondAdInfo struct {
AggregatorId int
NumPorts int
@@ -526,7 +556,7 @@ func (bond *Bond) Type() string {
return "bond"
}
-// GreTap devices must specify LocalIP and RemoteIP on create
+// Gretap devices must specify LocalIP and RemoteIP on create
type Gretap struct {
LinkAttrs
IKey uint32
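Reading the new Statistics field after a lookup; a short sketch assuming a device named eth0 (Statistics may be nil when the kernel reply carried no IFLA_STATS attribute):

package main

import (
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0") // hypothetical device
	if err != nil {
		log.Fatal(err)
	}
	// Statistics is populated from IFLA_STATS during linkDeserialize.
	if s := link.Attrs().Statistics; s != nil {
		log.Printf("rx=%d bytes tx=%d bytes rx drops=%d",
			s.RxBytes, s.TxBytes, s.RxDropped)
	}
}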
diff --git a/vendor/src/github.com/vishvananda/netlink/link_linux.go b/vendor/src/github.com/vishvananda/netlink/link_linux.go
index b3d0472004..0ed307dd25 100644
--- a/vendor/src/github.com/vishvananda/netlink/link_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/link_linux.go
@@ -12,6 +12,8 @@ import (
"github.com/vishvananda/netlink/nl"
)
+const SizeofLinkStats = 0x5c
+
var native = nl.NativeEndian()
var lookupByDump = false
@@ -33,12 +35,27 @@ func ensureIndex(link *LinkAttrs) {
}
}
+func (h *Handle) ensureIndex(link *LinkAttrs) {
+ if link != nil && link.Index == 0 {
+ newlink, _ := h.LinkByName(link.Name)
+ if newlink != nil {
+ link.Index = newlink.Attrs().Index
+ }
+ }
+}
+
// LinkSetUp enables the link device.
// Equivalent to: `ip link set $link up`
func LinkSetUp(link Link) error {
+ return pkgHandle.LinkSetUp(link)
+}
+
+// LinkSetUp enables the link device.
+// Equivalent to: `ip link set $link up`
+func (h *Handle) LinkSetUp(link Link) error {
base := link.Attrs()
- ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Change = syscall.IFF_UP
@@ -53,9 +70,15 @@ func LinkSetUp(link Link) error {
// LinkSetDown disables link device.
// Equivalent to: `ip link set $link down`
func LinkSetDown(link Link) error {
+ return pkgHandle.LinkSetDown(link)
+}
+
+// LinkSetDown disables link device.
+// Equivalent to: `ip link set $link down`
+func (h *Handle) LinkSetDown(link Link) error {
base := link.Attrs()
- ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Change = syscall.IFF_UP
@@ -70,9 +93,15 @@ func LinkSetDown(link Link) error {
// LinkSetMTU sets the mtu of the link device.
// Equivalent to: `ip link set $link mtu $mtu`
func LinkSetMTU(link Link, mtu int) error {
+ return pkgHandle.LinkSetMTU(link, mtu)
+}
+
+// LinkSetMTU sets the mtu of the link device.
+// Equivalent to: `ip link set $link mtu $mtu`
+func (h *Handle) LinkSetMTU(link Link, mtu int) error {
base := link.Attrs()
- ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
@@ -91,9 +120,15 @@ func LinkSetMTU(link Link, mtu int) error {
// LinkSetName sets the name of the link device.
// Equivalent to: `ip link set $link name $name`
func LinkSetName(link Link, name string) error {
+ return pkgHandle.LinkSetName(link, name)
+}
+
+// LinkSetName sets the name of the link device.
+// Equivalent to: `ip link set $link name $name`
+func (h *Handle) LinkSetName(link Link, name string) error {
base := link.Attrs()
- ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
@@ -109,9 +144,15 @@ func LinkSetName(link Link, name string) error {
// LinkSetAlias sets the alias of the link device.
// Equivalent to: `ip link set dev $link alias $name`
func LinkSetAlias(link Link, name string) error {
+ return pkgHandle.LinkSetAlias(link, name)
+}
+
+// LinkSetAlias sets the alias of the link device.
+// Equivalent to: `ip link set dev $link alias $name`
+func (h *Handle) LinkSetAlias(link Link, name string) error {
base := link.Attrs()
- ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
@@ -127,9 +168,15 @@ func LinkSetAlias(link Link, name string) error {
// LinkSetHardwareAddr sets the hardware address of the link device.
// Equivalent to: `ip link set $link address $hwaddr`
func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
+ return pkgHandle.LinkSetHardwareAddr(link, hwaddr)
+}
+
+// LinkSetHardwareAddr sets the hardware address of the link device.
+// Equivalent to: `ip link set $link address $hwaddr`
+func (h *Handle) LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
base := link.Attrs()
- ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
@@ -145,9 +192,15 @@ func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
// LinkSetVfHardwareAddr sets the hardware address of a vf for the link.
// Equivalent to: `ip link set $link vf $vf mac $hwaddr`
func LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error {
+ return pkgHandle.LinkSetVfHardwareAddr(link, vf, hwaddr)
+}
+
+// LinkSetVfHardwareAddr sets the hardware address of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf mac $hwaddr`
+func (h *Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error {
base := link.Attrs()
- ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
@@ -169,9 +222,15 @@ func LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error {
// LinkSetVfVlan sets the vlan of a vf for the link.
// Equivalent to: `ip link set $link vf $vf vlan $vlan`
func LinkSetVfVlan(link Link, vf, vlan int) error {
+ return pkgHandle.LinkSetVfVlan(link, vf, vlan)
+}
+
+// LinkSetVfVlan sets the vlan of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf vlan $vlan`
+func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error {
base := link.Attrs()
- ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
@@ -193,30 +252,48 @@ func LinkSetVfVlan(link Link, vf, vlan int) error {
// LinkSetMaster sets the master of the link device.
// Equivalent to: `ip link set $link master $master`
func LinkSetMaster(link Link, master *Bridge) error {
+ return pkgHandle.LinkSetMaster(link, master)
+}
+
+// LinkSetMaster sets the master of the link device.
+// Equivalent to: `ip link set $link master $master`
+func (h *Handle) LinkSetMaster(link Link, master *Bridge) error {
index := 0
if master != nil {
masterBase := master.Attrs()
- ensureIndex(masterBase)
+ h.ensureIndex(masterBase)
index = masterBase.Index
}
if index <= 0 {
return fmt.Errorf("Device does not exist")
}
- return LinkSetMasterByIndex(link, index)
+ return h.LinkSetMasterByIndex(link, index)
}
// LinkSetNoMaster removes the master of the link device.
// Equivalent to: `ip link set $link nomaster`
func LinkSetNoMaster(link Link) error {
- return LinkSetMasterByIndex(link, 0)
+ return pkgHandle.LinkSetNoMaster(link)
+}
+
+// LinkSetNoMaster removes the master of the link device.
+// Equivalent to: `ip link set $link nomaster`
+func (h *Handle) LinkSetNoMaster(link Link) error {
+ return h.LinkSetMasterByIndex(link, 0)
}
// LinkSetMasterByIndex sets the master of the link device.
// Equivalent to: `ip link set $link master $master`
func LinkSetMasterByIndex(link Link, masterIndex int) error {
+ return pkgHandle.LinkSetMasterByIndex(link, masterIndex)
+}
+
+// LinkSetMasterByIndex sets the master of the link device.
+// Equivalent to: `ip link set $link master $master`
+func (h *Handle) LinkSetMasterByIndex(link Link, masterIndex int) error {
base := link.Attrs()
- ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
@@ -236,9 +313,16 @@ func LinkSetMasterByIndex(link Link, masterIndex int) error {
// pid must be a pid of a running process.
// Equivalent to: `ip link set $link netns $pid`
func LinkSetNsPid(link Link, nspid int) error {
+ return pkgHandle.LinkSetNsPid(link, nspid)
+}
+
+// LinkSetNsPid puts the device into a new network namespace. The
+// pid must be a pid of a running process.
+// Equivalent to: `ip link set $link netns $pid`
+func (h *Handle) LinkSetNsPid(link Link, nspid int) error {
base := link.Attrs()
- ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
@@ -258,9 +342,16 @@ func LinkSetNsPid(link Link, nspid int) error {
// fd must be an open file descriptor to a network namespace.
// Similar to: `ip link set $link netns $ns`
func LinkSetNsFd(link Link, fd int) error {
+ return pkgHandle.LinkSetNsFd(link, fd)
+}
+
+// LinkSetNsFd puts the device into a new network namespace. The
+// fd must be an open file descriptor to a network namespace.
+// Similar to: `ip link set $link netns $ns`
+func (h *Handle) LinkSetNsFd(link Link, fd int) error {
base := link.Attrs()
- ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
@@ -340,7 +431,7 @@ func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit)))
}
if vxlan.Port > 0 {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT, nl.Uint16Attr(uint16(vxlan.Port)))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT, htons(uint16(vxlan.Port)))
}
if vxlan.PortLow > 0 || vxlan.PortHigh > 0 {
pr := vxlanPortRange{uint16(vxlan.PortLow), uint16(vxlan.PortHigh)}
@@ -434,9 +525,16 @@ func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) {
}
// LinkAdd adds a new link device. The type and features of the device
-// are taken fromt the parameters in the link object.
+// are taken from the parameters in the link object.
// Equivalent to: `ip link add $link`
func LinkAdd(link Link) error {
+ return pkgHandle.LinkAdd(link)
+}
+
+// LinkAdd adds a new link device. The type and features of the device
+// are taken from the parameters in the link object.
+// Equivalent to: `ip link add $link`
+func (h *Handle) LinkAdd(link Link) error {
// TODO: set mtu and hardware address
// TODO: support extra data for macvlan
base := link.Attrs()
@@ -473,17 +571,17 @@ func LinkAdd(link Link) error {
if errno != 0 {
return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno)
}
- ensureIndex(base)
+ h.ensureIndex(base)
// can't set master during create, so set it afterwards
if base.MasterIndex != 0 {
// TODO: verify MasterIndex is actually a bridge?
- return LinkSetMasterByIndex(link, base.MasterIndex)
+ return h.LinkSetMasterByIndex(link, base.MasterIndex)
}
return nil
}
- req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ req := h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
// TODO: make it shorter
@@ -588,12 +686,12 @@ func LinkAdd(link Link) error {
return err
}
- ensureIndex(base)
+ h.ensureIndex(base)
// can't set master during create, so set it afterwards
if base.MasterIndex != 0 {
// TODO: verify MasterIndex is actually a bridge?
- return LinkSetMasterByIndex(link, base.MasterIndex)
+ return h.LinkSetMasterByIndex(link, base.MasterIndex)
}
return nil
}
@@ -602,11 +700,18 @@ func LinkAdd(link Link) error {
// the link object for it to be deleted. The other values are ignored.
// Equivalent to: `ip link del $link`
func LinkDel(link Link) error {
+ return pkgHandle.LinkDel(link)
+}
+
+// LinkDel deletes link device. Either Index or Name must be set in
+// the link object for it to be deleted. The other values are ignored.
+// Equivalent to: `ip link del $link`
+func (h *Handle) LinkDel(link Link) error {
base := link.Attrs()
- ensureIndex(base)
+ h.ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK)
+ req := h.newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
@@ -616,8 +721,8 @@ func LinkDel(link Link) error {
return err
}
-func linkByNameDump(name string) (Link, error) {
- links, err := LinkList()
+func (h *Handle) linkByNameDump(name string) (Link, error) {
+ links, err := h.LinkList()
if err != nil {
return nil, err
}
@@ -630,8 +735,8 @@ func linkByNameDump(name string) (Link, error) {
return nil, fmt.Errorf("Link %s not found", name)
}
-func linkByAliasDump(alias string) (Link, error) {
- links, err := LinkList()
+func (h *Handle) linkByAliasDump(alias string) (Link, error) {
+ links, err := h.LinkList()
if err != nil {
return nil, err
}
@@ -646,11 +751,16 @@ func linkByAliasDump(alias string) (Link, error) {
// LinkByName finds a link by name and returns a pointer to the object.
func LinkByName(name string) (Link, error) {
- if lookupByDump {
- return linkByNameDump(name)
+ return pkgHandle.LinkByName(name)
+}
+
+// LinkByName finds a link by name and returns a pointer to the object.
+func (h *Handle) LinkByName(name string) (Link, error) {
+ if h.lookupByDump {
+ return h.linkByNameDump(name)
}
- req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
req.AddData(msg)
@@ -662,8 +772,8 @@ func LinkByName(name string) (Link, error) {
if err == syscall.EINVAL {
// older kernels don't support looking up via IFLA_IFNAME
// so fall back to dumping all links
- lookupByDump = true
- return linkByNameDump(name)
+ h.lookupByDump = true
+ return h.linkByNameDump(name)
}
return link, err
@@ -672,11 +782,17 @@ func LinkByName(name string) (Link, error) {
// LinkByAlias finds a link by its alias and returns a pointer to the object.
// If there are multiple links with the alias it returns the first one
func LinkByAlias(alias string) (Link, error) {
- if lookupByDump {
- return linkByAliasDump(alias)
+ return pkgHandle.LinkByAlias(alias)
+}
+
+// LinkByAlias finds a link by its alias and returns a pointer to the object.
+// If there are multiple links with the alias it returns the first one
+func (h *Handle) LinkByAlias(alias string) (Link, error) {
+ if h.lookupByDump {
+ return h.linkByAliasDump(alias)
}
- req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
req.AddData(msg)
@@ -688,8 +804,8 @@ func LinkByAlias(alias string) (Link, error) {
if err == syscall.EINVAL {
// older kernels don't support looking up via IFLA_IFALIAS
// so fall back to dumping all links
- lookupByDump = true
- return linkByAliasDump(alias)
+ h.lookupByDump = true
+ return h.linkByAliasDump(alias)
}
return link, err
@@ -697,7 +813,12 @@ func LinkByAlias(alias string) (Link, error) {
// LinkByIndex finds a link by index and returns a pointer to the object.
func LinkByIndex(index int) (Link, error) {
- req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+ return pkgHandle.LinkByIndex(index)
+}
+
+// LinkByIndex finds a link by index and returns a pointer to the object.
+func (h *Handle) LinkByIndex(index int) (Link, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(index)
@@ -824,6 +945,8 @@ func linkDeserialize(m []byte) (Link, error) {
base.TxQLen = int(native.Uint32(attr.Value[0:4]))
case syscall.IFLA_IFALIAS:
base.Alias = string(attr.Value[:len(attr.Value)-1])
+ case syscall.IFLA_STATS:
+ base.Statistics = parseLinkStats(attr.Value[:])
}
}
// Links that don't have IFLA_INFO_KIND are hardware devices
@@ -838,9 +961,15 @@ func linkDeserialize(m []byte) (Link, error) {
// LinkList gets a list of link devices.
// Equivalent to: `ip link show`
func LinkList() ([]Link, error) {
+ return pkgHandle.LinkList()
+}
+
+// LinkList gets a list of link devices.
+// Equivalent to: `ip link show`
+func (h *Handle) LinkList() ([]Link, error) {
// NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need
// to get the message ourselves to parse link type.
- req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
req.AddData(msg)
@@ -904,33 +1033,57 @@ func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error {
}
func LinkSetHairpin(link Link, mode bool) error {
- return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_MODE)
+ return pkgHandle.LinkSetHairpin(link, mode)
+}
+
+func (h *Handle) LinkSetHairpin(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_MODE)
}
func LinkSetGuard(link Link, mode bool) error {
- return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD)
+ return pkgHandle.LinkSetGuard(link, mode)
+}
+
+func (h *Handle) LinkSetGuard(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD)
}
func LinkSetFastLeave(link Link, mode bool) error {
- return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_FAST_LEAVE)
+ return pkgHandle.LinkSetFastLeave(link, mode)
+}
+
+func (h *Handle) LinkSetFastLeave(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_FAST_LEAVE)
}
func LinkSetLearning(link Link, mode bool) error {
- return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_LEARNING)
+ return pkgHandle.LinkSetLearning(link, mode)
+}
+
+func (h *Handle) LinkSetLearning(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_LEARNING)
}
func LinkSetRootBlock(link Link, mode bool) error {
- return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROTECT)
+ return pkgHandle.LinkSetRootBlock(link, mode)
+}
+
+func (h *Handle) LinkSetRootBlock(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROTECT)
}
func LinkSetFlood(link Link, mode bool) error {
- return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD)
+ return pkgHandle.LinkSetFlood(link, mode)
+}
+
+func (h *Handle) LinkSetFlood(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD)
}
-func setProtinfoAttr(link Link, mode bool, attr int) error {
+func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error {
base := link.Attrs()
- ensureIndex(base)
- req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
msg.Index = int32(base.Index)
@@ -996,7 +1149,7 @@ func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) {
case nl.IFLA_VXLAN_LIMIT:
vxlan.Limit = int(native.Uint32(datum.Value[0:4]))
case nl.IFLA_VXLAN_PORT:
- vxlan.Port = int(native.Uint16(datum.Value[0:2]))
+ vxlan.Port = int(ntohs(datum.Value[0:2]))
case nl.IFLA_VXLAN_PORT_RANGE:
buf := bytes.NewBuffer(datum.Value[0:4])
var pr vxlanPortRange
@@ -1211,3 +1364,7 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) {
}
}
}
+
+func parseLinkStats(data []byte) *LinkStatistics {
+ return (*LinkStatistics)(unsafe.Pointer(&data[0:SizeofLinkStats][0]))
+}
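The pattern repeated throughout this file: each package-level function delegates to pkgHandle, whose nil routeSocket makes newNetlinkRequest fall back to a fresh per-request socket, while an explicit Handle reuses its own long-lived sockets. A sketch contrasting the two, assuming a device named dummy0:

package main

import (
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("dummy0") // hypothetical device
	if err != nil {
		log.Fatal(err)
	}

	// Package-level call: delegates to pkgHandle, which opens a new
	// socket per request and uses the nl package sequence numbers.
	if err := netlink.LinkSetUp(link); err != nil {
		log.Fatal(err)
	}

	// Handle-scoped call: reuses the handle's route socket and the
	// handle's own atomic sequence counter.
	h, err := netlink.NewHandle()
	if err != nil {
		log.Fatal(err)
	}
	defer h.Delete()
	if err := h.LinkSetMTU(link, 1400); err != nil {
		log.Fatal(err)
	}
}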
diff --git a/vendor/src/github.com/vishvananda/netlink/neigh_linux.go b/vendor/src/github.com/vishvananda/netlink/neigh_linux.go
index 2af693bab9..9b6cd2c256 100644
--- a/vendor/src/github.com/vishvananda/netlink/neigh_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/neigh_linux.go
@@ -67,30 +67,62 @@ func (msg *Ndmsg) Len() int {
// NeighAdd will add an IP to MAC mapping to the ARP table
// Equivalent to: `ip neigh add ....`
func NeighAdd(neigh *Neigh) error {
- return neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL)
+ return pkgHandle.NeighAdd(neigh)
+}
+
+// NeighAdd will add an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh add ....`
+func (h *Handle) NeighAdd(neigh *Neigh) error {
+ return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL)
}
// NeighSet will add or replace an IP to MAC mapping to the ARP table
// Equivalent to: `ip neigh replace....`
func NeighSet(neigh *Neigh) error {
- return neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE)
+ return pkgHandle.NeighSet(neigh)
+}
+
+// NeighSet will add or replace an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh replace....`
+func (h *Handle) NeighSet(neigh *Neigh) error {
+ return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE)
}
// NeighAppend will append an entry to FDB
// Equivalent to: `bridge fdb append...`
func NeighAppend(neigh *Neigh) error {
- return neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_APPEND)
+ return pkgHandle.NeighAppend(neigh)
}
+// NeighAppend will append an entry to FDB
+// Equivalent to: `bridge fdb append...`
+func (h *Handle) NeighAppend(neigh *Neigh) error {
+ return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_APPEND)
+}
+
+// neighAdd adds a neighbor entry, applying the netlink request flags
+// passed in mode.
func neighAdd(neigh *Neigh, mode int) error {
- req := nl.NewNetlinkRequest(syscall.RTM_NEWNEIGH, mode|syscall.NLM_F_ACK)
+ return pkgHandle.neighAdd(neigh, mode)
+}
+
+// neighAdd adds a neighbor entry through the handle, applying the
+// netlink request flags passed in mode.
+func (h *Handle) neighAdd(neigh *Neigh, mode int) error {
+ req := h.newNetlinkRequest(syscall.RTM_NEWNEIGH, mode|syscall.NLM_F_ACK)
return neighHandle(neigh, req)
}
// NeighDel will delete an IP address from a link device.
// Equivalent to: `ip addr del $addr dev $link`
func NeighDel(neigh *Neigh) error {
- req := nl.NewNetlinkRequest(syscall.RTM_DELNEIGH, syscall.NLM_F_ACK)
+ return pkgHandle.NeighDel(neigh)
+}
+
+// NeighDel will delete an IP address from a link device.
+// Equivalent to: `ip addr del $addr dev $link`
+func (h *Handle) NeighDel(neigh *Neigh) error {
+ req := h.newNetlinkRequest(syscall.RTM_DELNEIGH, syscall.NLM_F_ACK)
return neighHandle(neigh, req)
}
@@ -130,7 +162,14 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error {
// Equivalent to: `ip neighbor show`.
// The list can be filtered by link and ip family.
func NeighList(linkIndex, family int) ([]Neigh, error) {
- req := nl.NewNetlinkRequest(syscall.RTM_GETNEIGH, syscall.NLM_F_DUMP)
+ return pkgHandle.NeighList(linkIndex, family)
+}
+
+// NeighList gets a list of IP-MAC mappings in the system (ARP table).
+// Equivalent to: `ip neighbor show`.
+// The list can be filtered by link and ip family.
+func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETNEIGH, syscall.NLM_F_DUMP)
msg := Ndmsg{
Family: uint8(family),
Index: uint32(linkIndex),
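Usage sketch for the neighbor API; the Neigh field names (LinkIndex, IP, HardwareAddr, State) and NUD_PERMANENT come from parts of the library not shown in this hunk, so treat them as assumptions:

package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0") // hypothetical device
	if err != nil {
		log.Fatal(err)
	}
	mac, err := net.ParseMAC("aa:bb:cc:dd:ee:ff")
	if err != nil {
		log.Fatal(err)
	}
	n := &netlink.Neigh{ // field names assumed
		LinkIndex:    link.Attrs().Index,
		IP:           net.ParseIP("192.168.1.10"),
		HardwareAddr: mac,
		State:        netlink.NUD_PERMANENT,
	}
	// NeighSet replaces an existing entry; NeighAdd fails if one exists.
	if err := netlink.NeighSet(n); err != nil {
		log.Fatal(err)
	}
}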
diff --git a/vendor/src/github.com/vishvananda/netlink/netlink.go b/vendor/src/github.com/vishvananda/netlink/netlink.go
index 687d8760dd..deafb6cfa7 100644
--- a/vendor/src/github.com/vishvananda/netlink/netlink.go
+++ b/vendor/src/github.com/vishvananda/netlink/netlink.go
@@ -14,8 +14,8 @@ import (
"github.com/vishvananda/netlink/nl"
)
+// Family type definitions
const (
- // Family type definitions
FAMILY_ALL = nl.FAMILY_ALL
FAMILY_V4 = nl.FAMILY_V4
FAMILY_V6 = nl.FAMILY_V6
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go
index e3afb5c232..f41821dd72 100644
--- a/vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go
@@ -6,9 +6,13 @@ import (
"encoding/binary"
"fmt"
"net"
+ "runtime"
+ "sync"
"sync/atomic"
"syscall"
"unsafe"
+
+ "github.com/vishvananda/netns"
)
const (
@@ -171,7 +175,9 @@ func (a *RtAttr) Serialize() []byte {
type NetlinkRequest struct {
syscall.NlMsghdr
- Data []NetlinkRequestData
+ Data []NetlinkRequestData
+ RouteSocket *NetlinkSocket
+ XfmrSocket *NetlinkSocket
}
// Serialize the Netlink Request into a byte array
@@ -206,11 +212,32 @@ func (req *NetlinkRequest) AddData(data NetlinkRequestData) {
// Returns a list of netlink messages in serialized format, optionally filtered
// by resType.
func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) {
- s, err := getNetlinkSocket(sockType)
- if err != nil {
- return nil, err
+ var (
+ s *NetlinkSocket
+ err error
+ )
+
+ switch sockType {
+ case syscall.NETLINK_XFRM:
+ s = req.XfmrSocket
+ case syscall.NETLINK_ROUTE:
+ s = req.RouteSocket
+ default:
+ return nil, fmt.Errorf("Socket type %d is not handled", sockType)
+ }
+
+ sharedSocket := s != nil
+
+ if s == nil {
+ s, err = getNetlinkSocket(sockType)
+ if err != nil {
+ return nil, err
+ }
+ defer s.Close()
+ } else {
+ s.Lock()
+ defer s.Unlock()
}
- defer s.Close()
if err := s.Send(req); err != nil {
return nil, err
@@ -231,7 +258,10 @@ done:
}
for _, m := range msgs {
if m.Header.Seq != req.Seq {
- return nil, fmt.Errorf("Wrong Seq nr %d, expected 1", m.Header.Seq)
+ if sharedSocket {
+ continue
+ }
+ return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq)
}
if m.Header.Pid != pid {
return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
@@ -276,6 +306,7 @@ func NewNetlinkRequest(proto, flags int) *NetlinkRequest {
type NetlinkSocket struct {
fd int
lsa syscall.SockaddrNetlink
+ sync.Mutex
}
func getNetlinkSocket(protocol int) (*NetlinkSocket, error) {
@@ -295,6 +326,32 @@ func getNetlinkSocket(protocol int) (*NetlinkSocket, error) {
return s, nil
}
+// GetNetlinkSocketAt opens a netlink socket in the network namespace newNs
+// and positions the thread back into the network namespace specified by
+// curNs when done. If curNs is closed, the function derives the current
+// namespace and moves back into it when done. If newNs is closed, the
+// socket is opened in the current network namespace.
+func GetNetlinkSocketAt(newNs, curNs netns.NsHandle, protocol int) (*NetlinkSocket, error) {
+ var err error
+
+ if newNs.IsOpen() {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ if !curNs.IsOpen() {
+ if curNs, err = netns.Get(); err != nil {
+ return nil, fmt.Errorf("could not get current namespace while creating netlink socket: %v", err)
+ }
+ defer curNs.Close()
+ }
+ if err := netns.Set(newNs); err != nil {
+ return nil, fmt.Errorf("failed to set into network namespace %d while creating netlink socket: %v", newNs, err)
+ }
+ defer netns.Set(curNs)
+ }
+
+ return getNetlinkSocket(protocol)
+}
+
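How a caller might use GetNetlinkSocketAt directly; netns.GetFromName is an assumption. The OS thread is locked only for the duration of the call, and since a netlink socket stays bound to the namespace it was opened in, it keeps working after the thread moves back:

package main

import (
	"log"
	"syscall"

	"github.com/vishvananda/netlink/nl"
	"github.com/vishvananda/netns"
)

func main() {
	target, err := netns.GetFromName("blue") // hypothetical namespace
	if err != nil {
		log.Fatal(err)
	}
	defer target.Close()

	// curNs = netns.None(): the helper snapshots the current namespace
	// itself and restores it before returning.
	s, err := nl.GetNetlinkSocketAt(target, netns.None(), syscall.NETLINK_ROUTE)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()
}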
// Create a netlink socket with a given protocol (e.g. NETLINK_ROUTE)
// and subscribe it to multicast groups passed in variable argument list.
// Returns the netlink socket on which Receive() method can be called
@@ -323,6 +380,7 @@ func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) {
func (s *NetlinkSocket) Close() {
syscall.Close(s.fd)
+ s.fd = -1
}
func (s *NetlinkSocket) GetFd() int {
@@ -330,6 +388,9 @@ func (s *NetlinkSocket) GetFd() int {
}
func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
+ if s.fd < 0 {
+ return fmt.Errorf("Send called on a closed socket")
+ }
if err := syscall.Sendto(s.fd, request.Serialize(), 0, &s.lsa); err != nil {
return err
}
@@ -337,6 +398,9 @@ func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
}
func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) {
+ if s.fd < 0 {
+ return nil, fmt.Errorf("Receive called on a closed socket")
+ }
rb := make([]byte, syscall.Getpagesize())
nr, _, err := syscall.Recvfrom(s.fd, rb, 0)
if err != nil {
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/tc_linux.go
index aa59005772..e91fb21c55 100644
--- a/vendor/src/github.com/vishvananda/netlink/nl/tc_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/nl/tc_linux.go
@@ -50,6 +50,15 @@ const (
)
const (
+ TCA_ACT_UNSPEC = iota
+ TCA_ACT_KIND
+ TCA_ACT_OPTIONS
+ TCA_ACT_INDEX
+ TCA_ACT_STATS
+ TCA_ACT_MAX
+)
+
+const (
TCA_PRIO_UNSPEC = iota
TCA_PRIO_MQ
TCA_PRIO_MAX = TCA_PRIO_MQ
@@ -69,7 +78,8 @@ const (
SizeofTcHtbGlob = 0x14
SizeofTcU32Key = 0x10
SizeofTcU32Sel = 0x10 // without keys
- SizeofTcMirred = 0x1c
+ SizeofTcGen = 0x14
+ SizeofTcMirred = SizeofTcGen + 0x08
SizeofTcPolice = 2*SizeofTcRateSpec + 0x20
)
@@ -506,42 +516,92 @@ func (x *TcU32Sel) Serialize() []byte {
return buf
}
+type TcGen struct {
+ Index uint32
+ Capab uint32
+ Action int32
+ Refcnt int32
+ Bindcnt int32
+}
+
+func (msg *TcGen) Len() int {
+ return SizeofTcGen
+}
+
+func DeserializeTcGen(b []byte) *TcGen {
+ return (*TcGen)(unsafe.Pointer(&b[0:SizeofTcGen][0]))
+}
+
+func (x *TcGen) Serialize() []byte {
+ return (*(*[SizeofTcGen]byte)(unsafe.Pointer(x)))[:]
+}
+
+// #define tc_gen \
+// __u32 index; \
+// __u32 capab; \
+// int action; \
+// int refcnt; \
+// int bindcnt
+
const (
- TCA_ACT_MIRRED = 8
+ TCA_ACT_GACT = 5
)
const (
- TCA_MIRRED_UNSPEC = iota
- TCA_MIRRED_TM
- TCA_MIRRED_PARMS
- TCA_MIRRED_MAX = TCA_MIRRED_PARMS
+ TCA_GACT_UNSPEC = iota
+ TCA_GACT_TM
+ TCA_GACT_PARMS
+ TCA_GACT_PROB
+ TCA_GACT_MAX = TCA_GACT_PROB
)
+type TcGact TcGen
+
const (
- TCA_EGRESS_REDIR = 1 /* packet redirect to EGRESS*/
- TCA_EGRESS_MIRROR = 2 /* mirror packet to EGRESS */
- TCA_INGRESS_REDIR = 3 /* packet redirect to INGRESS*/
- TCA_INGRESS_MIRROR = 4 /* mirror packet to INGRESS */
+ TCA_ACT_BPF = 13
)
const (
- TC_ACT_UNSPEC = int32(-1)
- TC_ACT_OK = 0
- TC_ACT_RECLASSIFY = 1
- TC_ACT_SHOT = 2
- TC_ACT_PIPE = 3
- TC_ACT_STOLEN = 4
- TC_ACT_QUEUED = 5
- TC_ACT_REPEAT = 6
- TC_ACT_JUMP = 0x10000000
+ TCA_ACT_BPF_UNSPEC = iota
+ TCA_ACT_BPF_TM
+ TCA_ACT_BPF_PARMS
+ TCA_ACT_BPF_OPS_LEN
+ TCA_ACT_BPF_OPS
+ TCA_ACT_BPF_FD
+ TCA_ACT_BPF_NAME
+ TCA_ACT_BPF_MAX = TCA_ACT_BPF_NAME
+)
+
+const (
+ TCA_BPF_FLAG_ACT_DIRECT uint32 = 1 << iota
+)
+
+const (
+ TCA_BPF_UNSPEC = iota
+ TCA_BPF_ACT
+ TCA_BPF_POLICE
+ TCA_BPF_CLASSID
+ TCA_BPF_OPS_LEN
+ TCA_BPF_OPS
+ TCA_BPF_FD
+ TCA_BPF_NAME
+ TCA_BPF_FLAGS
+ TCA_BPF_MAX = TCA_BPF_FLAGS
+)
+
+type TcBpf TcGen
+
+const (
+ TCA_ACT_MIRRED = 8
+)
+
+const (
+ TCA_MIRRED_UNSPEC = iota
+ TCA_MIRRED_TM
+ TCA_MIRRED_PARMS
+ TCA_MIRRED_MAX = TCA_MIRRED_PARMS
)
-// #define tc_gen \
-// __u32 index; \
-// __u32 capab; \
-// int action; \
-// int refcnt; \
-// int bindcnt
// struct tc_mirred {
// tc_gen;
// int eaction; /* one of IN/EGRESS_MIRROR/REDIR */
@@ -549,11 +609,7 @@ const (
// };
type TcMirred struct {
- Index uint32
- Capab uint32
- Action int32
- Refcnt int32
- Bindcnt int32
+ TcGen
Eaction int32
Ifindex uint32
}
@@ -570,14 +626,6 @@ func (x *TcMirred) Serialize() []byte {
return (*(*[SizeofTcMirred]byte)(unsafe.Pointer(x)))[:]
}
-const (
- TC_POLICE_UNSPEC = TC_ACT_UNSPEC
- TC_POLICE_OK = TC_ACT_OK
- TC_POLICE_RECLASSIFY = TC_ACT_RECLASSIFY
- TC_POLICE_SHOT = TC_ACT_SHOT
- TC_POLICE_PIPE = TC_ACT_PIPE
-)
-
// struct tc_police {
// __u32 index;
// int action;
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go
index d24637d278..ebf47946d6 100644
--- a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go
@@ -78,6 +78,7 @@ const (
SizeofXfrmLifetimeCfg = 0x40
SizeofXfrmLifetimeCur = 0x20
SizeofXfrmId = 0x18
+ SizeofXfrmMark = 0x08
)
// typedef union {
@@ -256,3 +257,20 @@ func DeserializeXfrmId(b []byte) *XfrmId {
func (msg *XfrmId) Serialize() []byte {
return (*(*[SizeofXfrmId]byte)(unsafe.Pointer(msg)))[:]
}
+
+type XfrmMark struct {
+ Value uint32
+ Mask uint32
+}
+
+func (msg *XfrmMark) Len() int {
+ return SizeofXfrmMark
+}
+
+func DeserializeXfrmMark(b []byte) *XfrmMark {
+ return (*XfrmMark)(unsafe.Pointer(&b[0:SizeofXfrmMark][0]))
+}
+
+func (msg *XfrmMark) Serialize() []byte {
+ return (*(*[SizeofXfrmMark]byte)(unsafe.Pointer(msg)))[:]
+}
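The Serialize/Deserialize pair follows the same unsafe-cast convention as the other nl types; note that Deserialize aliases the input slice rather than copying it. A small round-trip sketch:

package main

import (
	"fmt"

	"github.com/vishvananda/netlink/nl"
)

func main() {
	m := nl.XfrmMark{Value: 0x10, Mask: 0xff}
	b := m.Serialize() // 8 bytes: the struct viewed as raw memory
	// DeserializeXfrmMark points back into b's backing array; it does not copy.
	back := nl.DeserializeXfrmMark(b)
	fmt.Printf("value=0x%x mask=0x%x len=%d\n", back.Value, back.Mask, m.Len())
}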
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
index 4876ce4583..482b4f37a0 100644
--- a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
@@ -5,12 +5,13 @@ import (
)
const (
- SizeofXfrmUsersaId = 0x18
- SizeofXfrmStats = 0x0c
- SizeofXfrmUsersaInfo = 0xe0
- SizeofXfrmAlgo = 0x44
- SizeofXfrmAlgoAuth = 0x48
- SizeofXfrmEncapTmpl = 0x18
+ SizeofXfrmUsersaId = 0x18
+ SizeofXfrmStats = 0x0c
+ SizeofXfrmUsersaInfo = 0xe0
+ SizeofXfrmAlgo = 0x44
+ SizeofXfrmAlgoAuth = 0x48
+ SizeofXfrmEncapTmpl = 0x18
+ SizeofXfrmUsersaFlush = 0x8
)
// struct xfrm_usersa_id {
@@ -219,3 +220,23 @@ func DeserializeXfrmEncapTmpl(b []byte) *XfrmEncapTmpl {
func (msg *XfrmEncapTmpl) Serialize() []byte {
return (*(*[SizeofXfrmEncapTmpl]byte)(unsafe.Pointer(msg)))[:]
}
+
+// struct xfrm_usersa_flush {
+// __u8 proto;
+// };
+
+type XfrmUsersaFlush struct {
+ Proto uint8
+}
+
+func (msg *XfrmUsersaFlush) Len() int {
+ return SizeofXfrmUsersaFlush
+}
+
+func DeserializeXfrmUsersaFlush(b []byte) *XfrmUsersaFlush {
+ return (*XfrmUsersaFlush)(unsafe.Pointer(&b[0:SizeofXfrmUsersaFlush][0]))
+}
+
+func (msg *XfrmUsersaFlush) Serialize() []byte {
+ return (*(*[SizeofXfrmUsersaFlush]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/src/github.com/vishvananda/netlink/protinfo_linux.go
index 7181eba100..5b95d76486 100644
--- a/vendor/src/github.com/vishvananda/netlink/protinfo_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/protinfo_linux.go
@@ -8,10 +8,14 @@ import (
)
func LinkGetProtinfo(link Link) (Protinfo, error) {
+ return pkgHandle.LinkGetProtinfo(link)
+}
+
+func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) {
base := link.Attrs()
- ensureIndex(base)
+ h.ensureIndex(base)
var pi Protinfo
- req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
req.AddData(msg)
msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0)
diff --git a/vendor/src/github.com/vishvananda/netlink/qdisc.go b/vendor/src/github.com/vishvananda/netlink/qdisc.go
index 48fe7c7981..3fb2bbae24 100644
--- a/vendor/src/github.com/vishvananda/netlink/qdisc.go
+++ b/vendor/src/github.com/vishvananda/netlink/qdisc.go
@@ -8,16 +8,21 @@ import (
const (
HANDLE_NONE = 0
HANDLE_INGRESS = 0xFFFFFFF1
+ HANDLE_CLSACT = HANDLE_INGRESS
HANDLE_ROOT = 0xFFFFFFFF
PRIORITY_MAP_LEN = 16
)
+const (
+ HANDLE_MIN_INGRESS = 0xFFFFFFF2
+ HANDLE_MIN_EGRESS = 0xFFFFFFF3
+)
type Qdisc interface {
Attrs() *QdiscAttrs
Type() string
}
-// Qdisc represents a netlink qdisc. A qdisc is associated with a link,
+// QdiscAttrs represents a netlink qdisc. A qdisc is associated with a link,
// has a handle, a parent and a refcnt. The root qdisc of a device should
// have parent == HANDLE_ROOT.
type QdiscAttrs struct {
@@ -28,7 +33,7 @@ type QdiscAttrs struct {
}
func (q QdiscAttrs) String() string {
- return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Refcnt: %s}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Refcnt)
+ return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Refcnt: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Refcnt)
}
func MakeHandle(major, minor uint16) uint32 {
@@ -149,7 +154,7 @@ type NetemQdiscAttrs struct {
func (q NetemQdiscAttrs) String() string {
return fmt.Sprintf(
- "{Latency: %d, Limit: %d, Loss: %d, Gap: %d, Duplicate: %d, Jitter: %d}",
+ "{Latency: %d, Limit: %d, Loss: %f, Gap: %d, Duplicate: %f, Jitter: %d}",
q.Latency, q.Limit, q.Loss, q.Gap, q.Duplicate, q.Jitter,
)
}
@@ -173,9 +178,9 @@ type Netem struct {
func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem {
var limit uint32 = 1000
- var loss_corr, delay_corr, duplicate_corr uint32
- var reorder_prob, reorder_corr uint32
- var corrupt_prob, corrupt_corr uint32
+ var lossCorr, delayCorr, duplicateCorr uint32
+ var reorderProb, reorderCorr uint32
+ var corruptProb, corruptCorr uint32
latency := nattrs.Latency
loss := Percentage2u32(nattrs.Loss)
@@ -185,13 +190,13 @@ func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem {
// Correlation
if latency > 0 && jitter > 0 {
- delay_corr = Percentage2u32(nattrs.DelayCorr)
+ delayCorr = Percentage2u32(nattrs.DelayCorr)
}
if loss > 0 {
- loss_corr = Percentage2u32(nattrs.LossCorr)
+ lossCorr = Percentage2u32(nattrs.LossCorr)
}
if duplicate > 0 {
- duplicate_corr = Percentage2u32(nattrs.DuplicateCorr)
+ duplicateCorr = Percentage2u32(nattrs.DuplicateCorr)
}
// FIXME should validate values (like loss/duplicate are percentages...)
latency = time2Tick(latency)
@@ -204,34 +209,34 @@ func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem {
jitter = time2Tick(jitter)
}
- reorder_prob = Percentage2u32(nattrs.ReorderProb)
- reorder_corr = Percentage2u32(nattrs.ReorderCorr)
+ reorderProb = Percentage2u32(nattrs.ReorderProb)
+ reorderCorr = Percentage2u32(nattrs.ReorderCorr)
- if reorder_prob > 0 {
+ if reorderProb > 0 {
// ERROR if latency == 0
if gap == 0 {
gap = 1
}
}
- corrupt_prob = Percentage2u32(nattrs.CorruptProb)
- corrupt_corr = Percentage2u32(nattrs.CorruptCorr)
+ corruptProb = Percentage2u32(nattrs.CorruptProb)
+ corruptCorr = Percentage2u32(nattrs.CorruptCorr)
return &Netem{
QdiscAttrs: attrs,
Latency: latency,
- DelayCorr: delay_corr,
+ DelayCorr: delayCorr,
Limit: limit,
Loss: loss,
- LossCorr: loss_corr,
+ LossCorr: lossCorr,
Gap: gap,
Duplicate: duplicate,
- DuplicateCorr: duplicate_corr,
+ DuplicateCorr: duplicateCorr,
Jitter: jitter,
- ReorderProb: reorder_prob,
- ReorderCorr: reorder_corr,
- CorruptProb: corrupt_prob,
- CorruptCorr: corrupt_corr,
+ ReorderProb: reorderProb,
+ ReorderCorr: reorderCorr,
+ CorruptProb: corruptProb,
+ CorruptCorr: corruptCorr,
}
}
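Putting NewNetem together with QdiscAdd; a sketch assuming a device named eth0 and that Latency/Jitter are supplied in microseconds ahead of the time2Tick conversion above (an assumption about the expected units):

package main

import (
	"log"
	"time"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0") // hypothetical device
	if err != nil {
		log.Fatal(err)
	}
	// 50ms +/- 10ms of delay with 1% loss on eth0's root qdisc.
	netem := netlink.NewNetem(
		netlink.QdiscAttrs{
			LinkIndex: link.Attrs().Index,
			Handle:    netlink.MakeHandle(1, 0),
			Parent:    netlink.HANDLE_ROOT,
		},
		netlink.NetemQdiscAttrs{
			Latency: uint32(50 * time.Millisecond / time.Microsecond), // microseconds, assumed
			Jitter:  uint32(10 * time.Millisecond / time.Microsecond),
			Loss:    1.0, // percent; converted via Percentage2u32 above
		},
	)
	if err := netlink.QdiscAdd(netem); err != nil {
		log.Fatal(err)
	}
}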
diff --git a/vendor/src/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/src/github.com/vishvananda/netlink/qdisc_linux.go
index d9a8b170f5..62ac9d3dd8 100644
--- a/vendor/src/github.com/vishvananda/netlink/qdisc_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/qdisc_linux.go
@@ -13,21 +13,41 @@ import (
// QdiscDel will delete a qdisc from the system.
// Equivalent to: `tc qdisc del $qdisc`
func QdiscDel(qdisc Qdisc) error {
- return qdiscModify(syscall.RTM_DELQDISC, 0, qdisc)
+ return pkgHandle.QdiscDel(qdisc)
+}
+
+// QdiscDel will delete a qdisc from the system.
+// Equivalent to: `tc qdisc del $qdisc`
+func (h *Handle) QdiscDel(qdisc Qdisc) error {
+ return h.qdiscModify(syscall.RTM_DELQDISC, 0, qdisc)
}
// QdiscChange will change a qdisc in place
// Equivalent to: `tc qdisc change $qdisc`
// The parent and handle MUST NOT be changed.
func QdiscChange(qdisc Qdisc) error {
- return qdiscModify(syscall.RTM_NEWQDISC, 0, qdisc)
+ return pkgHandle.QdiscChange(qdisc)
+}
+
+// QdiscChange will change a qdisc in place
+// Equivalent to: `tc qdisc change $qdisc`
+// The parent and handle MUST NOT be changed.
+func (h *Handle) QdiscChange(qdisc Qdisc) error {
+ return h.qdiscModify(syscall.RTM_NEWQDISC, 0, qdisc)
}
// QdiscReplace will replace a qdisc in the system.
// Equivalent to: `tc qdisc replace $qdisc`
// The handle MUST change.
func QdiscReplace(qdisc Qdisc) error {
- return qdiscModify(
+ return pkgHandle.QdiscReplace(qdisc)
+}
+
+// QdiscReplace will replace a qdisc in the system.
+// Equivalent to: `tc qdisc replace $qdisc`
+// The handle MUST change.
+func (h *Handle) QdiscReplace(qdisc Qdisc) error {
+ return h.qdiscModify(
syscall.RTM_NEWQDISC,
syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE,
qdisc)
@@ -36,14 +56,20 @@ func QdiscReplace(qdisc Qdisc) error {
// QdiscAdd will add a qdisc to the system.
// Equivalent to: `tc qdisc add $qdisc`
func QdiscAdd(qdisc Qdisc) error {
- return qdiscModify(
+ return pkgHandle.QdiscAdd(qdisc)
+}
+
+// QdiscAdd will add a qdisc to the system.
+// Equivalent to: `tc qdisc add $qdisc`
+func (h *Handle) QdiscAdd(qdisc Qdisc) error {
+ return h.qdiscModify(
syscall.RTM_NEWQDISC,
syscall.NLM_F_CREATE|syscall.NLM_F_EXCL,
qdisc)
}
-func qdiscModify(cmd, flags int, qdisc Qdisc) error {
- req := nl.NewNetlinkRequest(cmd, flags|syscall.NLM_F_ACK)
+func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error {
+ req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK)
base := qdisc.Attrs()
msg := &nl.TcMsg{
Family: nl.FAMILY_ALL,
@@ -139,11 +165,18 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error {
// Equivalent to: `tc qdisc show`.
// The list can be filtered by link.
func QdiscList(link Link) ([]Qdisc, error) {
- req := nl.NewNetlinkRequest(syscall.RTM_GETQDISC, syscall.NLM_F_DUMP)
+ return pkgHandle.QdiscList(link)
+}
+
+// QdiscList gets a list of qdiscs in the system.
+// Equivalent to: `tc qdisc show`.
+// The list can be filtered by link.
+func (h *Handle) QdiscList(link Link) ([]Qdisc, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETQDISC, syscall.NLM_F_DUMP)
index := int32(0)
if link != nil {
base := link.Attrs()
- ensureIndex(base)
+ h.ensureIndex(base)
index = int32(base.Index)
}
msg := &nl.TcMsg{
@@ -334,9 +367,9 @@ const (
)
var (
- tickInUsec float64 = 0.0
- clockFactor float64 = 0.0
- hz float64 = 0.0
+ tickInUsec float64
+ clockFactor float64
+ hz float64
)
func initClock() {
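
The pattern in this file repeats across the whole vendoring update: each package-level function becomes a thin wrapper that delegates to a method on *Handle via the shared pkgHandle, so existing callers keep working while new callers can scope requests to a private netlink socket. A hedged sketch of both call styles (the NewHandle constructor name is an assumption, not shown in this hunk):

    qdiscs, err := netlink.QdiscList(link) // package-level, served by pkgHandle
    if err == nil {
        fmt.Println(qdiscs)
    }
    // The same call scoped to a private socket (NewHandle is an assumed name):
    if h, herr := netlink.NewHandle(); herr == nil {
        qdiscs, err = h.QdiscList(link)
    }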
diff --git a/vendor/src/github.com/vishvananda/netlink/route.go b/vendor/src/github.com/vishvananda/netlink/route.go
index a7303d4c22..aa869ea93c 100644
--- a/vendor/src/github.com/vishvananda/netlink/route.go
+++ b/vendor/src/github.com/vishvananda/netlink/route.go
@@ -41,8 +41,8 @@ type Route struct {
}
func (r Route) String() string {
- return fmt.Sprintf("{Ifindex: %d Dst: %s Src: %s Gw: %s Flags: %s}", r.LinkIndex, r.Dst,
- r.Src, r.Gw, r.ListFlags())
+ return fmt.Sprintf("{Ifindex: %d Dst: %s Src: %s Gw: %s Flags: %s Table: %d}", r.LinkIndex, r.Dst,
+ r.Src, r.Gw, r.ListFlags(), r.Table)
}
func (r *Route) SetFlag(flag NextHopFlag) {
@@ -59,8 +59,8 @@ type flagString struct {
}
var testFlags = []flagString{
- flagString{f: FLAG_ONLINK, s: "onlink"},
- flagString{f: FLAG_PERVASIVE, s: "pervasive"},
+ {f: FLAG_ONLINK, s: "onlink"},
+ {f: FLAG_PERVASIVE, s: "pervasive"},
}
func (r *Route) ListFlags() []string {
diff --git a/vendor/src/github.com/vishvananda/netlink/route_linux.go b/vendor/src/github.com/vishvananda/netlink/route_linux.go
index d8026a7d1b..db952512e9 100644
--- a/vendor/src/github.com/vishvananda/netlink/route_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/route_linux.go
@@ -26,18 +26,30 @@ const (
// RouteAdd will add a route to the system.
// Equivalent to: `ip route add $route`
func RouteAdd(route *Route) error {
- req := nl.NewNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
- return routeHandle(route, req, nl.NewRtMsg())
+ return pkgHandle.RouteAdd(route)
+}
+
+// RouteAdd will add a route to the system.
+// Equivalent to: `ip route add $route`
+func (h *Handle) RouteAdd(route *Route) error {
+ req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ return h.routeHandle(route, req, nl.NewRtMsg())
}
// RouteDel will delete a route from the system.
// Equivalent to: `ip route del $route`
func RouteDel(route *Route) error {
- req := nl.NewNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK)
- return routeHandle(route, req, nl.NewRtDelMsg())
+ return pkgHandle.RouteDel(route)
}
-func routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error {
+// RouteDel will delete a route from the system.
+// Equivalent to: `ip route del $route`
+func (h *Handle) RouteDel(route *Route) error {
+ req := h.newNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK)
+ return h.routeHandle(route, req, nl.NewRtDelMsg())
+}
+
+func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error {
if (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil {
return fmt.Errorf("one of Dst.IP, Src, or Gw must not be nil")
}
@@ -116,6 +128,7 @@ func routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error {
msg.Type = uint8(route.Type)
}
+ msg.Flags = uint32(route.Flags)
msg.Scope = uint8(route.Scope)
msg.Family = uint8(family)
req.AddData(msg)
@@ -139,19 +152,32 @@ func routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error {
// Equivalent to: `ip route show`.
// The list can be filtered by link and ip family.
func RouteList(link Link, family int) ([]Route, error) {
+ return pkgHandle.RouteList(link, family)
+}
+
+// RouteList gets a list of routes in the system.
+// Equivalent to: `ip route show`.
+// The list can be filtered by link and ip family.
+func (h *Handle) RouteList(link Link, family int) ([]Route, error) {
var routeFilter *Route
if link != nil {
routeFilter = &Route{
LinkIndex: link.Attrs().Index,
}
}
- return RouteListFiltered(family, routeFilter, RT_FILTER_OIF)
+ return h.RouteListFiltered(family, routeFilter, RT_FILTER_OIF)
}
// RouteListFiltered gets a list of routes in the system filtered with specified rules.
// All rules must be defined in RouteFilter struct
func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) {
- req := nl.NewNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP)
+ return pkgHandle.RouteListFiltered(family, filter, filterMask)
+}
+
+// RouteListFiltered gets a list of routes in the system filtered with specified rules.
+// All rules must be defined in RouteFilter struct
+func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP)
infmsg := nl.NewIfInfomsg(family)
req.AddData(infmsg)
@@ -257,7 +283,13 @@ func deserializeRoute(m []byte) (Route, error) {
// RouteGet gets a route to a specific destination from the host system.
// Equivalent to: 'ip route get'.
func RouteGet(destination net.IP) ([]Route, error) {
- req := nl.NewNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_REQUEST)
+ return pkgHandle.RouteGet(destination)
+}
+
+// RouteGet gets a route to a specific destination from the host system.
+// Equivalent to: 'ip route get'.
+func (h *Handle) RouteGet(destination net.IP) ([]Route, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_REQUEST)
family := nl.GetIPFamily(destination)
var destinationData []byte
var bitlen uint8
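
A short, hedged example of the route API after this change (routeHandle now also serializes route.Flags into the message):

    _, dst, _ := net.ParseCIDR("192.168.200.0/24") // illustrative prefix
    route := &netlink.Route{
        LinkIndex: link.Attrs().Index,
        Dst:       dst,
        Gw:        net.ParseIP("192.168.200.1"),
    }
    if err := netlink.RouteAdd(route); err != nil {
        // routeHandle above requires one of Dst.IP, Src or Gw to be non-nil
    }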
diff --git a/vendor/src/github.com/vishvananda/netlink/rule_linux.go b/vendor/src/github.com/vishvananda/netlink/rule_linux.go
index ba84be00e7..8bce6666c9 100644
--- a/vendor/src/github.com/vishvananda/netlink/rule_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/rule_linux.go
@@ -11,14 +11,26 @@ import (
// RuleAdd adds a rule to the system.
// Equivalent to: ip rule add
func RuleAdd(rule *Rule) error {
- req := nl.NewNetlinkRequest(syscall.RTM_NEWRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ return pkgHandle.RuleAdd(rule)
+}
+
+// RuleAdd adds a rule to the system.
+// Equivalent to: ip rule add
+func (h *Handle) RuleAdd(rule *Rule) error {
+ req := h.newNetlinkRequest(syscall.RTM_NEWRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
return ruleHandle(rule, req)
}
// RuleDel deletes a rule from the system.
// Equivalent to: ip rule del
func RuleDel(rule *Rule) error {
- req := nl.NewNetlinkRequest(syscall.RTM_DELRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ return pkgHandle.RuleDel(rule)
+}
+
+// RuleDel deletes a rule from the system.
+// Equivalent to: ip rule del
+func (h *Handle) RuleDel(rule *Rule) error {
+ req := h.newNetlinkRequest(syscall.RTM_DELRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
return ruleHandle(rule, req)
}
@@ -128,7 +140,13 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error {
// RuleList lists rules in the system.
// Equivalent to: ip rule list
func RuleList(family int) ([]Rule, error) {
- req := nl.NewNetlinkRequest(syscall.RTM_GETRULE, syscall.NLM_F_DUMP|syscall.NLM_F_REQUEST)
+ return pkgHandle.RuleList(family)
+}
+
+// RuleList lists rules in the system.
+// Equivalent to: ip rule list
+func (h *Handle) RuleList(family int) ([]Rule, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETRULE, syscall.NLM_F_DUMP|syscall.NLM_F_REQUEST)
msg := nl.NewIfInfomsg(family)
req.AddData(msg)
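
Package-level rule calls are unchanged for existing callers; a one-line sketch:

    rules, err := netlink.RuleList(netlink.FAMILY_V4) // `ip rule list` for IPv4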
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm.go b/vendor/src/github.com/vishvananda/netlink/xfrm.go
index 621ffb6c68..6ec40acac9 100644
--- a/vendor/src/github.com/vishvananda/netlink/xfrm.go
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm.go
@@ -62,3 +62,13 @@ func (m Mode) String() string {
}
return fmt.Sprintf("%d", m)
}
+
+// XfrmMark represents the mark associated to the state or policy
+type XfrmMark struct {
+ Value uint32
+ Mask uint32
+}
+
+func (m *XfrmMark) String() string {
+ return fmt.Sprintf("(0x%x,0x%x)", m.Value, m.Mask)
+}
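
A hedged sketch of attaching the new mark; writeMark, later in this diff, widens a zero Mask to ^uint32(0) so a Value-only mark matches exactly:

    // policy is an assumed *netlink.XfrmPolicy; a zero Mask is serialized
    // as ^uint32(0), i.e. match the Value exactly.
    policy.Mark = &netlink.XfrmMark{Value: 0x1234}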
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_policy.go b/vendor/src/github.com/vishvananda/netlink/xfrm_policy.go
index d85c65d2d2..c97ec43a25 100644
--- a/vendor/src/github.com/vishvananda/netlink/xfrm_policy.go
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_policy.go
@@ -43,17 +43,32 @@ type XfrmPolicyTmpl struct {
Src net.IP
Proto Proto
Mode Mode
+ Spi int
Reqid int
}
+func (t XfrmPolicyTmpl) String() string {
+ return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, Mode: %s, Spi: 0x%x, Reqid: 0x%x}",
+ t.Dst, t.Src, t.Proto, t.Mode, t.Spi, t.Reqid)
+}
+
// XfrmPolicy represents an ipsec policy. It represents the overlay network
// and has a list of XfrmPolicyTmpls representing the base addresses of
// the policy.
type XfrmPolicy struct {
Dst *net.IPNet
Src *net.IPNet
+ Proto Proto
+ DstPort int
+ SrcPort int
Dir Dir
Priority int
Index int
+ Mark *XfrmMark
Tmpls []XfrmPolicyTmpl
}
+
+func (p XfrmPolicy) String() string {
+ return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Mark: %s, Tmpls: %s}",
+ p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Mark, p.Tmpls)
+}
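
Putting the new selector fields together, a hedged sketch of building a policy (the XFRM_PROTO_ESP, XFRM_DIR_OUT and XFRM_MODE_TUNNEL constants are assumed from the package's Proto/Dir/Mode enums; they are not part of this hunk):

    _, src, _ := net.ParseCIDR("10.0.0.0/24")
    _, dst, _ := net.ParseCIDR("10.1.0.0/24")
    policy := &netlink.XfrmPolicy{
        Src:     src,
        Dst:     dst,
        Proto:   netlink.XFRM_PROTO_ESP, // assumed constant
        DstPort: 443,
        Dir:     netlink.XFRM_DIR_OUT, // assumed constant
        Tmpls: []netlink.XfrmPolicyTmpl{{
            Src:   net.ParseIP("192.168.1.1"),
            Dst:   net.ParseIP("192.168.2.1"),
            Proto: netlink.XFRM_PROTO_ESP,
            Mode:  netlink.XFRM_MODE_TUNNEL, // assumed constant
            Spi:   0x100,
        }},
    }
    err := netlink.XfrmPolicyAdd(policy)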
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go
index 2daf6dc8b3..c3d4e42227 100644
--- a/vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go
@@ -7,19 +7,55 @@ import (
)
func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) {
- sel.Family = uint16(nl.GetIPFamily(policy.Dst.IP))
- sel.Daddr.FromIP(policy.Dst.IP)
- sel.Saddr.FromIP(policy.Src.IP)
- prefixlenD, _ := policy.Dst.Mask.Size()
- sel.PrefixlenD = uint8(prefixlenD)
- prefixlenS, _ := policy.Src.Mask.Size()
- sel.PrefixlenS = uint8(prefixlenS)
+ sel.Family = uint16(nl.FAMILY_V4)
+ if policy.Dst != nil {
+ sel.Family = uint16(nl.GetIPFamily(policy.Dst.IP))
+ sel.Daddr.FromIP(policy.Dst.IP)
+ prefixlenD, _ := policy.Dst.Mask.Size()
+ sel.PrefixlenD = uint8(prefixlenD)
+ }
+ if policy.Src != nil {
+ sel.Saddr.FromIP(policy.Src.IP)
+ prefixlenS, _ := policy.Src.Mask.Size()
+ sel.PrefixlenS = uint8(prefixlenS)
+ }
+ sel.Proto = uint8(policy.Proto)
+ sel.Dport = nl.Swap16(uint16(policy.DstPort))
+ sel.Sport = nl.Swap16(uint16(policy.SrcPort))
+ if sel.Dport != 0 {
+ sel.DportMask = ^uint16(0)
+ }
+ if sel.Sport != 0 {
+ sel.SportMask = ^uint16(0)
+ }
}
// XfrmPolicyAdd will add an xfrm policy to the system.
// Equivalent to: `ip xfrm policy add $policy`
func XfrmPolicyAdd(policy *XfrmPolicy) error {
- req := nl.NewNetlinkRequest(nl.XFRM_MSG_NEWPOLICY, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ return pkgHandle.XfrmPolicyAdd(policy)
+}
+
+// XfrmPolicyAdd will add an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy add $policy`
+func (h *Handle) XfrmPolicyAdd(policy *XfrmPolicy) error {
+ return h.xfrmPolicyAddOrUpdate(policy, nl.XFRM_MSG_NEWPOLICY)
+}
+
+// XfrmPolicyUpdate will update an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy update $policy`
+func XfrmPolicyUpdate(policy *XfrmPolicy) error {
+ return pkgHandle.XfrmPolicyUpdate(policy)
+}
+
+// XfrmPolicyUpdate will update an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy update $policy`
+func (h *Handle) XfrmPolicyUpdate(policy *XfrmPolicy) error {
+ return h.xfrmPolicyAddOrUpdate(policy, nl.XFRM_MSG_UPDPOLICY)
+}
+
+func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error {
+ req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
msg := &nl.XfrmUserpolicyInfo{}
selFromPolicy(&msg.Sel, policy)
@@ -39,6 +75,7 @@ func XfrmPolicyAdd(policy *XfrmPolicy) error {
userTmpl.XfrmId.Daddr.FromIP(tmpl.Dst)
userTmpl.Saddr.FromIP(tmpl.Src)
userTmpl.XfrmId.Proto = uint8(tmpl.Proto)
+ userTmpl.XfrmId.Spi = nl.Swap32(uint32(tmpl.Spi))
userTmpl.Mode = uint8(tmpl.Mode)
userTmpl.Reqid = uint32(tmpl.Reqid)
userTmpl.Aalgos = ^uint32(0)
@@ -49,6 +86,10 @@ func XfrmPolicyAdd(policy *XfrmPolicy) error {
tmpls := nl.NewRtAttr(nl.XFRMA_TMPL, tmplData)
req.AddData(tmpls)
}
+ if policy.Mark != nil {
+ out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(policy.Mark))
+ req.AddData(out)
+ }
_, err := req.Execute(syscall.NETLINK_XFRM, 0)
return err
@@ -58,15 +99,14 @@ func XfrmPolicyAdd(policy *XfrmPolicy) error {
// the Tmpls are ignored when matching the policy to delete.
// Equivalent to: `ip xfrm policy del $policy`
func XfrmPolicyDel(policy *XfrmPolicy) error {
- req := nl.NewNetlinkRequest(nl.XFRM_MSG_DELPOLICY, syscall.NLM_F_ACK)
-
- msg := &nl.XfrmUserpolicyId{}
- selFromPolicy(&msg.Sel, policy)
- msg.Index = uint32(policy.Index)
- msg.Dir = uint8(policy.Dir)
- req.AddData(msg)
+ return pkgHandle.XfrmPolicyDel(policy)
+}
- _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+// XfrmPolicyDel will delete an xfrm policy from the system. Note that
+// the Tmpls are ignored when matching the policy to delete.
+// Equivalent to: `ip xfrm policy del $policy`
+func (h *Handle) XfrmPolicyDel(policy *XfrmPolicy) error {
+ _, err := h.xfrmPolicyGetOrDelete(policy, nl.XFRM_MSG_DELPOLICY)
return err
}
@@ -74,7 +114,14 @@ func XfrmPolicyDel(policy *XfrmPolicy) error {
// Equivalent to: `ip xfrm policy show`.
// The list can be filtered by ip family.
func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
- req := nl.NewNetlinkRequest(nl.XFRM_MSG_GETPOLICY, syscall.NLM_F_DUMP)
+ return pkgHandle.XfrmPolicyList(family)
+}
+
+// XfrmPolicyList gets a list of xfrm policies in the system.
+// Equivalent to: `ip xfrm policy show`.
+// The list can be filtered by ip family.
+func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) {
+ req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, syscall.NLM_F_DUMP)
msg := nl.NewIfInfomsg(family)
req.AddData(msg)
@@ -86,42 +133,125 @@ func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
var res []XfrmPolicy
for _, m := range msgs {
- msg := nl.DeserializeXfrmUserpolicyInfo(m)
-
- if family != FAMILY_ALL && family != int(msg.Sel.Family) {
+ if policy, err := parseXfrmPolicy(m, family); err == nil {
+ res = append(res, *policy)
+ } else if err == familyError {
continue
+ } else {
+ return nil, err
}
+ }
+ return res, nil
+}
- var policy XfrmPolicy
+// XfrmPolicyGet gets the policy described by the index or selector, if found.
+// Equivalent to: `ip xfrm policy get { SELECTOR | index INDEX } dir DIR [ctx CTX ] [ mark MARK [ mask MASK ] ] [ ptype PTYPE ]`.
+func XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) {
+ return pkgHandle.XfrmPolicyGet(policy)
+}
- policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD)
- policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS)
- policy.Priority = int(msg.Priority)
- policy.Index = int(msg.Index)
- policy.Dir = Dir(msg.Dir)
+// XfrmPolicyGet gets the policy described by the index or selector, if found.
+// Equivalent to: `ip xfrm policy get { SELECTOR | index INDEX } dir DIR [ctx CTX ] [ mark MARK [ mask MASK ] ] [ ptype PTYPE ]`.
+func (h *Handle) XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) {
+ return h.xfrmPolicyGetOrDelete(policy, nl.XFRM_MSG_GETPOLICY)
+}
- attrs, err := nl.ParseRouteAttr(m[msg.Len():])
- if err != nil {
- return nil, err
- }
+// XfrmPolicyFlush will flush the policies on the system.
+// Equivalent to: `ip xfrm policy flush`
+func XfrmPolicyFlush() error {
+ return pkgHandle.XfrmPolicyFlush()
+}
+
+// XfrmPolicyFlush will flush the policies on the system.
+// Equivalent to: `ip xfrm policy flush`
+func (h *Handle) XfrmPolicyFlush() error {
+ req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHPOLICY, syscall.NLM_F_ACK)
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
+
+func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPolicy, error) {
+ req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUserpolicyId{}
+ selFromPolicy(&msg.Sel, policy)
+ msg.Index = uint32(policy.Index)
+ msg.Dir = uint8(policy.Dir)
+ req.AddData(msg)
+
+ if policy.Mark != nil {
+ out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(policy.Mark))
+ req.AddData(out)
+ }
+
+ resType := nl.XFRM_MSG_NEWPOLICY
+ if nlProto == nl.XFRM_MSG_DELPOLICY {
+ resType = 0
+ }
+
+ msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType))
+ if err != nil {
+ return nil, err
+ }
- for _, attr := range attrs {
- switch attr.Attr.Type {
- case nl.XFRMA_TMPL:
- max := len(attr.Value)
- for i := 0; i < max; i += nl.SizeofXfrmUserTmpl {
- var resTmpl XfrmPolicyTmpl
- tmpl := nl.DeserializeXfrmUserTmpl(attr.Value[i : i+nl.SizeofXfrmUserTmpl])
- resTmpl.Dst = tmpl.XfrmId.Daddr.ToIP()
- resTmpl.Src = tmpl.Saddr.ToIP()
- resTmpl.Proto = Proto(tmpl.XfrmId.Proto)
- resTmpl.Mode = Mode(tmpl.Mode)
- resTmpl.Reqid = int(tmpl.Reqid)
- policy.Tmpls = append(policy.Tmpls, resTmpl)
- }
+ if nlProto == nl.XFRM_MSG_DELPOLICY {
+ return nil, err
+ }
+
+ p, err := parseXfrmPolicy(msgs[0], FAMILY_ALL)
+ if err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) {
+ msg := nl.DeserializeXfrmUserpolicyInfo(m)
+
+ // This is mainly for the policy dump
+ if family != FAMILY_ALL && family != int(msg.Sel.Family) {
+ return nil, familyError
+ }
+
+ var policy XfrmPolicy
+
+ policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD)
+ policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS)
+ policy.Proto = Proto(msg.Sel.Proto)
+ policy.DstPort = int(nl.Swap16(msg.Sel.Dport))
+ policy.SrcPort = int(nl.Swap16(msg.Sel.Sport))
+ policy.Priority = int(msg.Priority)
+ policy.Index = int(msg.Index)
+ policy.Dir = Dir(msg.Dir)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.XFRMA_TMPL:
+ max := len(attr.Value)
+ for i := 0; i < max; i += nl.SizeofXfrmUserTmpl {
+ var resTmpl XfrmPolicyTmpl
+ tmpl := nl.DeserializeXfrmUserTmpl(attr.Value[i : i+nl.SizeofXfrmUserTmpl])
+ resTmpl.Dst = tmpl.XfrmId.Daddr.ToIP()
+ resTmpl.Src = tmpl.Saddr.ToIP()
+ resTmpl.Proto = Proto(tmpl.XfrmId.Proto)
+ resTmpl.Mode = Mode(tmpl.Mode)
+ resTmpl.Spi = int(nl.Swap32(tmpl.XfrmId.Spi))
+ resTmpl.Reqid = int(tmpl.Reqid)
+ policy.Tmpls = append(policy.Tmpls, resTmpl)
}
+ case nl.XFRMA_MARK:
+ mark := nl.DeserializeXfrmMark(attr.Value[:])
+ policy.Mark = new(XfrmMark)
+ policy.Mark.Value = mark.Value
+ policy.Mark.Mask = mark.Mask
}
- res = append(res, policy)
}
- return res, nil
+
+ return &policy, nil
}
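
The new get and flush entry points mirror iproute2; a brief, hedged sketch:

    got, err := netlink.XfrmPolicyGet(&netlink.XfrmPolicy{
        Index: 1234,                 // illustrative index
        Dir:   netlink.XFRM_DIR_OUT, // assumed constant
    })
    if err == nil {
        fmt.Println(got)
    }
    // To drop all policies:
    err = netlink.XfrmPolicyFlush()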
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_state.go b/vendor/src/github.com/vishvananda/netlink/xfrm_state.go
index 5b8f2df708..7f38bfa226 100644
--- a/vendor/src/github.com/vishvananda/netlink/xfrm_state.go
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_state.go
@@ -1,7 +1,10 @@
package netlink
import (
+ "fmt"
"net"
+
+ "github.com/vishvananda/netlink/nl"
)
// XfrmStateAlgo represents the algorithm to use for the ipsec encryption.
@@ -11,7 +14,11 @@ type XfrmStateAlgo struct {
TruncateLen int // Auth only
}
-// EncapType is an enum representing an ipsec template direction.
+func (a XfrmStateAlgo) String() string {
+ return fmt.Sprintf("{Name: %s, Key: 0x%x, TruncateLen: %d}", a.Name, a.Key, a.TruncateLen)
+}
+
+// EncapType is an enum representing the optional packet encapsulation.
type EncapType uint8
const (
@@ -22,14 +29,14 @@ const (
func (e EncapType) String() string {
switch e {
case XFRM_ENCAP_ESPINUDP_NONIKE:
- return "espinudp-nonike"
+ return "espinudp-non-ike"
case XFRM_ENCAP_ESPINUDP:
return "espinudp"
}
return "unknown"
}
-// XfrmEncap represents the encapsulation to use for the ipsec encryption.
+// XfrmStateEncap represents the encapsulation to use for the ipsec encryption.
type XfrmStateEncap struct {
Type EncapType
SrcPort int
@@ -37,6 +44,23 @@ type XfrmStateEncap struct {
OriginalAddress net.IP
}
+func (e XfrmStateEncap) String() string {
+ return fmt.Sprintf("{Type: %s, Srcport: %d, DstPort: %d, OriginalAddress: %v}",
+ e.Type, e.SrcPort, e.DstPort, e.OriginalAddress)
+}
+
+// XfrmStateLimits represents the configured limits for the state.
+type XfrmStateLimits struct {
+ ByteSoft uint64
+ ByteHard uint64
+ PacketSoft uint64
+ PacketHard uint64
+ TimeSoft uint64
+ TimeHard uint64
+ TimeUseSoft uint64
+ TimeUseHard uint64
+}
+
// XfrmState represents the state of an ipsec policy. It optionally
// contains an XfrmStateAlgo for encryption and one for authentication.
type XfrmState struct {
@@ -47,7 +71,30 @@ type XfrmState struct {
Spi int
Reqid int
ReplayWindow int
+ Limits XfrmStateLimits
+ Mark *XfrmMark
Auth *XfrmStateAlgo
Crypt *XfrmStateAlgo
Encap *XfrmStateEncap
}
+
+func (sa XfrmState) String() string {
+ return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, Auth: %v, Crypt: %v, Encap: %v",
+ sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.Auth, sa.Crypt, sa.Encap)
+}
+func (sa XfrmState) Print(stats bool) string {
+ if !stats {
+ return sa.String()
+ }
+
+ return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d",
+ sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard),
+ sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard)
+}
+
+func printLimit(lmt uint64) string {
+ if lmt == nl.XFRM_INF {
+ return "(INF)"
+ }
+ return fmt.Sprintf("%d", lmt)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go
index fc8604b9d1..5f294c713d 100644
--- a/vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go
@@ -3,6 +3,7 @@ package netlink
import (
"fmt"
"syscall"
+ "unsafe"
"github.com/vishvananda/netlink/nl"
)
@@ -34,14 +35,47 @@ func writeStateAlgoAuth(a *XfrmStateAlgo) []byte {
return algo.Serialize()
}
+func writeMark(m *XfrmMark) []byte {
+ mark := &nl.XfrmMark{
+ Value: m.Value,
+ Mask: m.Mask,
+ }
+ if mark.Mask == 0 {
+ mark.Mask = ^uint32(0)
+ }
+ return mark.Serialize()
+}
+
// XfrmStateAdd will add an xfrm state to the system.
// Equivalent to: `ip xfrm state add $state`
func XfrmStateAdd(state *XfrmState) error {
+ return pkgHandle.XfrmStateAdd(state)
+}
+
+// XfrmStateAdd will add an xfrm state to the system.
+// Equivalent to: `ip xfrm state add $state`
+func (h *Handle) XfrmStateAdd(state *XfrmState) error {
+ return h.xfrmStateAddOrUpdate(state, nl.XFRM_MSG_NEWSA)
+}
+
+// XfrmStateUpdate will update an xfrm state to the system.
+// Equivalent to: `ip xfrm state update $state`
+func XfrmStateUpdate(state *XfrmState) error {
+ return pkgHandle.XfrmStateUpdate(state)
+}
+
+// XfrmStateUpdate will update an xfrm state to the system.
+// Equivalent to: `ip xfrm state update $state`
+func (h *Handle) XfrmStateUpdate(state *XfrmState) error {
+ return h.xfrmStateAddOrUpdate(state, nl.XFRM_MSG_UPDSA)
+}
+
+func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error {
// A state with spi 0 can't be deleted so don't allow it to be set
if state.Spi == 0 {
return fmt.Errorf("Spi must be set when adding xfrm state.")
}
- req := nl.NewNetlinkRequest(nl.XFRM_MSG_NEWSA, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
msg := &nl.XfrmUsersaInfo{}
msg.Family = uint16(nl.GetIPFamily(state.Dst))
@@ -52,10 +86,7 @@ func XfrmStateAdd(state *XfrmState) error {
msg.Id.Spi = nl.Swap32(uint32(state.Spi))
msg.Reqid = uint32(state.Reqid)
msg.ReplayWindow = uint8(state.ReplayWindow)
- msg.Lft.SoftByteLimit = nl.XFRM_INF
- msg.Lft.HardByteLimit = nl.XFRM_INF
- msg.Lft.SoftPacketLimit = nl.XFRM_INF
- msg.Lft.HardPacketLimit = nl.XFRM_INF
+ limitsToLft(state.Limits, &msg.Lft)
req.AddData(msg)
if state.Auth != nil {
@@ -76,6 +107,10 @@ func XfrmStateAdd(state *XfrmState) error {
out := nl.NewRtAttr(nl.XFRMA_ENCAP, encapData)
req.AddData(out)
}
+ if state.Mark != nil {
+ out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark))
+ req.AddData(out)
+ }
_, err := req.Execute(syscall.NETLINK_XFRM, 0)
return err
@@ -85,30 +120,29 @@ func XfrmStateAdd(state *XfrmState) error {
// the Algos are ignored when matching the state to delete.
// Equivalent to: `ip xfrm state del $state`
func XfrmStateDel(state *XfrmState) error {
- req := nl.NewNetlinkRequest(nl.XFRM_MSG_DELSA, syscall.NLM_F_ACK)
-
- msg := &nl.XfrmUsersaId{}
- msg.Daddr.FromIP(state.Dst)
- msg.Family = uint16(nl.GetIPFamily(state.Dst))
- msg.Proto = uint8(state.Proto)
- msg.Spi = nl.Swap32(uint32(state.Spi))
- req.AddData(msg)
-
- saddr := nl.XfrmAddress{}
- saddr.FromIP(state.Src)
- srcdata := nl.NewRtAttr(nl.XFRMA_SRCADDR, saddr.Serialize())
-
- req.AddData(srcdata)
+ return pkgHandle.XfrmStateDel(state)
+}
- _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+// XfrmStateDel will delete an xfrm state from the system. Note that
+// the Algos are ignored when matching the state to delete.
+// Equivalent to: `ip xfrm state del $state`
+func (h *Handle) XfrmStateDel(state *XfrmState) error {
+ _, err := h.xfrmStateGetOrDelete(state, nl.XFRM_MSG_DELSA)
return err
}
// XfrmStateList gets a list of xfrm states in the system.
-// Equivalent to: `ip xfrm state show`.
+// Equivalent to: `ip [-4|-6] xfrm state show`.
// The list can be filtered by ip family.
func XfrmStateList(family int) ([]XfrmState, error) {
- req := nl.NewNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP)
+ return pkgHandle.XfrmStateList(family)
+}
+
+// XfrmStateList gets a list of xfrm states in the system.
+// Equivalent to: `ip xfrm state show`.
+// The list can be filtered by ip family.
+func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) {
+ req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP)
msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWSA)
if err != nil {
@@ -117,62 +151,194 @@ func XfrmStateList(family int) ([]XfrmState, error) {
var res []XfrmState
for _, m := range msgs {
- msg := nl.DeserializeXfrmUsersaInfo(m)
-
- if family != FAMILY_ALL && family != int(msg.Family) {
+ if state, err := parseXfrmState(m, family); err == nil {
+ res = append(res, *state)
+ } else if err == familyError {
continue
+ } else {
+ return nil, err
}
+ }
+ return res, nil
+}
- var state XfrmState
+// XfrmStateGet gets the xfrm state described by the ID, if found.
+// Equivalent to: `ip xfrm state get ID [ mark MARK [ mask MASK ] ]`.
+// Only the fields which constitute the SA ID must be filled in:
+// ID := [ src ADDR ] [ dst ADDR ] [ proto XFRM-PROTO ] [ spi SPI ]
+// mark is optional
+func XfrmStateGet(state *XfrmState) (*XfrmState, error) {
+ return pkgHandle.XfrmStateGet(state)
+}
- state.Dst = msg.Id.Daddr.ToIP()
- state.Src = msg.Saddr.ToIP()
- state.Proto = Proto(msg.Id.Proto)
- state.Mode = Mode(msg.Mode)
- state.Spi = int(nl.Swap32(msg.Id.Spi))
- state.Reqid = int(msg.Reqid)
- state.ReplayWindow = int(msg.ReplayWindow)
+// XfrmStateGet gets the xfrm state described by the ID, if found.
+// Equivalent to: `ip xfrm state get ID [ mark MARK [ mask MASK ] ]`.
+// Only the fields which constitute the SA ID must be filled in:
+// ID := [ src ADDR ] [ dst ADDR ] [ proto XFRM-PROTO ] [ spi SPI ]
+// mark is optional
+func (h *Handle) XfrmStateGet(state *XfrmState) (*XfrmState, error) {
+ return h.xfrmStateGetOrDelete(state, nl.XFRM_MSG_GETSA)
+}
- attrs, err := nl.ParseRouteAttr(m[msg.Len():])
- if err != nil {
- return nil, err
- }
+func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState, error) {
+ req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK)
- for _, attr := range attrs {
- switch attr.Attr.Type {
- case nl.XFRMA_ALG_AUTH, nl.XFRMA_ALG_CRYPT:
- var resAlgo *XfrmStateAlgo
- if attr.Attr.Type == nl.XFRMA_ALG_AUTH {
- if state.Auth == nil {
- state.Auth = new(XfrmStateAlgo)
- }
- resAlgo = state.Auth
- } else {
- state.Crypt = new(XfrmStateAlgo)
- resAlgo = state.Crypt
- }
- algo := nl.DeserializeXfrmAlgo(attr.Value[:])
- (*resAlgo).Name = nl.BytesToString(algo.AlgName[:])
- (*resAlgo).Key = algo.AlgKey
- case nl.XFRMA_ALG_AUTH_TRUNC:
+ msg := &nl.XfrmUsersaId{}
+ msg.Family = uint16(nl.GetIPFamily(state.Dst))
+ msg.Daddr.FromIP(state.Dst)
+ msg.Proto = uint8(state.Proto)
+ msg.Spi = nl.Swap32(uint32(state.Spi))
+ req.AddData(msg)
+
+ if state.Mark != nil {
+ out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark))
+ req.AddData(out)
+ }
+ if state.Src != nil {
+ out := nl.NewRtAttr(nl.XFRMA_SRCADDR, state.Src)
+ req.AddData(out)
+ }
+
+ resType := nl.XFRM_MSG_NEWSA
+ if nlProto == nl.XFRM_MSG_DELSA {
+ resType = 0
+ }
+
+ msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType))
+ if err != nil {
+ return nil, err
+ }
+
+ if nlProto == nl.XFRM_MSG_DELSA {
+ return nil, nil
+ }
+
+ s, err := parseXfrmState(msgs[0], FAMILY_ALL)
+ if err != nil {
+ return nil, err
+ }
+
+ return s, nil
+}
+
+var familyError = fmt.Errorf("family error")
+
+func parseXfrmState(m []byte, family int) (*XfrmState, error) {
+ msg := nl.DeserializeXfrmUsersaInfo(m)
+
+ // This is mainly for the state dump
+ if family != FAMILY_ALL && family != int(msg.Family) {
+ return nil, familyError
+ }
+
+ var state XfrmState
+
+ state.Dst = msg.Id.Daddr.ToIP()
+ state.Src = msg.Saddr.ToIP()
+ state.Proto = Proto(msg.Id.Proto)
+ state.Mode = Mode(msg.Mode)
+ state.Spi = int(nl.Swap32(msg.Id.Spi))
+ state.Reqid = int(msg.Reqid)
+ state.ReplayWindow = int(msg.ReplayWindow)
+ lftToLimits(&msg.Lft, &state.Limits)
+
+ attrs, err := nl.ParseRouteAttr(m[nl.SizeofXfrmUsersaInfo:])
+ if err != nil {
+ return nil, err
+ }
+
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.XFRMA_ALG_AUTH, nl.XFRMA_ALG_CRYPT:
+ var resAlgo *XfrmStateAlgo
+ if attr.Attr.Type == nl.XFRMA_ALG_AUTH {
if state.Auth == nil {
state.Auth = new(XfrmStateAlgo)
}
- algo := nl.DeserializeXfrmAlgoAuth(attr.Value[:])
- state.Auth.Name = nl.BytesToString(algo.AlgName[:])
- state.Auth.Key = algo.AlgKey
- state.Auth.TruncateLen = int(algo.AlgTruncLen)
- case nl.XFRMA_ENCAP:
- encap := nl.DeserializeXfrmEncapTmpl(attr.Value[:])
- state.Encap = new(XfrmStateEncap)
- state.Encap.Type = EncapType(encap.EncapType)
- state.Encap.SrcPort = int(nl.Swap16(encap.EncapSport))
- state.Encap.DstPort = int(nl.Swap16(encap.EncapDport))
- state.Encap.OriginalAddress = encap.EncapOa.ToIP()
+ resAlgo = state.Auth
+ } else {
+ state.Crypt = new(XfrmStateAlgo)
+ resAlgo = state.Crypt
}
-
+ algo := nl.DeserializeXfrmAlgo(attr.Value[:])
+ (*resAlgo).Name = nl.BytesToString(algo.AlgName[:])
+ (*resAlgo).Key = algo.AlgKey
+ case nl.XFRMA_ALG_AUTH_TRUNC:
+ if state.Auth == nil {
+ state.Auth = new(XfrmStateAlgo)
+ }
+ algo := nl.DeserializeXfrmAlgoAuth(attr.Value[:])
+ state.Auth.Name = nl.BytesToString(algo.AlgName[:])
+ state.Auth.Key = algo.AlgKey
+ state.Auth.TruncateLen = int(algo.AlgTruncLen)
+ case nl.XFRMA_ENCAP:
+ encap := nl.DeserializeXfrmEncapTmpl(attr.Value[:])
+ state.Encap = new(XfrmStateEncap)
+ state.Encap.Type = EncapType(encap.EncapType)
+ state.Encap.SrcPort = int(nl.Swap16(encap.EncapSport))
+ state.Encap.DstPort = int(nl.Swap16(encap.EncapDport))
+ state.Encap.OriginalAddress = encap.EncapOa.ToIP()
+ case nl.XFRMA_MARK:
+ mark := nl.DeserializeXfrmMark(attr.Value[:])
+ state.Mark = new(XfrmMark)
+ state.Mark.Value = mark.Value
+ state.Mark.Mask = mark.Mask
}
- res = append(res, state)
}
- return res, nil
+
+ return &state, nil
+}
+
+// XfrmStateFlush will flush the xfrm state on the system.
+// proto = 0 means any transformation protocol
+// Equivalent to: `ip xfrm state flush [ proto XFRM-PROTO ]`
+func XfrmStateFlush(proto Proto) error {
+ return pkgHandle.XfrmStateFlush(proto)
+}
+
+// XfrmStateFlush will flush the xfrm state on the system.
+// proto = 0 means any transformation protocol
+// Equivalent to: `ip xfrm state flush [ proto XFRM-PROTO ]`
+func (h *Handle) XfrmStateFlush(proto Proto) error {
+ req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHSA, syscall.NLM_F_ACK)
+
+ req.AddData(&nl.XfrmUsersaFlush{Proto: uint8(proto)})
+
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func limitsToLft(lmts XfrmStateLimits, lft *nl.XfrmLifetimeCfg) {
+ if lmts.ByteSoft != 0 {
+ lft.SoftByteLimit = lmts.ByteSoft
+ } else {
+ lft.SoftByteLimit = nl.XFRM_INF
+ }
+ if lmts.ByteHard != 0 {
+ lft.HardByteLimit = lmts.ByteHard
+ } else {
+ lft.HardByteLimit = nl.XFRM_INF
+ }
+ if lmts.PacketSoft != 0 {
+ lft.SoftPacketLimit = lmts.PacketSoft
+ } else {
+ lft.SoftPacketLimit = nl.XFRM_INF
+ }
+ if lmts.PacketHard != 0 {
+ lft.HardPacketLimit = lmts.PacketHard
+ } else {
+ lft.HardPacketLimit = nl.XFRM_INF
+ }
+ lft.SoftAddExpiresSeconds = lmts.TimeSoft
+ lft.HardAddExpiresSeconds = lmts.TimeHard
+ lft.SoftUseExpiresSeconds = lmts.TimeUseSoft
+ lft.HardUseExpiresSeconds = lmts.TimeUseHard
+}
+
+func lftToLimits(lft *nl.XfrmLifetimeCfg, lmts *XfrmStateLimits) {
+ *lmts = *(*XfrmStateLimits)(unsafe.Pointer(lft))
}
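
limitsToLft above gives XfrmStateLimits zero-value semantics of "no limit"; a hedged sketch setting only a hard byte lifetime (state is an assumed *netlink.XfrmState populated as for XfrmStateAdd):

    state.Limits = netlink.XfrmStateLimits{ByteHard: 64 * 1024 * 1024} // zero fields become XFRM_INF
    if err := netlink.XfrmStateUpdate(state); err != nil {
        // handle error
    }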
diff --git a/vendor/src/golang.org/x/crypto/LICENSE b/vendor/src/golang.org/x/crypto/LICENSE
new file mode 100644
index 0000000000..6a66aea5ea
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/golang.org/x/crypto/bcrypt/base64.go b/vendor/src/golang.org/x/crypto/bcrypt/base64.go
new file mode 100644
index 0000000000..fc31160908
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/bcrypt/base64.go
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcrypt
+
+import "encoding/base64"
+
+const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+
+var bcEncoding = base64.NewEncoding(alphabet)
+
+func base64Encode(src []byte) []byte {
+ n := bcEncoding.EncodedLen(len(src))
+ dst := make([]byte, n)
+ bcEncoding.Encode(dst, src)
+ for dst[n-1] == '=' {
+ n--
+ }
+ return dst[:n]
+}
+
+func base64Decode(src []byte) ([]byte, error) {
+ numOfEquals := 4 - (len(src) % 4)
+ for i := 0; i < numOfEquals; i++ {
+ src = append(src, '=')
+ }
+
+ dst := make([]byte, bcEncoding.DecodedLen(len(src)))
+ n, err := bcEncoding.Decode(dst, src)
+ if err != nil {
+ return nil, err
+ }
+ return dst[:n], nil
+}
diff --git a/vendor/src/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/src/golang.org/x/crypto/bcrypt/bcrypt.go
new file mode 100644
index 0000000000..f8b807f9c3
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/bcrypt/bcrypt.go
@@ -0,0 +1,294 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
+// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
+package bcrypt // import "golang.org/x/crypto/bcrypt"
+
+// The code is a port of Provos and Mazières's C implementation.
+import (
+ "crypto/rand"
+ "crypto/subtle"
+ "errors"
+ "fmt"
+ "golang.org/x/crypto/blowfish"
+ "io"
+ "strconv"
+)
+
+const (
+ MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword
+ MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
+ DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
+)
+
+// The error returned from CompareHashAndPassword when a password and hash do
+// not match.
+var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
+
+// The error returned from CompareHashAndPassword when a hash is too short to
+// be a bcrypt hash.
+var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
+
+// The error returned from CompareHashAndPassword when a hash was created with
+// a bcrypt algorithm newer than this implementation.
+type HashVersionTooNewError byte
+
+func (hv HashVersionTooNewError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
+}
+
+// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
+type InvalidHashPrefixError byte
+
+func (ih InvalidHashPrefixError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
+}
+
+type InvalidCostError int
+
+func (ic InvalidCostError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
+}
+
+const (
+ majorVersion = '2'
+ minorVersion = 'a'
+ maxSaltSize = 16
+ maxCryptedHashSize = 23
+ encodedSaltSize = 22
+ encodedHashSize = 31
+ minHashSize = 59
+)
+
+// magicCipherData is an IV for the 64 Blowfish encryption calls in
+// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
+var magicCipherData = []byte{
+ 0x4f, 0x72, 0x70, 0x68,
+ 0x65, 0x61, 0x6e, 0x42,
+ 0x65, 0x68, 0x6f, 0x6c,
+ 0x64, 0x65, 0x72, 0x53,
+ 0x63, 0x72, 0x79, 0x44,
+ 0x6f, 0x75, 0x62, 0x74,
+}
+
+type hashed struct {
+ hash []byte
+ salt []byte
+ cost int // allowed range is MinCost to MaxCost
+ major byte
+ minor byte
+}
+
+// GenerateFromPassword returns the bcrypt hash of the password at the given
+// cost. If the cost given is less than MinCost, the cost will be set to
+// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
+// to compare the returned hashed password with its cleartext version.
+func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
+ p, err := newFromPassword(password, cost)
+ if err != nil {
+ return nil, err
+ }
+ return p.Hash(), nil
+}
+
+// CompareHashAndPassword compares a bcrypt hashed password with its possible
+// plaintext equivalent. Returns nil on success, or an error on failure.
+func CompareHashAndPassword(hashedPassword, password []byte) error {
+ p, err := newFromHash(hashedPassword)
+ if err != nil {
+ return err
+ }
+
+ otherHash, err := bcrypt(password, p.cost, p.salt)
+ if err != nil {
+ return err
+ }
+
+ otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
+ if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
+ return nil
+ }
+
+ return ErrMismatchedHashAndPassword
+}
+
+// Cost returns the hashing cost used to create the given hashed
+// password. When, in the future, the hashing cost of a password system needs
+// to be increased in order to adjust for greater computational power, this
+// function allows one to establish which passwords need to be updated.
+func Cost(hashedPassword []byte) (int, error) {
+ p, err := newFromHash(hashedPassword)
+ if err != nil {
+ return 0, err
+ }
+ return p.cost, nil
+}
+
+func newFromPassword(password []byte, cost int) (*hashed, error) {
+ if cost < MinCost {
+ cost = DefaultCost
+ }
+ p := new(hashed)
+ p.major = majorVersion
+ p.minor = minorVersion
+
+ err := checkCost(cost)
+ if err != nil {
+ return nil, err
+ }
+ p.cost = cost
+
+ unencodedSalt := make([]byte, maxSaltSize)
+ _, err = io.ReadFull(rand.Reader, unencodedSalt)
+ if err != nil {
+ return nil, err
+ }
+
+ p.salt = base64Encode(unencodedSalt)
+ hash, err := bcrypt(password, p.cost, p.salt)
+ if err != nil {
+ return nil, err
+ }
+ p.hash = hash
+ return p, err
+}
+
+func newFromHash(hashedSecret []byte) (*hashed, error) {
+ if len(hashedSecret) < minHashSize {
+ return nil, ErrHashTooShort
+ }
+ p := new(hashed)
+ n, err := p.decodeVersion(hashedSecret)
+ if err != nil {
+ return nil, err
+ }
+ hashedSecret = hashedSecret[n:]
+ n, err = p.decodeCost(hashedSecret)
+ if err != nil {
+ return nil, err
+ }
+ hashedSecret = hashedSecret[n:]
+
+ // The "+2" is here because we'll have to append at most 2 '=' to the salt
+ // when base64 decoding it in expensiveBlowfishSetup().
+ p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
+ copy(p.salt, hashedSecret[:encodedSaltSize])
+
+ hashedSecret = hashedSecret[encodedSaltSize:]
+ p.hash = make([]byte, len(hashedSecret))
+ copy(p.hash, hashedSecret)
+
+ return p, nil
+}
+
+func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
+ cipherData := make([]byte, len(magicCipherData))
+ copy(cipherData, magicCipherData)
+
+ c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
+ if err != nil {
+ return nil, err
+ }
+
+ for i := 0; i < 24; i += 8 {
+ for j := 0; j < 64; j++ {
+ c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
+ }
+ }
+
+ // Bug compatibility with C bcrypt implementations. We only encode 23 of
+ // the 24 bytes encrypted.
+ hsh := base64Encode(cipherData[:maxCryptedHashSize])
+ return hsh, nil
+}
+
+func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
+
+ csalt, err := base64Decode(salt)
+ if err != nil {
+ return nil, err
+ }
+
+ // Bug compatibility with C bcrypt implementations. They use the trailing
+ // NULL in the key string during expansion.
+ ckey := append(key, 0)
+
+ c, err := blowfish.NewSaltedCipher(ckey, csalt)
+ if err != nil {
+ return nil, err
+ }
+
+ var i, rounds uint64
+ rounds = 1 << cost
+ for i = 0; i < rounds; i++ {
+ blowfish.ExpandKey(ckey, c)
+ blowfish.ExpandKey(csalt, c)
+ }
+
+ return c, nil
+}
+
+func (p *hashed) Hash() []byte {
+ arr := make([]byte, 60)
+ arr[0] = '$'
+ arr[1] = p.major
+ n := 2
+ if p.minor != 0 {
+ arr[2] = p.minor
+ n = 3
+ }
+ arr[n] = '$'
+ n += 1
+ copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
+ n += 2
+ arr[n] = '$'
+ n += 1
+ copy(arr[n:], p.salt)
+ n += encodedSaltSize
+ copy(arr[n:], p.hash)
+ n += encodedHashSize
+ return arr[:n]
+}
+
+func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
+ if sbytes[0] != '$' {
+ return -1, InvalidHashPrefixError(sbytes[0])
+ }
+ if sbytes[1] > majorVersion {
+ return -1, HashVersionTooNewError(sbytes[1])
+ }
+ p.major = sbytes[1]
+ n := 3
+ if sbytes[2] != '$' {
+ p.minor = sbytes[2]
+ n++
+ }
+ return n, nil
+}
+
+// sbytes should begin where decodeVersion left off.
+func (p *hashed) decodeCost(sbytes []byte) (int, error) {
+ cost, err := strconv.Atoi(string(sbytes[0:2]))
+ if err != nil {
+ return -1, err
+ }
+ err = checkCost(cost)
+ if err != nil {
+ return -1, err
+ }
+ p.cost = cost
+ return 3, nil
+}
+
+func (p *hashed) String() string {
+ return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
+}
+
+func checkCost(cost int) error {
+ if cost < MinCost || cost > MaxCost {
+ return InvalidCostError(cost)
+ }
+ return nil
+}
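
The newly vendored bcrypt package is consumed through two top-level helpers; a minimal example:

    // import "golang.org/x/crypto/bcrypt"
    hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
    if err != nil {
        // handle error
    }
    // Returns nil on match, ErrMismatchedHashAndPassword otherwise:
    err = bcrypt.CompareHashAndPassword(hash, []byte("s3cret"))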
diff --git a/vendor/src/golang.org/x/crypto/blowfish/block.go b/vendor/src/golang.org/x/crypto/blowfish/block.go
new file mode 100644
index 0000000000..9d80f19521
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/blowfish/block.go
@@ -0,0 +1,159 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blowfish
+
+// getNextWord returns the next big-endian uint32 value from the byte slice
+// at the given position in a circular manner, updating the position.
+func getNextWord(b []byte, pos *int) uint32 {
+ var w uint32
+ j := *pos
+ for i := 0; i < 4; i++ {
+ w = w<<8 | uint32(b[j])
+ j++
+ if j >= len(b) {
+ j = 0
+ }
+ }
+ *pos = j
+ return w
+}
+
+// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
+// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
+// pi and substitution tables for calls to Encrypt. This is used, primarily,
+// by the bcrypt package to reuse the Blowfish key schedule during its
+// set up. It's unlikely that you need to use this directly.
+func ExpandKey(key []byte, c *Cipher) {
+ j := 0
+ for i := 0; i < 18; i++ {
+ // Using inlined getNextWord for performance.
+ var d uint32
+ for k := 0; k < 4; k++ {
+ d = d<<8 | uint32(key[j])
+ j++
+ if j >= len(key) {
+ j = 0
+ }
+ }
+ c.p[i] ^= d
+ }
+
+ var l, r uint32
+ for i := 0; i < 18; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.p[i], c.p[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.s0[i], c.s0[i+1] = l, r
+ }
+ for i := 0; i < 256; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.s1[i], c.s1[i+1] = l, r
+ }
+ for i := 0; i < 256; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.s2[i], c.s2[i+1] = l, r
+ }
+ for i := 0; i < 256; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.s3[i], c.s3[i+1] = l, r
+ }
+}
+
+// This is similar to ExpandKey, but folds the salt during the key
+// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
+// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
+// and specializing it here is useful.
+func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
+ j := 0
+ for i := 0; i < 18; i++ {
+ c.p[i] ^= getNextWord(key, &j)
+ }
+
+ j = 0
+ var l, r uint32
+ for i := 0; i < 18; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.p[i], c.p[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.s0[i], c.s0[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.s1[i], c.s1[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.s2[i], c.s2[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.s3[i], c.s3[i+1] = l, r
+ }
+}
+
+func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
+ xl, xr := l, r
+ xl ^= c.p[0]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
+ xr ^= c.p[17]
+ return xr, xl
+}
+
+func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
+ xl, xr := l, r
+ xl ^= c.p[17]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
+ xr ^= c.p[0]
+ return xr, xl
+}
diff --git a/vendor/src/golang.org/x/crypto/blowfish/cipher.go b/vendor/src/golang.org/x/crypto/blowfish/cipher.go
new file mode 100644
index 0000000000..542984aa8d
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/blowfish/cipher.go
@@ -0,0 +1,91 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
+package blowfish // import "golang.org/x/crypto/blowfish"
+
+// The code is a port of Bruce Schneier's C implementation.
+// See http://www.schneier.com/blowfish.html.
+
+import "strconv"
+
+// The Blowfish block size in bytes.
+const BlockSize = 8
+
+// A Cipher is an instance of Blowfish encryption using a particular key.
+type Cipher struct {
+ p [18]uint32
+ s0, s1, s2, s3 [256]uint32
+}
+
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+ return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a Cipher.
+// The key argument should be the Blowfish key, from 1 to 56 bytes.
+func NewCipher(key []byte) (*Cipher, error) {
+ var result Cipher
+ if k := len(key); k < 1 || k > 56 {
+ return nil, KeySizeError(k)
+ }
+ initCipher(&result)
+ ExpandKey(key, &result)
+ return &result, nil
+}
+
+// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
+// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
+// sufficient and desirable. For bcrypt compatibility, the key can be over 56
+// bytes.
+func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
+ if len(salt) == 0 {
+ return NewCipher(key)
+ }
+ var result Cipher
+ if k := len(key); k < 1 {
+ return nil, KeySizeError(k)
+ }
+ initCipher(&result)
+ expandKeyWithSalt(key, salt, &result)
+ return &result, nil
+}
+
+// BlockSize returns the Blowfish block size, 8 bytes.
+// It is necessary to satisfy the Block interface in the
+// package "crypto/cipher".
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// Encrypt encrypts the 8-byte buffer src using the key k
+// and stores the result in dst.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
+func (c *Cipher) Encrypt(dst, src []byte) {
+ l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+ r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+ l, r = encryptBlock(l, r, c)
+ dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+ dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+// Decrypt decrypts the 8-byte buffer src using the key k
+// and stores the result in dst.
+func (c *Cipher) Decrypt(dst, src []byte) {
+ l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+ r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+ l, r = decryptBlock(l, r, c)
+ dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+ dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+func initCipher(c *Cipher) {
+ copy(c.p[0:], p[0:])
+ copy(c.s0[0:], s0[0:])
+ copy(c.s1[0:], s1[0:])
+ copy(c.s2[0:], s2[0:])
+ copy(c.s3[0:], s3[0:])
+}
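
A minimal usage sketch of the API above, separate from the vendored file: *Cipher satisfies the crypto/cipher Block interface, so anything longer than one block should go through a chaining mode such as CBC, exactly as the Encrypt comment warns. The key, IV, and plaintext values are placeholders.

    package main

    import (
    	"crypto/cipher"
    	"fmt"

    	"golang.org/x/crypto/blowfish"
    )

    func main() {
    	key := []byte("illustrative key")      // any length from 1 to 56 bytes
    	iv := make([]byte, blowfish.BlockSize) // all-zero IV, for illustration only
    	plaintext := make([]byte, 2*blowfish.BlockSize)

    	c, err := blowfish.NewCipher(key)
    	if err != nil {
    		panic(err)
    	}
    	ciphertext := make([]byte, len(plaintext))
    	cipher.NewCBCEncrypter(c, iv).CryptBlocks(ciphertext, plaintext)
    	fmt.Printf("%x\n", ciphertext)
    }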
diff --git a/vendor/src/golang.org/x/crypto/blowfish/const.go b/vendor/src/golang.org/x/crypto/blowfish/const.go
new file mode 100644
index 0000000000..8c5ee4cb08
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/blowfish/const.go
@@ -0,0 +1,199 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The startup permutation array and substitution boxes.
+// They are the hexadecimal digits of PI; see:
+// http://www.schneier.com/code/constants.txt.
+
+package blowfish
+
+var s0 = [256]uint32{
+ 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
+ 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
+ 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
+ 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
+ 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
+ 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
+ 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
+ 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
+ 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
+ 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
+ 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
+ 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
+ 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
+ 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
+ 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
+ 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
+ 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
+ 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
+ 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
+ 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
+ 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
+ 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
+ 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
+ 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
+ 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
+ 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
+ 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
+ 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
+ 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
+ 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
+ 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
+ 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
+ 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
+ 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
+ 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
+ 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
+ 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
+ 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
+ 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
+ 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
+ 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
+ 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
+ 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
+}
+
+var s1 = [256]uint32{
+ 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
+ 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
+ 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
+ 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
+ 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
+ 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
+ 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
+ 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
+ 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
+ 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
+ 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
+ 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
+ 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
+ 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
+ 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
+ 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
+ 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
+ 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
+ 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
+ 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
+ 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
+ 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
+ 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
+ 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
+ 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
+ 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
+ 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
+ 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
+ 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
+ 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
+ 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
+ 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
+ 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
+ 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
+ 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
+ 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
+ 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
+ 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
+ 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
+ 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
+ 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
+ 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
+ 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
+}
+
+var s2 = [256]uint32{
+ 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
+ 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
+ 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
+ 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
+ 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
+ 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
+ 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
+ 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
+ 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
+ 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
+ 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
+ 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
+ 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
+ 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
+ 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
+ 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
+ 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
+ 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
+ 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
+ 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
+ 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
+ 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
+ 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
+ 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
+ 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
+ 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
+ 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
+ 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
+ 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
+ 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
+ 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
+ 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
+ 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
+ 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
+ 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
+ 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
+ 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
+ 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
+ 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
+ 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
+ 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
+ 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
+ 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
+}
+
+var s3 = [256]uint32{
+ 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
+ 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
+ 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
+ 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
+ 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
+ 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
+ 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
+ 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
+ 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
+ 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
+ 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
+ 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
+ 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
+ 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
+ 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
+ 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
+ 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
+ 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
+ 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
+ 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
+ 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
+ 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
+ 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
+ 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
+ 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
+ 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
+ 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
+ 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
+ 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
+ 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
+ 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
+ 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
+ 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
+ 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
+ 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
+ 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
+ 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
+ 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
+ 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
+ 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
+ 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
+ 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
+ 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
+}
+
+var p = [18]uint32{
+ 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
+ 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
+ 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
+}
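
A quick sanity check on the constants above: in hexadecimal, π = 3.243F6A88 85A308D3 13198A2E 03707344…, and those 32-bit groups are exactly the first four entries of the p-array, as the comment at the top of this file promises.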
diff --git a/vendor/src/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/src/golang.org/x/crypto/pkcs12/bmp-string.go
new file mode 100644
index 0000000000..284d2a68f1
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/pkcs12/bmp-string.go
@@ -0,0 +1,50 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "errors"
+ "unicode/utf16"
+)
+
+// bmpString returns s encoded in UCS-2 with a zero terminator.
+func bmpString(s string) ([]byte, error) {
+ // References:
+ // https://tools.ietf.org/html/rfc7292#appendix-B.1
+ // http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
+ // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes
+ // EncodeRune returns 0xfffd if the rune does not need special encoding
+	//  - the RFC above notes that BMPStrings are NULL-terminated.
+
+ ret := make([]byte, 0, 2*len(s)+2)
+
+ for _, r := range s {
+ if t, _ := utf16.EncodeRune(r); t != 0xfffd {
+ return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2")
+ }
+ ret = append(ret, byte(r/256), byte(r%256))
+ }
+
+ return append(ret, 0, 0), nil
+}
+
+func decodeBMPString(bmpString []byte) (string, error) {
+ if len(bmpString)%2 != 0 {
+ return "", errors.New("pkcs12: odd-length BMP string")
+ }
+
+ // strip terminator if present
+ if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
+ bmpString = bmpString[:l-2]
+ }
+
+ s := make([]uint16, 0, len(bmpString)/2)
+ for len(bmpString) > 0 {
+ s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
+ bmpString = bmpString[2:]
+ }
+
+ return string(utf16.Decode(s)), nil
+}
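
A package-internal sketch of the round trip the two helpers above perform (both are unexported, so this only compiles inside package pkcs12; the sample rune is illustrative). 'σ' is U+03C3, inside the Basic Multilingual Plane, so it encodes as the big-endian pair 0x03 0xC3 followed by the two-byte terminator:

    enc, _ := bmpString("σ")       // []byte{0x03, 0xc3, 0x00, 0x00}
    dec, _ := decodeBMPString(enc) // terminator stripped, yields "σ"
    fmt.Println(dec == "σ")        // true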
diff --git a/vendor/src/golang.org/x/crypto/pkcs12/crypto.go b/vendor/src/golang.org/x/crypto/pkcs12/crypto.go
new file mode 100644
index 0000000000..4bd4470ec0
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/pkcs12/crypto.go
@@ -0,0 +1,131 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/des"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+
+ "golang.org/x/crypto/pkcs12/internal/rc2"
+)
+
+var (
+ oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
+ oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6})
+)
+
+// pbeCipher is an abstraction of a PKCS#12 cipher.
+type pbeCipher interface {
+ // create returns a cipher.Block given a key.
+ create(key []byte) (cipher.Block, error)
+ // deriveKey returns a key derived from the given password and salt.
+ deriveKey(salt, password []byte, iterations int) []byte
+	// deriveIV returns an IV derived from the given password and salt.
+ deriveIV(salt, password []byte, iterations int) []byte
+}
+
+type shaWithTripleDESCBC struct{}
+
+func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) {
+ return des.NewTripleDESCipher(key)
+}
+
+func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte {
+ return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24)
+}
+
+func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte {
+ return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
+}
+
+type shaWith40BitRC2CBC struct{}
+
+func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) {
+ return rc2.New(key, len(key)*8)
+}
+
+func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte {
+ return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5)
+}
+
+func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte {
+ return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
+}
+
+type pbeParams struct {
+ Salt []byte
+ Iterations int
+}
+
+func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) {
+ var cipherType pbeCipher
+
+ switch {
+ case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC):
+ cipherType = shaWithTripleDESCBC{}
+ case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC):
+ cipherType = shaWith40BitRC2CBC{}
+ default:
+ return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported")
+ }
+
+ var params pbeParams
+ if err := unmarshal(algorithm.Parameters.FullBytes, &params); err != nil {
+ return nil, 0, err
+ }
+
+ key := cipherType.deriveKey(params.Salt, password, params.Iterations)
+ iv := cipherType.deriveIV(params.Salt, password, params.Iterations)
+
+ block, err := cipherType.create(key)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil
+}
+
+func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) {
+ cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password)
+ if err != nil {
+ return nil, err
+ }
+
+ encrypted := info.Data()
+ if len(encrypted) == 0 {
+ return nil, errors.New("pkcs12: empty encrypted data")
+ }
+ if len(encrypted)%blockSize != 0 {
+ return nil, errors.New("pkcs12: input is not a multiple of the block size")
+ }
+ decrypted = make([]byte, len(encrypted))
+ cbc.CryptBlocks(decrypted, encrypted)
+
+ psLen := int(decrypted[len(decrypted)-1])
+ if psLen == 0 || psLen > blockSize {
+ return nil, ErrDecryption
+ }
+
+ if len(decrypted) < psLen {
+ return nil, ErrDecryption
+ }
+ ps := decrypted[len(decrypted)-psLen:]
+ decrypted = decrypted[:len(decrypted)-psLen]
+	if !bytes.Equal(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) {
+ return nil, ErrDecryption
+ }
+
+ return
+}
+
+// decryptable abstracts an object that contains ciphertext.
+type decryptable interface {
+ Algorithm() pkix.AlgorithmIdentifier
+ Data() []byte
+}
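
A worked illustration of the padding check in pbDecrypt above (blockSize is 8 for both supported ciphers; the bytes are made up). A plaintext that decrypts to "data" plus four 0x04 bytes is accepted and trimmed; a trailing 0x00, or a pad byte larger than the block size, yields ErrDecryption instead:

    decrypted := []byte{'d', 'a', 't', 'a', 0x04, 0x04, 0x04, 0x04}
    psLen := int(decrypted[len(decrypted)-1])   // 4: valid, since 0 < 4 <= 8
    ps := decrypted[len(decrypted)-psLen:]      // {4, 4, 4, 4} — every byte equals psLen
    payload := decrypted[:len(decrypted)-psLen] // "data"
    _, _ = ps, payload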
diff --git a/vendor/src/golang.org/x/crypto/pkcs12/errors.go b/vendor/src/golang.org/x/crypto/pkcs12/errors.go
new file mode 100644
index 0000000000..7377ce6fb2
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/pkcs12/errors.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import "errors"
+
+var (
+ // ErrDecryption represents a failure to decrypt the input.
+ ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding")
+
+ // ErrIncorrectPassword is returned when an incorrect password is detected.
+ // Usually, P12/PFX data is signed to be able to verify the password.
+ ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect")
+)
+
+// NotImplementedError indicates that the input is not currently supported.
+type NotImplementedError string
+
+func (e NotImplementedError) Error() string {
+ return "pkcs12: " + string(e)
+}
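
A small sketch of how client code can use these sentinels (pfxData and password are placeholders):

    if _, _, err := pkcs12.Decode(pfxData, password); err == pkcs12.ErrIncorrectPassword {
    	// wrong password: prompt the user and retry
    }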
diff --git a/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
new file mode 100644
index 0000000000..8c7090258c
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
@@ -0,0 +1,274 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rc2 implements the RC2 cipher
+/*
+https://www.ietf.org/rfc/rfc2268.txt
+http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf
+
+This code is licensed under the MIT license.
+*/
+package rc2
+
+import (
+ "crypto/cipher"
+ "encoding/binary"
+)
+
+// The rc2 block size in bytes
+const BlockSize = 8
+
+type rc2Cipher struct {
+ k [64]uint16
+}
+
+// New returns a new rc2 cipher with the given key and effective key length t1
+func New(key []byte, t1 int) (cipher.Block, error) {
+ // TODO(dgryski): error checking for key length
+ return &rc2Cipher{
+ k: expandKey(key, t1),
+ }, nil
+}
+
+func (*rc2Cipher) BlockSize() int { return BlockSize }
+
+var piTable = [256]byte{
+ 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d,
+ 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2,
+ 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32,
+ 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82,
+ 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc,
+ 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26,
+ 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03,
+ 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7,
+ 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a,
+ 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec,
+ 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39,
+ 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31,
+ 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9,
+ 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9,
+ 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e,
+ 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad,
+}
+
+func expandKey(key []byte, t1 int) [64]uint16 {
+
+ l := make([]byte, 128)
+ copy(l, key)
+
+ var t = len(key)
+ var t8 = (t1 + 7) / 8
+ var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8))))
+
+ for i := len(key); i < 128; i++ {
+ l[i] = piTable[l[i-1]+l[uint8(i-t)]]
+ }
+
+ l[128-t8] = piTable[l[128-t8]&tm]
+
+ for i := 127 - t8; i >= 0; i-- {
+ l[i] = piTable[l[i+1]^l[i+t8]]
+ }
+
+ var k [64]uint16
+
+ for i := range k {
+ k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256
+ }
+
+ return k
+}
+
+func rotl16(x uint16, b uint) uint16 {
+ return (x >> (16 - b)) | (x << b)
+}
+
+func (c *rc2Cipher) Encrypt(dst, src []byte) {
+
+ r0 := binary.LittleEndian.Uint16(src[0:])
+ r1 := binary.LittleEndian.Uint16(src[2:])
+ r2 := binary.LittleEndian.Uint16(src[4:])
+ r3 := binary.LittleEndian.Uint16(src[6:])
+
+ var j int
+
+ for j <= 16 {
+ // mix r0
+ r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
+ r0 = rotl16(r0, 1)
+ j++
+
+ // mix r1
+ r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
+ r1 = rotl16(r1, 2)
+ j++
+
+ // mix r2
+ r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
+ r2 = rotl16(r2, 3)
+ j++
+
+ // mix r3
+ r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
+ r3 = rotl16(r3, 5)
+ j++
+
+ }
+
+ r0 = r0 + c.k[r3&63]
+ r1 = r1 + c.k[r0&63]
+ r2 = r2 + c.k[r1&63]
+ r3 = r3 + c.k[r2&63]
+
+ for j <= 40 {
+
+ // mix r0
+ r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
+ r0 = rotl16(r0, 1)
+ j++
+
+ // mix r1
+ r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
+ r1 = rotl16(r1, 2)
+ j++
+
+ // mix r2
+ r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
+ r2 = rotl16(r2, 3)
+ j++
+
+ // mix r3
+ r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
+ r3 = rotl16(r3, 5)
+ j++
+
+ }
+
+ r0 = r0 + c.k[r3&63]
+ r1 = r1 + c.k[r0&63]
+ r2 = r2 + c.k[r1&63]
+ r3 = r3 + c.k[r2&63]
+
+ for j <= 60 {
+
+ // mix r0
+ r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
+ r0 = rotl16(r0, 1)
+ j++
+
+ // mix r1
+ r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
+ r1 = rotl16(r1, 2)
+ j++
+
+ // mix r2
+ r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
+ r2 = rotl16(r2, 3)
+ j++
+
+ // mix r3
+ r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
+ r3 = rotl16(r3, 5)
+ j++
+ }
+
+ binary.LittleEndian.PutUint16(dst[0:], r0)
+ binary.LittleEndian.PutUint16(dst[2:], r1)
+ binary.LittleEndian.PutUint16(dst[4:], r2)
+ binary.LittleEndian.PutUint16(dst[6:], r3)
+}
+
+func (c *rc2Cipher) Decrypt(dst, src []byte) {
+
+ r0 := binary.LittleEndian.Uint16(src[0:])
+ r1 := binary.LittleEndian.Uint16(src[2:])
+ r2 := binary.LittleEndian.Uint16(src[4:])
+ r3 := binary.LittleEndian.Uint16(src[6:])
+
+ j := 63
+
+ for j >= 44 {
+ // unmix r3
+ r3 = rotl16(r3, 16-5)
+ r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
+ j--
+
+ // unmix r2
+ r2 = rotl16(r2, 16-3)
+ r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
+ j--
+
+ // unmix r1
+ r1 = rotl16(r1, 16-2)
+ r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
+ j--
+
+ // unmix r0
+ r0 = rotl16(r0, 16-1)
+ r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
+ j--
+ }
+
+ r3 = r3 - c.k[r2&63]
+ r2 = r2 - c.k[r1&63]
+ r1 = r1 - c.k[r0&63]
+ r0 = r0 - c.k[r3&63]
+
+ for j >= 20 {
+ // unmix r3
+ r3 = rotl16(r3, 16-5)
+ r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
+ j--
+
+ // unmix r2
+ r2 = rotl16(r2, 16-3)
+ r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
+ j--
+
+ // unmix r1
+ r1 = rotl16(r1, 16-2)
+ r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
+ j--
+
+ // unmix r0
+ r0 = rotl16(r0, 16-1)
+ r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
+ j--
+
+ }
+
+ r3 = r3 - c.k[r2&63]
+ r2 = r2 - c.k[r1&63]
+ r1 = r1 - c.k[r0&63]
+ r0 = r0 - c.k[r3&63]
+
+ for j >= 0 {
+
+ // unmix r3
+ r3 = rotl16(r3, 16-5)
+ r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
+ j--
+
+ // unmix r2
+ r2 = rotl16(r2, 16-3)
+ r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
+ j--
+
+ // unmix r1
+ r1 = rotl16(r1, 16-2)
+ r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
+ j--
+
+ // unmix r0
+ r0 = rotl16(r0, 16-1)
+ r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
+ j--
+
+ }
+
+ binary.LittleEndian.PutUint16(dst[0:], r0)
+ binary.LittleEndian.PutUint16(dst[2:], r1)
+ binary.LittleEndian.PutUint16(dst[4:], r2)
+ binary.LittleEndian.PutUint16(dst[6:], r3)
+}
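
Because this package sits under internal/, only the pkcs12 tree may import it. A minimal sketch of how crypto.go, earlier in this diff, drives it (key, iv, src, and dst are placeholders):

    block, err := rc2.New(key, len(key)*8) // effective key size in bits
    if err != nil {
    	return nil, err
    }
    cbc := cipher.NewCBCDecrypter(block, iv) // iv must be rc2.BlockSize (8) bytes
    cbc.CryptBlocks(dst, src)                // len(src) must be a multiple of 8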
diff --git a/vendor/src/golang.org/x/crypto/pkcs12/mac.go b/vendor/src/golang.org/x/crypto/pkcs12/mac.go
new file mode 100644
index 0000000000..5f38aa7de8
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/pkcs12/mac.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+)
+
+type macData struct {
+ Mac digestInfo
+ MacSalt []byte
+ Iterations int `asn1:"optional,default:1"`
+}
+
+// from PKCS#7:
+type digestInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ Digest []byte
+}
+
+var (
+ oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26})
+)
+
+func verifyMac(macData *macData, message, password []byte) error {
+ if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) {
+ return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String())
+ }
+
+ key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20)
+
+ mac := hmac.New(sha1.New, key)
+ mac.Write(message)
+ expectedMAC := mac.Sum(nil)
+
+ if !hmac.Equal(macData.Mac.Digest, expectedMAC) {
+ return ErrIncorrectPassword
+ }
+ return nil
+}
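
One detail worth noting in verifyMac above: hmac.Equal compares in constant time, so the check does not leak how many digest bytes matched. A standalone sketch of the same verification, with key, message, and storedDigest as placeholders:

    mac := hmac.New(sha1.New, key) // key derived by pbkdf with ID = 3, as above
    mac.Write(message)
    if !hmac.Equal(storedDigest, mac.Sum(nil)) {
    	return ErrIncorrectPassword
    }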
diff --git a/vendor/src/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/src/golang.org/x/crypto/pkcs12/pbkdf.go
new file mode 100644
index 0000000000..5c419d41e3
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/pkcs12/pbkdf.go
@@ -0,0 +1,170 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "math/big"
+)
+
+var (
+ one = big.NewInt(1)
+)
+
+// sha1Sum returns the SHA-1 hash of in.
+func sha1Sum(in []byte) []byte {
+ sum := sha1.Sum(in)
+ return sum[:]
+}
+
+// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of
+// repeats of pattern.
+func fillWithRepeats(pattern []byte, v int) []byte {
+ if len(pattern) == 0 {
+ return nil
+ }
+ outputLen := v * ((len(pattern) + v - 1) / v)
+ return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen]
+}
+
+func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) {
+	// Implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2; the RFC text is quoted verbatim in the comments below.
+
+ // Let H be a hash function built around a compression function f:
+
+ // Z_2^u x Z_2^v -> Z_2^u
+
+ // (that is, H has a chaining variable and output of length u bits, and
+ // the message input to the compression function of H is v bits). The
+ // values for u and v are as follows:
+
+ // HASH FUNCTION VALUE u VALUE v
+ // MD2, MD5 128 512
+ // SHA-1 160 512
+ // SHA-224 224 512
+ // SHA-256 256 512
+ // SHA-384 384 1024
+ // SHA-512 512 1024
+ // SHA-512/224 224 1024
+ // SHA-512/256 256 1024
+
+ // Furthermore, let r be the iteration count.
+
+ // We assume here that u and v are both multiples of 8, as are the
+ // lengths of the password and salt strings (which we denote by p and s,
+ // respectively) and the number n of pseudorandom bits required. In
+ // addition, u and v are of course non-zero.
+
+ // For information on security considerations for MD5 [19], see [25] and
+ // [1], and on those for MD2, see [18].
+
+ // The following procedure can be used to produce pseudorandom bits for
+ // a particular "purpose" that is identified by a byte called "ID".
+ // This standard specifies 3 different values for the ID byte:
+
+ // 1. If ID=1, then the pseudorandom bits being produced are to be used
+ // as key material for performing encryption or decryption.
+
+ // 2. If ID=2, then the pseudorandom bits being produced are to be used
+ // as an IV (Initial Value) for encryption or decryption.
+
+ // 3. If ID=3, then the pseudorandom bits being produced are to be used
+ // as an integrity key for MACing.
+
+ // 1. Construct a string, D (the "diversifier"), by concatenating v/8
+ // copies of ID.
+ var D []byte
+ for i := 0; i < v; i++ {
+ D = append(D, ID)
+ }
+
+ // 2. Concatenate copies of the salt together to create a string S of
+ // length v(ceiling(s/v)) bits (the final copy of the salt may be
+ // truncated to create S). Note that if the salt is the empty
+ // string, then so is S.
+
+ S := fillWithRepeats(salt, v)
+
+ // 3. Concatenate copies of the password together to create a string P
+ // of length v(ceiling(p/v)) bits (the final copy of the password
+ // may be truncated to create P). Note that if the password is the
+ // empty string, then so is P.
+
+ P := fillWithRepeats(password, v)
+
+ // 4. Set I=S||P to be the concatenation of S and P.
+ I := append(S, P...)
+
+ // 5. Set c=ceiling(n/u).
+ c := (size + u - 1) / u
+
+ // 6. For i=1, 2, ..., c, do the following:
+	A := make([]byte, c*20) // note: assumes u == 20 (SHA-1), the only hash used in this package
+ var IjBuf []byte
+ for i := 0; i < c; i++ {
+		// A. Set Ai=H^r(D||I), i.e., the r-th hash of D||I:
+		// H(H(H(... H(D||I))))
+ Ai := hash(append(D, I...))
+ for j := 1; j < r; j++ {
+ Ai = hash(Ai)
+ }
+ copy(A[i*20:], Ai[:])
+
+ if i < c-1 { // skip on last iteration
+ // B. Concatenate copies of Ai to create a string B of length v
+ // bits (the final copy of Ai may be truncated to create B).
+ var B []byte
+ for len(B) < v {
+ B = append(B, Ai[:]...)
+ }
+ B = B[:v]
+
+ // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit
+ // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by
+ // setting I_j=(I_j+B+1) mod 2^v for each j.
+ {
+ Bbi := new(big.Int).SetBytes(B)
+ Ij := new(big.Int)
+
+ for j := 0; j < len(I)/v; j++ {
+ Ij.SetBytes(I[j*v : (j+1)*v])
+ Ij.Add(Ij, Bbi)
+ Ij.Add(Ij, one)
+ Ijb := Ij.Bytes()
+ // We expect Ijb to be exactly v bytes,
+ // if it is longer or shorter we must
+ // adjust it accordingly.
+ if len(Ijb) > v {
+ Ijb = Ijb[len(Ijb)-v:]
+ }
+ if len(Ijb) < v {
+ if IjBuf == nil {
+ IjBuf = make([]byte, v)
+ }
+ bytesShort := v - len(Ijb)
+ for i := 0; i < bytesShort; i++ {
+ IjBuf[i] = 0
+ }
+ copy(IjBuf[bytesShort:], Ijb)
+ Ijb = IjBuf
+ }
+ copy(I[j*v:(j+1)*v], Ijb)
+ }
+ }
+ }
+ }
+ // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom
+ // bit string, A.
+
+ // 8. Use the first n bits of A as the output of this entire process.
+ return A[:size]
+
+ // If the above process is being used to generate a DES key, the process
+ // should be used to create 64 random bits, and the key's parity bits
+ // should be set after the 64 bits have been produced. Similar concerns
+ // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any
+ // similar keys with parity bits "built into them".
+}
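
A worked instance of the procedure above: the 3DES key derivation in crypto.go calls pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24). Here u = 20 and v = 64 are byte counts (the RFC states them in bits), so D is 64 copies of ID = 1, c = ceiling(24/20) = 2, two SHA-1 outputs (40 bytes) are concatenated, and the first 24 bytes become the key. The IV derivation is identical except for ID = 2 and size = 8.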
diff --git a/vendor/src/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/src/golang.org/x/crypto/pkcs12/pkcs12.go
new file mode 100644
index 0000000000..ad6341e60f
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/pkcs12/pkcs12.go
@@ -0,0 +1,345 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkcs12 implements some of PKCS#12.
+//
+// This implementation is distilled from https://tools.ietf.org/html/rfc7292
+// and referenced documents. It is intended for decoding P12/PFX-stored
+// certificates and keys for use with the crypto/tls package.
+package pkcs12
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1})
+ oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6})
+
+ oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20})
+ oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21})
+ oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1})
+)
+
+type pfxPdu struct {
+ Version int
+ AuthSafe contentInfo
+ MacData macData `asn1:"optional"`
+}
+
+type contentInfo struct {
+ ContentType asn1.ObjectIdentifier
+ Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
+}
+
+type encryptedData struct {
+ Version int
+ EncryptedContentInfo encryptedContentInfo
+}
+
+type encryptedContentInfo struct {
+ ContentType asn1.ObjectIdentifier
+ ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
+ EncryptedContent []byte `asn1:"tag:0,optional"`
+}
+
+func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier {
+ return i.ContentEncryptionAlgorithm
+}
+
+func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent }
+
+type safeBag struct {
+ Id asn1.ObjectIdentifier
+ Value asn1.RawValue `asn1:"tag:0,explicit"`
+ Attributes []pkcs12Attribute `asn1:"set,optional"`
+}
+
+type pkcs12Attribute struct {
+ Id asn1.ObjectIdentifier
+ Value asn1.RawValue `asn1:"set"`
+}
+
+type encryptedPrivateKeyInfo struct {
+ AlgorithmIdentifier pkix.AlgorithmIdentifier
+ EncryptedData []byte
+}
+
+func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier {
+ return i.AlgorithmIdentifier
+}
+
+func (i encryptedPrivateKeyInfo) Data() []byte {
+ return i.EncryptedData
+}
+
+// PEM block types
+const (
+ certificateType = "CERTIFICATE"
+ privateKeyType = "PRIVATE KEY"
+)
+
+// unmarshal calls asn1.Unmarshal, but also returns an error if there is any
+// trailing data after unmarshaling.
+func unmarshal(in []byte, out interface{}) error {
+ trailing, err := asn1.Unmarshal(in, out)
+ if err != nil {
+ return err
+ }
+ if len(trailing) != 0 {
+ return errors.New("pkcs12: trailing data found")
+ }
+ return nil
+}
+
+// ToPEM converts all "safe bags" contained in pfxData to PEM blocks.
+func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) {
+ encodedPassword, err := bmpString(password)
+ if err != nil {
+ return nil, ErrIncorrectPassword
+ }
+
+	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
+	if err != nil {
+		return nil, err
+	}
+
+ blocks := make([]*pem.Block, 0, len(bags))
+ for _, bag := range bags {
+ block, err := convertBag(&bag, encodedPassword)
+ if err != nil {
+ return nil, err
+ }
+ blocks = append(blocks, block)
+ }
+
+ return blocks, nil
+}
+
+func convertBag(bag *safeBag, password []byte) (*pem.Block, error) {
+ block := &pem.Block{
+ Headers: make(map[string]string),
+ }
+
+ for _, attribute := range bag.Attributes {
+ k, v, err := convertAttribute(&attribute)
+ if err != nil {
+ return nil, err
+ }
+ block.Headers[k] = v
+ }
+
+ switch {
+ case bag.Id.Equal(oidCertBag):
+ block.Type = certificateType
+ certsData, err := decodeCertBag(bag.Value.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ block.Bytes = certsData
+ case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
+ block.Type = privateKeyType
+
+ key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password)
+ if err != nil {
+ return nil, err
+ }
+
+ switch key := key.(type) {
+ case *rsa.PrivateKey:
+ block.Bytes = x509.MarshalPKCS1PrivateKey(key)
+ case *ecdsa.PrivateKey:
+ block.Bytes, err = x509.MarshalECPrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, errors.New("found unknown private key type in PKCS#8 wrapping")
+ }
+ default:
+ return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String())
+ }
+ return block, nil
+}
+
+func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) {
+ isString := false
+
+ switch {
+ case attribute.Id.Equal(oidFriendlyName):
+ key = "friendlyName"
+ isString = true
+ case attribute.Id.Equal(oidLocalKeyID):
+ key = "localKeyId"
+ case attribute.Id.Equal(oidMicrosoftCSPName):
+ // This key is chosen to match OpenSSL.
+ key = "Microsoft CSP Name"
+ isString = true
+ default:
+ return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String())
+ }
+
+ if isString {
+ if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil {
+ return "", "", err
+ }
+ if value, err = decodeBMPString(attribute.Value.Bytes); err != nil {
+ return "", "", err
+ }
+ } else {
+ var id []byte
+ if err := unmarshal(attribute.Value.Bytes, &id); err != nil {
+ return "", "", err
+ }
+ value = hex.EncodeToString(id)
+ }
+
+ return key, value, nil
+}
+
+// Decode extracts a certificate and private key from pfxData. This function
+// assumes that there is only one certificate and only one private key in the
+// pfxData.
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) {
+ encodedPassword, err := bmpString(password)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(bags) != 2 {
+ err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU")
+ return
+ }
+
+ for _, bag := range bags {
+ switch {
+ case bag.Id.Equal(oidCertBag):
+			if certificate != nil {
+				return nil, nil, errors.New("pkcs12: expected exactly one certificate bag")
+			}
+
+ certsData, err := decodeCertBag(bag.Value.Bytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ certs, err := x509.ParseCertificates(certsData)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(certs) != 1 {
+ err = errors.New("pkcs12: expected exactly one certificate in the certBag")
+ return nil, nil, err
+ }
+ certificate = certs[0]
+
+ case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
+			if privateKey != nil {
+				return nil, nil, errors.New("pkcs12: expected exactly one key bag")
+			}
+
+ if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+
+ if certificate == nil {
+ return nil, nil, errors.New("pkcs12: certificate missing")
+ }
+ if privateKey == nil {
+ return nil, nil, errors.New("pkcs12: private key missing")
+ }
+
+ return
+}
+
+func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) {
+ pfx := new(pfxPdu)
+ if err := unmarshal(p12Data, pfx); err != nil {
+ return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error())
+ }
+
+ if pfx.Version != 3 {
+		return nil, nil, NotImplementedError("can only decode v3 PFX PDUs")
+ }
+
+ if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) {
+ return nil, nil, NotImplementedError("only password-protected PFX is implemented")
+ }
+
+ // unmarshal the explicit bytes in the content for type 'data'
+ if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil {
+ return nil, nil, err
+ }
+
+ if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 {
+ return nil, nil, errors.New("pkcs12: no MAC in data")
+ }
+
+ if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil {
+ if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 {
+			// Some implementations use an empty byte array for the
+			// empty-string password; try one more time with a nil
+			// password.
+ password = nil
+ err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password)
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ var authenticatedSafe []contentInfo
+ if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil {
+ return nil, nil, err
+ }
+
+ if len(authenticatedSafe) != 2 {
+ return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe")
+ }
+
+ for _, ci := range authenticatedSafe {
+ var data []byte
+
+ switch {
+ case ci.ContentType.Equal(oidDataContentType):
+ if err := unmarshal(ci.Content.Bytes, &data); err != nil {
+ return nil, nil, err
+ }
+ case ci.ContentType.Equal(oidEncryptedDataContentType):
+ var encryptedData encryptedData
+ if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil {
+ return nil, nil, err
+ }
+ if encryptedData.Version != 0 {
+ return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported")
+ }
+ if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil {
+ return nil, nil, err
+ }
+ default:
+ return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe")
+ }
+
+ var safeContents []safeBag
+ if err := unmarshal(data, &safeContents); err != nil {
+ return nil, nil, err
+ }
+ bags = append(bags, safeContents...)
+ }
+
+ return bags, password, nil
+}
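
A minimal end-to-end sketch matching the package comment at the top of this file (the file name and password are placeholders, and error handling is trimmed to log.Fatal):

    package main

    import (
    	"crypto/tls"
    	"io/ioutil"
    	"log"

    	"golang.org/x/crypto/pkcs12"
    )

    func main() {
    	pfxData, err := ioutil.ReadFile("client.p12") // hypothetical input file
    	if err != nil {
    		log.Fatal(err)
    	}
    	key, cert, err := pkcs12.Decode(pfxData, "password")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Decode guarantees exactly one certificate and one private key.
    	tlsCert := tls.Certificate{
    		Certificate: [][]byte{cert.Raw},
    		PrivateKey:  key,
    		Leaf:        cert,
    	}
    	_ = tlsCert
    }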
diff --git a/vendor/src/golang.org/x/crypto/pkcs12/safebags.go b/vendor/src/golang.org/x/crypto/pkcs12/safebags.go
new file mode 100644
index 0000000000..def1f7b98d
--- /dev/null
+++ b/vendor/src/golang.org/x/crypto/pkcs12/safebags.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "crypto/x509"
+ "encoding/asn1"
+ "errors"
+)
+
+var (
+ // see https://tools.ietf.org/html/rfc7292#appendix-D
+ oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1})
+ oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2})
+ oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3})
+)
+
+type certBag struct {
+ Id asn1.ObjectIdentifier
+ Data []byte `asn1:"tag:0,explicit"`
+}
+
+func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) {
+ pkinfo := new(encryptedPrivateKeyInfo)
+ if err = unmarshal(asn1Data, pkinfo); err != nil {
+ return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error())
+ }
+
+ pkData, err := pbDecrypt(pkinfo, password)
+ if err != nil {
+ return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error())
+ }
+
+ ret := new(asn1.RawValue)
+ if err = unmarshal(pkData, ret); err != nil {
+ return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error())
+ }
+
+ if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil {
+ return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error())
+ }
+
+ return privateKey, nil
+}
+
+func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) {
+ bag := new(certBag)
+ if err := unmarshal(asn1Data, bag); err != nil {
+ return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error())
+ }
+ if !bag.Id.Equal(oidCertTypeX509Certificate) {
+ return nil, NotImplementedError("only X509 certificates are supported")
+ }
+ return bag.Data, nil
+}
diff --git a/volume/volume.go b/volume/volume.go
index 1b57d85087..9bb8b7cdf0 100644
--- a/volume/volume.go
+++ b/volume/volume.go
@@ -140,6 +140,17 @@ func (m *MountPoint) Path() string {
return m.Source
}
+// Type returns the type of the mount point: VOLUME, BIND, or EPHEMERAL.
+func (m *MountPoint) Type() string {
+ if m.Name != "" {
+ return "VOLUME"
+ }
+ if m.Source != "" {
+ return "BIND"
+ }
+ return "EPHEMERAL"
+}
+
// ParseVolumesFrom ensures that the supplied volumes-from is valid.
func ParseVolumesFrom(spec string) (string, string, error) {
if len(spec) == 0 {